[qemu.git] / linux-user / syscall.c
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/utsname.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
103 #endif
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include <linux/route.h>
109 #include "linux_loop.h"
110 #include "cpu-uname.h"
112 #include "qemu.h"
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
117 //#define DEBUG
119 //#include <linux/msdos_fs.h>
120 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
121 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
124 #undef _syscall0
125 #undef _syscall1
126 #undef _syscall2
127 #undef _syscall3
128 #undef _syscall4
129 #undef _syscall5
130 #undef _syscall6
132 #define _syscall0(type,name) \
133 static type name (void) \
135 return syscall(__NR_##name); \
138 #define _syscall1(type,name,type1,arg1) \
139 static type name (type1 arg1) \
141 return syscall(__NR_##name, arg1); \
144 #define _syscall2(type,name,type1,arg1,type2,arg2) \
145 static type name (type1 arg1,type2 arg2) \
147 return syscall(__NR_##name, arg1, arg2); \
150 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
151 static type name (type1 arg1,type2 arg2,type3 arg3) \
153 return syscall(__NR_##name, arg1, arg2, arg3); \
156 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
157 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
159 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
162 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
166 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
170 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
171 type5,arg5,type6,arg6) \
172 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
173 type6 arg6) \
175 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
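/*
 * For illustration (a sketch, not generated output): an invocation such as
 *
 *   _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count)
 *
 * defines roughly
 *
 *   static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *   {
 *       return syscall(__NR_sys_getdents, fd, dirp, count);
 *   }
 *
 * i.e. a thin host-side wrapper that returns the raw syscall result and
 * leaves errno translation to the callers (usually via get_errno()).
 */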
179 #define __NR_sys_uname __NR_uname
180 #define __NR_sys_getcwd1 __NR_getcwd
181 #define __NR_sys_getdents __NR_getdents
182 #define __NR_sys_getdents64 __NR_getdents64
183 #define __NR_sys_getpriority __NR_getpriority
184 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
185 #define __NR_sys_syslog __NR_syslog
186 #define __NR_sys_tgkill __NR_tgkill
187 #define __NR_sys_tkill __NR_tkill
188 #define __NR_sys_futex __NR_futex
189 #define __NR_sys_inotify_init __NR_inotify_init
190 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
191 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
193 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
194 defined(__s390x__)
195 #define __NR__llseek __NR_lseek
196 #endif
198 #ifdef __NR_gettid
199 _syscall0(int, gettid)
200 #else
201 /* This is a replacement for the host gettid() and must return a host
202 errno. */
203 static int gettid(void) {
204 return -ENOSYS;
206 #endif
207 #ifdef __NR_getdents
208 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
209 #endif
210 #if !defined(__NR_getdents) || \
211 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
212 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
213 #endif
214 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
215 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
216 loff_t *, res, uint, wh);
217 #endif
218 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
219 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
220 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
221 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
222 #endif
223 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
224 _syscall2(int,sys_tkill,int,tid,int,sig)
225 #endif
226 #ifdef __NR_exit_group
227 _syscall1(int,exit_group,int,error_code)
228 #endif
229 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
230 _syscall1(int,set_tid_address,int *,tidptr)
231 #endif
232 #if defined(TARGET_NR_futex) && defined(__NR_futex)
233 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
234 const struct timespec *,timeout,int *,uaddr2,int,val3)
235 #endif
236 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
237 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
238 unsigned long *, user_mask_ptr);
239 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
240 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
241 unsigned long *, user_mask_ptr);
242 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
243 void *, arg);
245 static bitmask_transtbl fcntl_flags_tbl[] = {
246 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
247 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
248 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
249 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
250 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
251 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
252 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
253 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
254 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
255 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
256 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
257 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
258 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
259 #if defined(O_DIRECT)
260 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
261 #endif
262 #if defined(O_NOATIME)
263 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
264 #endif
265 #if defined(O_CLOEXEC)
266 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
267 #endif
268 #if defined(O_PATH)
269 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
270 #endif
271 /* Don't terminate the list prematurely on 64-bit host+guest. */
272 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
273 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
274 #endif
275 { 0, 0, 0, 0 }
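/*
 * Each entry maps a (mask, bits) pair in the target flag space to the
 * corresponding pair in the host flag space; the all-zero entry terminates
 * the table. A sketch of how such a table is typically consumed when
 * translating guest open(2)/fcntl(2) flags to host flags (the real helpers,
 * target_to_host_bitmask() and its inverse, live elsewhere in the
 * linux-user code; field names below are illustrative only):
 *
 *   unsigned int host_flags = 0;
 *   for (btt = fcntl_flags_tbl; btt->target_mask; btt++) {
 *       if ((target_flags & btt->target_mask) == btt->target_bits) {
 *           host_flags |= btt->host_bits;
 *       }
 *   }
 */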
278 #define COPY_UTSNAME_FIELD(dest, src) \
279 do { \
280 /* __NEW_UTS_LEN doesn't include terminating null */ \
281 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
282 (dest)[__NEW_UTS_LEN] = '\0'; \
283 } while (0)
285 static int sys_uname(struct new_utsname *buf)
287 struct utsname uts_buf;
289 if (uname(&uts_buf) < 0)
290 return (-1);
293 * Just in case these have some differences, we
294 * translate utsname to new_utsname (which is the
295 * struct the Linux kernel uses).
298 memset(buf, 0, sizeof(*buf));
299 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
300 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
301 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
302 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
303 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
304 #ifdef _GNU_SOURCE
305 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
306 #endif
307 return (0);
309 #undef COPY_UTSNAME_FIELD
312 static int sys_getcwd1(char *buf, size_t size)
314 if (getcwd(buf, size) == NULL) {
315 /* getcwd() sets errno */
316 return (-1);
318 return strlen(buf)+1;
321 #ifdef TARGET_NR_openat
322 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
325 * open(2) has extra parameter 'mode' when called with
326 * flag O_CREAT.
328 if ((flags & O_CREAT) != 0) {
329 return (openat(dirfd, pathname, flags, mode));
331 return (openat(dirfd, pathname, flags));
333 #endif
335 #ifdef TARGET_NR_utimensat
336 #ifdef CONFIG_UTIMENSAT
337 static int sys_utimensat(int dirfd, const char *pathname,
338 const struct timespec times[2], int flags)
340 if (pathname == NULL)
341 return futimens(dirfd, times);
342 else
343 return utimensat(dirfd, pathname, times, flags);
345 #elif defined(__NR_utimensat)
346 #define __NR_sys_utimensat __NR_utimensat
347 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
348 const struct timespec *,tsp,int,flags)
349 #else
350 static int sys_utimensat(int dirfd, const char *pathname,
351 const struct timespec times[2], int flags)
353 errno = ENOSYS;
354 return -1;
356 #endif
357 #endif /* TARGET_NR_utimensat */
359 #ifdef CONFIG_INOTIFY
360 #include <sys/inotify.h>
362 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
363 static int sys_inotify_init(void)
365 return (inotify_init());
367 #endif
368 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
369 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
371 return (inotify_add_watch(fd, pathname, mask));
373 #endif
374 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
375 static int sys_inotify_rm_watch(int fd, int32_t wd)
377 return (inotify_rm_watch(fd, wd));
379 #endif
380 #ifdef CONFIG_INOTIFY1
381 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
382 static int sys_inotify_init1(int flags)
384 return (inotify_init1(flags));
386 #endif
387 #endif
388 #else
389 /* Userspace can usually survive runtime without inotify */
390 #undef TARGET_NR_inotify_init
391 #undef TARGET_NR_inotify_init1
392 #undef TARGET_NR_inotify_add_watch
393 #undef TARGET_NR_inotify_rm_watch
394 #endif /* CONFIG_INOTIFY */
396 #if defined(TARGET_NR_ppoll)
397 #ifndef __NR_ppoll
398 # define __NR_ppoll -1
399 #endif
400 #define __NR_sys_ppoll __NR_ppoll
401 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
402 struct timespec *, timeout, const __sigset_t *, sigmask,
403 size_t, sigsetsize)
404 #endif
406 #if defined(TARGET_NR_pselect6)
407 #ifndef __NR_pselect6
408 # define __NR_pselect6 -1
409 #endif
410 #define __NR_sys_pselect6 __NR_pselect6
411 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
412 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
413 #endif
415 #if defined(TARGET_NR_prlimit64)
416 #ifndef __NR_prlimit64
417 # define __NR_prlimit64 -1
418 #endif
419 #define __NR_sys_prlimit64 __NR_prlimit64
420 /* The glibc rlimit structure may not be that used by the underlying syscall */
421 struct host_rlimit64 {
422 uint64_t rlim_cur;
423 uint64_t rlim_max;
425 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
426 const struct host_rlimit64 *, new_limit,
427 struct host_rlimit64 *, old_limit)
428 #endif
430 /* ARM EABI and MIPS expect 64bit types to be aligned on even pairs of registers */
431 #ifdef TARGET_ARM
432 static inline int regpairs_aligned(void *cpu_env) {
433 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
435 #elif defined(TARGET_MIPS)
436 static inline int regpairs_aligned(void *cpu_env) { return 1; }
437 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
438 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
439 * of registers, which translates to the same as ARM/MIPS, because we start with
440 * r3 as arg1 */
441 static inline int regpairs_aligned(void *cpu_env) { return 1; }
442 #else
443 static inline int regpairs_aligned(void *cpu_env) { return 0; }
444 #endif
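/*
 * Sketch of how regpairs_aligned() is used further down: syscalls such as
 * pread64/pwrite64 receive a 64-bit value split across two 32-bit argument
 * slots. On ABIs where regpairs_aligned() returns 1 the pair must start on
 * an even register, so one argument slot is skipped first, e.g.
 * (illustrative only; the exact pairing order depends on target endianness):
 *
 *   if (regpairs_aligned(cpu_env)) {
 *       arg4 = arg5;
 *       arg5 = arg6;
 *   }
 *   offset = ((uint64_t)arg5 << 32) | (uint32_t)arg4;
 */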
446 #define ERRNO_TABLE_SIZE 1200
448 /* target_to_host_errno_table[] is initialized from
449 * host_to_target_errno_table[] in syscall_init(). */
450 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
454 * This list is the union of errno values overridden in asm-<arch>/errno.h
455 * minus the errnos that are not actually generic to all archs.
457 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
458 [EIDRM] = TARGET_EIDRM,
459 [ECHRNG] = TARGET_ECHRNG,
460 [EL2NSYNC] = TARGET_EL2NSYNC,
461 [EL3HLT] = TARGET_EL3HLT,
462 [EL3RST] = TARGET_EL3RST,
463 [ELNRNG] = TARGET_ELNRNG,
464 [EUNATCH] = TARGET_EUNATCH,
465 [ENOCSI] = TARGET_ENOCSI,
466 [EL2HLT] = TARGET_EL2HLT,
467 [EDEADLK] = TARGET_EDEADLK,
468 [ENOLCK] = TARGET_ENOLCK,
469 [EBADE] = TARGET_EBADE,
470 [EBADR] = TARGET_EBADR,
471 [EXFULL] = TARGET_EXFULL,
472 [ENOANO] = TARGET_ENOANO,
473 [EBADRQC] = TARGET_EBADRQC,
474 [EBADSLT] = TARGET_EBADSLT,
475 [EBFONT] = TARGET_EBFONT,
476 [ENOSTR] = TARGET_ENOSTR,
477 [ENODATA] = TARGET_ENODATA,
478 [ETIME] = TARGET_ETIME,
479 [ENOSR] = TARGET_ENOSR,
480 [ENONET] = TARGET_ENONET,
481 [ENOPKG] = TARGET_ENOPKG,
482 [EREMOTE] = TARGET_EREMOTE,
483 [ENOLINK] = TARGET_ENOLINK,
484 [EADV] = TARGET_EADV,
485 [ESRMNT] = TARGET_ESRMNT,
486 [ECOMM] = TARGET_ECOMM,
487 [EPROTO] = TARGET_EPROTO,
488 [EDOTDOT] = TARGET_EDOTDOT,
489 [EMULTIHOP] = TARGET_EMULTIHOP,
490 [EBADMSG] = TARGET_EBADMSG,
491 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
492 [EOVERFLOW] = TARGET_EOVERFLOW,
493 [ENOTUNIQ] = TARGET_ENOTUNIQ,
494 [EBADFD] = TARGET_EBADFD,
495 [EREMCHG] = TARGET_EREMCHG,
496 [ELIBACC] = TARGET_ELIBACC,
497 [ELIBBAD] = TARGET_ELIBBAD,
498 [ELIBSCN] = TARGET_ELIBSCN,
499 [ELIBMAX] = TARGET_ELIBMAX,
500 [ELIBEXEC] = TARGET_ELIBEXEC,
501 [EILSEQ] = TARGET_EILSEQ,
502 [ENOSYS] = TARGET_ENOSYS,
503 [ELOOP] = TARGET_ELOOP,
504 [ERESTART] = TARGET_ERESTART,
505 [ESTRPIPE] = TARGET_ESTRPIPE,
506 [ENOTEMPTY] = TARGET_ENOTEMPTY,
507 [EUSERS] = TARGET_EUSERS,
508 [ENOTSOCK] = TARGET_ENOTSOCK,
509 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
510 [EMSGSIZE] = TARGET_EMSGSIZE,
511 [EPROTOTYPE] = TARGET_EPROTOTYPE,
512 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
513 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
514 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
515 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
516 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
517 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
518 [EADDRINUSE] = TARGET_EADDRINUSE,
519 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
520 [ENETDOWN] = TARGET_ENETDOWN,
521 [ENETUNREACH] = TARGET_ENETUNREACH,
522 [ENETRESET] = TARGET_ENETRESET,
523 [ECONNABORTED] = TARGET_ECONNABORTED,
524 [ECONNRESET] = TARGET_ECONNRESET,
525 [ENOBUFS] = TARGET_ENOBUFS,
526 [EISCONN] = TARGET_EISCONN,
527 [ENOTCONN] = TARGET_ENOTCONN,
528 [EUCLEAN] = TARGET_EUCLEAN,
529 [ENOTNAM] = TARGET_ENOTNAM,
530 [ENAVAIL] = TARGET_ENAVAIL,
531 [EISNAM] = TARGET_EISNAM,
532 [EREMOTEIO] = TARGET_EREMOTEIO,
533 [ESHUTDOWN] = TARGET_ESHUTDOWN,
534 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
535 [ETIMEDOUT] = TARGET_ETIMEDOUT,
536 [ECONNREFUSED] = TARGET_ECONNREFUSED,
537 [EHOSTDOWN] = TARGET_EHOSTDOWN,
538 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
539 [EALREADY] = TARGET_EALREADY,
540 [EINPROGRESS] = TARGET_EINPROGRESS,
541 [ESTALE] = TARGET_ESTALE,
542 [ECANCELED] = TARGET_ECANCELED,
543 [ENOMEDIUM] = TARGET_ENOMEDIUM,
544 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
545 #ifdef ENOKEY
546 [ENOKEY] = TARGET_ENOKEY,
547 #endif
548 #ifdef EKEYEXPIRED
549 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
550 #endif
551 #ifdef EKEYREVOKED
552 [EKEYREVOKED] = TARGET_EKEYREVOKED,
553 #endif
554 #ifdef EKEYREJECTED
555 [EKEYREJECTED] = TARGET_EKEYREJECTED,
556 #endif
557 #ifdef EOWNERDEAD
558 [EOWNERDEAD] = TARGET_EOWNERDEAD,
559 #endif
560 #ifdef ENOTRECOVERABLE
561 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
562 #endif
565 static inline int host_to_target_errno(int err)
567 if(host_to_target_errno_table[err])
568 return host_to_target_errno_table[err];
569 return err;
572 static inline int target_to_host_errno(int err)
574 if (target_to_host_errno_table[err])
575 return target_to_host_errno_table[err];
576 return err;
579 static inline abi_long get_errno(abi_long ret)
581 if (ret == -1)
582 return -host_to_target_errno(errno);
583 else
584 return ret;
587 static inline int is_error(abi_long ret)
589 return (abi_ulong)ret >= (abi_ulong)(-4096);
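/*
 * Typical calling pattern used throughout this file: wrap the host call in
 * get_errno() so a failure becomes a negative target errno, then test the
 * result with is_error() before copying anything back to guest memory, e.g.:
 *
 *   ret = get_errno(fchmod(fd, mode));
 *   if (!is_error(ret)) {
 *       ... copy results back to the guest ...
 *   }
 *   return ret;   // either the host return value or -TARGET_Exxx
 */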
592 char *target_strerror(int err)
594 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
595 return NULL;
597 return strerror(target_to_host_errno(err));
600 static abi_ulong target_brk;
601 static abi_ulong target_original_brk;
602 static abi_ulong brk_page;
604 void target_set_brk(abi_ulong new_brk)
606 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
607 brk_page = HOST_PAGE_ALIGN(target_brk);
610 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
611 #define DEBUGF_BRK(message, args...)
613 /* do_brk() must return target values and target errnos. */
614 abi_long do_brk(abi_ulong new_brk)
616 abi_long mapped_addr;
617 int new_alloc_size;
619 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
621 if (!new_brk) {
622 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
623 return target_brk;
625 if (new_brk < target_original_brk) {
626 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
627 target_brk);
628 return target_brk;
631 /* If the new brk is less than the highest page reserved to the
632 * target heap allocation, set it and we're almost done... */
633 if (new_brk <= brk_page) {
634 /* Heap contents are initialized to zero, as for anonymous
635 * mapped pages. */
636 if (new_brk > target_brk) {
637 memset(g2h(target_brk), 0, new_brk - target_brk);
639 target_brk = new_brk;
640 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
641 return target_brk;
644 /* We need to allocate more memory after the brk... Note that
645 * we don't use MAP_FIXED because that will map over the top of
646 * any existing mapping (like the one with the host libc or qemu
647 * itself); instead we treat "mapped but at wrong address" as
648 * a failure and unmap again.
650 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
651 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
652 PROT_READ|PROT_WRITE,
653 MAP_ANON|MAP_PRIVATE, 0, 0));
655 if (mapped_addr == brk_page) {
656 /* Heap contents are initialized to zero, as for anonymous
657 * mapped pages. Technically the new pages are already
658 * initialized to zero since they *are* anonymous mapped
659 * pages, however we have to take care with the contents that
660 * come from the remaining part of the previous page: it may
661 * contain garbage data due to a previous heap usage (grown
662 * then shrunken). */
663 memset(g2h(target_brk), 0, brk_page - target_brk);
665 target_brk = new_brk;
666 brk_page = HOST_PAGE_ALIGN(target_brk);
667 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
668 target_brk);
669 return target_brk;
670 } else if (mapped_addr != -1) {
671 /* Mapped but at wrong address, meaning there wasn't actually
672 * enough space for this brk.
674 target_munmap(mapped_addr, new_alloc_size);
675 mapped_addr = -1;
676 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
678 else {
679 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
682 #if defined(TARGET_ALPHA)
683 /* We (partially) emulate OSF/1 on Alpha, which requires we
684 return a proper errno, not an unchanged brk value. */
685 return -TARGET_ENOMEM;
686 #endif
687 /* For everything else, return the previous break. */
688 return target_brk;
691 static inline abi_long copy_from_user_fdset(fd_set *fds,
692 abi_ulong target_fds_addr,
693 int n)
695 int i, nw, j, k;
696 abi_ulong b, *target_fds;
698 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
699 if (!(target_fds = lock_user(VERIFY_READ,
700 target_fds_addr,
701 sizeof(abi_ulong) * nw,
702 1)))
703 return -TARGET_EFAULT;
705 FD_ZERO(fds);
706 k = 0;
707 for (i = 0; i < nw; i++) {
708 /* grab the abi_ulong */
709 __get_user(b, &target_fds[i]);
710 for (j = 0; j < TARGET_ABI_BITS; j++) {
711 /* check the bit inside the abi_ulong */
712 if ((b >> j) & 1)
713 FD_SET(k, fds);
714 k++;
718 unlock_user(target_fds, target_fds_addr, 0);
720 return 0;
723 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
724 abi_ulong target_fds_addr,
725 int n)
727 if (target_fds_addr) {
728 if (copy_from_user_fdset(fds, target_fds_addr, n))
729 return -TARGET_EFAULT;
730 *fds_ptr = fds;
731 } else {
732 *fds_ptr = NULL;
734 return 0;
737 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
738 const fd_set *fds,
739 int n)
741 int i, nw, j, k;
742 abi_long v;
743 abi_ulong *target_fds;
745 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
746 if (!(target_fds = lock_user(VERIFY_WRITE,
747 target_fds_addr,
748 sizeof(abi_ulong) * nw,
749 0)))
750 return -TARGET_EFAULT;
752 k = 0;
753 for (i = 0; i < nw; i++) {
754 v = 0;
755 for (j = 0; j < TARGET_ABI_BITS; j++) {
756 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
757 k++;
759 __put_user(v, &target_fds[i]);
762 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
764 return 0;
767 #if defined(__alpha__)
768 #define HOST_HZ 1024
769 #else
770 #define HOST_HZ 100
771 #endif
773 static inline abi_long host_to_target_clock_t(long ticks)
775 #if HOST_HZ == TARGET_HZ
776 return ticks;
777 #else
778 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
779 #endif
782 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
783 const struct rusage *rusage)
785 struct target_rusage *target_rusage;
787 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
788 return -TARGET_EFAULT;
789 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
790 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
791 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
792 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
793 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
794 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
795 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
796 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
797 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
798 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
799 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
800 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
801 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
802 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
803 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
804 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
805 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
806 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
807 unlock_user_struct(target_rusage, target_addr, 1);
809 return 0;
812 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
814 abi_ulong target_rlim_swap;
815 rlim_t result;
817 target_rlim_swap = tswapal(target_rlim);
818 if (target_rlim_swap == TARGET_RLIM_INFINITY)
819 return RLIM_INFINITY;
821 result = target_rlim_swap;
822 if (target_rlim_swap != (rlim_t)result)
823 return RLIM_INFINITY;
825 return result;
828 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
830 abi_ulong target_rlim_swap;
831 abi_ulong result;
833 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
834 target_rlim_swap = TARGET_RLIM_INFINITY;
835 else
836 target_rlim_swap = rlim;
837 result = tswapal(target_rlim_swap);
839 return result;
842 static inline int target_to_host_resource(int code)
844 switch (code) {
845 case TARGET_RLIMIT_AS:
846 return RLIMIT_AS;
847 case TARGET_RLIMIT_CORE:
848 return RLIMIT_CORE;
849 case TARGET_RLIMIT_CPU:
850 return RLIMIT_CPU;
851 case TARGET_RLIMIT_DATA:
852 return RLIMIT_DATA;
853 case TARGET_RLIMIT_FSIZE:
854 return RLIMIT_FSIZE;
855 case TARGET_RLIMIT_LOCKS:
856 return RLIMIT_LOCKS;
857 case TARGET_RLIMIT_MEMLOCK:
858 return RLIMIT_MEMLOCK;
859 case TARGET_RLIMIT_MSGQUEUE:
860 return RLIMIT_MSGQUEUE;
861 case TARGET_RLIMIT_NICE:
862 return RLIMIT_NICE;
863 case TARGET_RLIMIT_NOFILE:
864 return RLIMIT_NOFILE;
865 case TARGET_RLIMIT_NPROC:
866 return RLIMIT_NPROC;
867 case TARGET_RLIMIT_RSS:
868 return RLIMIT_RSS;
869 case TARGET_RLIMIT_RTPRIO:
870 return RLIMIT_RTPRIO;
871 case TARGET_RLIMIT_SIGPENDING:
872 return RLIMIT_SIGPENDING;
873 case TARGET_RLIMIT_STACK:
874 return RLIMIT_STACK;
875 default:
876 return code;
880 static inline abi_long copy_from_user_timeval(struct timeval *tv,
881 abi_ulong target_tv_addr)
883 struct target_timeval *target_tv;
885 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
886 return -TARGET_EFAULT;
888 __get_user(tv->tv_sec, &target_tv->tv_sec);
889 __get_user(tv->tv_usec, &target_tv->tv_usec);
891 unlock_user_struct(target_tv, target_tv_addr, 0);
893 return 0;
896 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
897 const struct timeval *tv)
899 struct target_timeval *target_tv;
901 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
902 return -TARGET_EFAULT;
904 __put_user(tv->tv_sec, &target_tv->tv_sec);
905 __put_user(tv->tv_usec, &target_tv->tv_usec);
907 unlock_user_struct(target_tv, target_tv_addr, 1);
909 return 0;
912 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
913 #include <mqueue.h>
915 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
916 abi_ulong target_mq_attr_addr)
918 struct target_mq_attr *target_mq_attr;
920 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
921 target_mq_attr_addr, 1))
922 return -TARGET_EFAULT;
924 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
925 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
926 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
927 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
929 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
931 return 0;
934 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
935 const struct mq_attr *attr)
937 struct target_mq_attr *target_mq_attr;
939 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
940 target_mq_attr_addr, 0))
941 return -TARGET_EFAULT;
943 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
944 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
945 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
946 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
948 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
950 return 0;
952 #endif
954 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
955 /* do_select() must return target values and target errnos. */
956 static abi_long do_select(int n,
957 abi_ulong rfd_addr, abi_ulong wfd_addr,
958 abi_ulong efd_addr, abi_ulong target_tv_addr)
960 fd_set rfds, wfds, efds;
961 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
962 struct timeval tv, *tv_ptr;
963 abi_long ret;
965 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
966 if (ret) {
967 return ret;
969 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
970 if (ret) {
971 return ret;
973 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
974 if (ret) {
975 return ret;
978 if (target_tv_addr) {
979 if (copy_from_user_timeval(&tv, target_tv_addr))
980 return -TARGET_EFAULT;
981 tv_ptr = &tv;
982 } else {
983 tv_ptr = NULL;
986 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
988 if (!is_error(ret)) {
989 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
990 return -TARGET_EFAULT;
991 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
992 return -TARGET_EFAULT;
993 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
994 return -TARGET_EFAULT;
996 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
997 return -TARGET_EFAULT;
1000 return ret;
1002 #endif
1004 static abi_long do_pipe2(int host_pipe[], int flags)
1006 #ifdef CONFIG_PIPE2
1007 return pipe2(host_pipe, flags);
1008 #else
1009 return -ENOSYS;
1010 #endif
1013 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1014 int flags, int is_pipe2)
1016 int host_pipe[2];
1017 abi_long ret;
1018 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1020 if (is_error(ret))
1021 return get_errno(ret);
1023 /* Several targets have special calling conventions for the original
1024 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1025 if (!is_pipe2) {
1026 #if defined(TARGET_ALPHA)
1027 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1028 return host_pipe[0];
1029 #elif defined(TARGET_MIPS)
1030 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1031 return host_pipe[0];
1032 #elif defined(TARGET_SH4)
1033 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1034 return host_pipe[0];
1035 #elif defined(TARGET_SPARC)
1036 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1037 return host_pipe[0];
1038 #endif
1041 if (put_user_s32(host_pipe[0], pipedes)
1042 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1043 return -TARGET_EFAULT;
1044 return get_errno(ret);
1047 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1048 abi_ulong target_addr,
1049 socklen_t len)
1051 struct target_ip_mreqn *target_smreqn;
1053 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1054 if (!target_smreqn)
1055 return -TARGET_EFAULT;
1056 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1057 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1058 if (len == sizeof(struct target_ip_mreqn))
1059 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1060 unlock_user(target_smreqn, target_addr, 0);
1062 return 0;
1065 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1066 abi_ulong target_addr,
1067 socklen_t len)
1069 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1070 sa_family_t sa_family;
1071 struct target_sockaddr *target_saddr;
1073 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1074 if (!target_saddr)
1075 return -TARGET_EFAULT;
1077 sa_family = tswap16(target_saddr->sa_family);
1079 /* Oops. The caller might send an incomplete sun_path; sun_path
1080 * must be terminated by \0 (see the manual page), but
1081 * unfortunately it is quite common to specify sockaddr_un
1082 * length as "strlen(x->sun_path)" while it should be
1083 * "strlen(...) + 1". We'll fix that here if needed.
1084 * The Linux kernel applies a similar fixup.
1087 if (sa_family == AF_UNIX) {
1088 if (len < unix_maxlen && len > 0) {
1089 char *cp = (char*)target_saddr;
1091 if ( cp[len-1] && !cp[len] )
1092 len++;
1094 if (len > unix_maxlen)
1095 len = unix_maxlen;
1098 memcpy(addr, target_saddr, len);
1099 addr->sa_family = sa_family;
1100 unlock_user(target_saddr, target_addr, 0);
1102 return 0;
1105 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1106 struct sockaddr *addr,
1107 socklen_t len)
1109 struct target_sockaddr *target_saddr;
1111 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1112 if (!target_saddr)
1113 return -TARGET_EFAULT;
1114 memcpy(target_saddr, addr, len);
1115 target_saddr->sa_family = tswap16(addr->sa_family);
1116 unlock_user(target_saddr, target_addr, len);
1118 return 0;
1121 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1122 struct target_msghdr *target_msgh)
1124 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1125 abi_long msg_controllen;
1126 abi_ulong target_cmsg_addr;
1127 struct target_cmsghdr *target_cmsg;
1128 socklen_t space = 0;
1130 msg_controllen = tswapal(target_msgh->msg_controllen);
1131 if (msg_controllen < sizeof (struct target_cmsghdr))
1132 goto the_end;
1133 target_cmsg_addr = tswapal(target_msgh->msg_control);
1134 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1135 if (!target_cmsg)
1136 return -TARGET_EFAULT;
1138 while (cmsg && target_cmsg) {
1139 void *data = CMSG_DATA(cmsg);
1140 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1142 int len = tswapal(target_cmsg->cmsg_len)
1143 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1145 space += CMSG_SPACE(len);
1146 if (space > msgh->msg_controllen) {
1147 space -= CMSG_SPACE(len);
1148 gemu_log("Host cmsg overflow\n");
1149 break;
1152 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1153 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1154 cmsg->cmsg_len = CMSG_LEN(len);
1156 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1157 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1158 memcpy(data, target_data, len);
1159 } else {
1160 int *fd = (int *)data;
1161 int *target_fd = (int *)target_data;
1162 int i, numfds = len / sizeof(int);
1164 for (i = 0; i < numfds; i++)
1165 fd[i] = tswap32(target_fd[i]);
1168 cmsg = CMSG_NXTHDR(msgh, cmsg);
1169 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1171 unlock_user(target_cmsg, target_cmsg_addr, 0);
1172 the_end:
1173 msgh->msg_controllen = space;
1174 return 0;
1177 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1178 struct msghdr *msgh)
1180 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1181 abi_long msg_controllen;
1182 abi_ulong target_cmsg_addr;
1183 struct target_cmsghdr *target_cmsg;
1184 socklen_t space = 0;
1186 msg_controllen = tswapal(target_msgh->msg_controllen);
1187 if (msg_controllen < sizeof (struct target_cmsghdr))
1188 goto the_end;
1189 target_cmsg_addr = tswapal(target_msgh->msg_control);
1190 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1191 if (!target_cmsg)
1192 return -TARGET_EFAULT;
1194 while (cmsg && target_cmsg) {
1195 void *data = CMSG_DATA(cmsg);
1196 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1198 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1200 space += TARGET_CMSG_SPACE(len);
1201 if (space > msg_controllen) {
1202 space -= TARGET_CMSG_SPACE(len);
1203 gemu_log("Target cmsg overflow\n");
1204 break;
1207 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1208 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1209 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1211 if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1212 (cmsg->cmsg_type == SCM_RIGHTS)) {
1213 int *fd = (int *)data;
1214 int *target_fd = (int *)target_data;
1215 int i, numfds = len / sizeof(int);
1217 for (i = 0; i < numfds; i++)
1218 target_fd[i] = tswap32(fd[i]);
1219 } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1220 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1221 (len == sizeof(struct timeval))) {
1222 /* copy struct timeval to target */
1223 struct timeval *tv = (struct timeval *)data;
1224 struct target_timeval *target_tv =
1225 (struct target_timeval *)target_data;
1227 target_tv->tv_sec = tswapal(tv->tv_sec);
1228 target_tv->tv_usec = tswapal(tv->tv_usec);
1229 } else {
1230 gemu_log("Unsupported ancillary data: %d/%d\n",
1231 cmsg->cmsg_level, cmsg->cmsg_type);
1232 memcpy(target_data, data, len);
1235 cmsg = CMSG_NXTHDR(msgh, cmsg);
1236 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1238 unlock_user(target_cmsg, target_cmsg_addr, space);
1239 the_end:
1240 target_msgh->msg_controllen = tswapal(space);
1241 return 0;
1244 /* do_setsockopt() Must return target values and target errnos. */
1245 static abi_long do_setsockopt(int sockfd, int level, int optname,
1246 abi_ulong optval_addr, socklen_t optlen)
1248 abi_long ret;
1249 int val;
1250 struct ip_mreqn *ip_mreq;
1251 struct ip_mreq_source *ip_mreq_source;
1253 switch(level) {
1254 case SOL_TCP:
1255 /* TCP options all take an 'int' value. */
1256 if (optlen < sizeof(uint32_t))
1257 return -TARGET_EINVAL;
1259 if (get_user_u32(val, optval_addr))
1260 return -TARGET_EFAULT;
1261 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1262 break;
1263 case SOL_IP:
1264 switch(optname) {
1265 case IP_TOS:
1266 case IP_TTL:
1267 case IP_HDRINCL:
1268 case IP_ROUTER_ALERT:
1269 case IP_RECVOPTS:
1270 case IP_RETOPTS:
1271 case IP_PKTINFO:
1272 case IP_MTU_DISCOVER:
1273 case IP_RECVERR:
1274 case IP_RECVTOS:
1275 #ifdef IP_FREEBIND
1276 case IP_FREEBIND:
1277 #endif
1278 case IP_MULTICAST_TTL:
1279 case IP_MULTICAST_LOOP:
1280 val = 0;
1281 if (optlen >= sizeof(uint32_t)) {
1282 if (get_user_u32(val, optval_addr))
1283 return -TARGET_EFAULT;
1284 } else if (optlen >= 1) {
1285 if (get_user_u8(val, optval_addr))
1286 return -TARGET_EFAULT;
1288 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1289 break;
1290 case IP_ADD_MEMBERSHIP:
1291 case IP_DROP_MEMBERSHIP:
1292 if (optlen < sizeof (struct target_ip_mreq) ||
1293 optlen > sizeof (struct target_ip_mreqn))
1294 return -TARGET_EINVAL;
1296 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1297 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1298 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1299 break;
1301 case IP_BLOCK_SOURCE:
1302 case IP_UNBLOCK_SOURCE:
1303 case IP_ADD_SOURCE_MEMBERSHIP:
1304 case IP_DROP_SOURCE_MEMBERSHIP:
1305 if (optlen != sizeof (struct target_ip_mreq_source))
1306 return -TARGET_EINVAL;
1308 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1309 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1310 unlock_user (ip_mreq_source, optval_addr, 0);
1311 break;
1313 default:
1314 goto unimplemented;
1316 break;
1317 case SOL_RAW:
1318 switch (optname) {
1319 case ICMP_FILTER:
1320 /* struct icmp_filter takes an u32 value */
1321 if (optlen < sizeof(uint32_t)) {
1322 return -TARGET_EINVAL;
1325 if (get_user_u32(val, optval_addr)) {
1326 return -TARGET_EFAULT;
1328 ret = get_errno(setsockopt(sockfd, level, optname,
1329 &val, sizeof(val)));
1330 break;
1332 default:
1333 goto unimplemented;
1335 break;
1336 case TARGET_SOL_SOCKET:
1337 switch (optname) {
1338 case TARGET_SO_RCVTIMEO:
1340 struct timeval tv;
1342 optname = SO_RCVTIMEO;
1344 set_timeout:
1345 if (optlen != sizeof(struct target_timeval)) {
1346 return -TARGET_EINVAL;
1349 if (copy_from_user_timeval(&tv, optval_addr)) {
1350 return -TARGET_EFAULT;
1353 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1354 &tv, sizeof(tv)));
1355 return ret;
1357 case TARGET_SO_SNDTIMEO:
1358 optname = SO_SNDTIMEO;
1359 goto set_timeout;
1360 /* Options with 'int' argument. */
1361 case TARGET_SO_DEBUG:
1362 optname = SO_DEBUG;
1363 break;
1364 case TARGET_SO_REUSEADDR:
1365 optname = SO_REUSEADDR;
1366 break;
1367 case TARGET_SO_TYPE:
1368 optname = SO_TYPE;
1369 break;
1370 case TARGET_SO_ERROR:
1371 optname = SO_ERROR;
1372 break;
1373 case TARGET_SO_DONTROUTE:
1374 optname = SO_DONTROUTE;
1375 break;
1376 case TARGET_SO_BROADCAST:
1377 optname = SO_BROADCAST;
1378 break;
1379 case TARGET_SO_SNDBUF:
1380 optname = SO_SNDBUF;
1381 break;
1382 case TARGET_SO_RCVBUF:
1383 optname = SO_RCVBUF;
1384 break;
1385 case TARGET_SO_KEEPALIVE:
1386 optname = SO_KEEPALIVE;
1387 break;
1388 case TARGET_SO_OOBINLINE:
1389 optname = SO_OOBINLINE;
1390 break;
1391 case TARGET_SO_NO_CHECK:
1392 optname = SO_NO_CHECK;
1393 break;
1394 case TARGET_SO_PRIORITY:
1395 optname = SO_PRIORITY;
1396 break;
1397 #ifdef SO_BSDCOMPAT
1398 case TARGET_SO_BSDCOMPAT:
1399 optname = SO_BSDCOMPAT;
1400 break;
1401 #endif
1402 case TARGET_SO_PASSCRED:
1403 optname = SO_PASSCRED;
1404 break;
1405 case TARGET_SO_TIMESTAMP:
1406 optname = SO_TIMESTAMP;
1407 break;
1408 case TARGET_SO_RCVLOWAT:
1409 optname = SO_RCVLOWAT;
1410 break;
1412 default:
1413 goto unimplemented;
1415 if (optlen < sizeof(uint32_t))
1416 return -TARGET_EINVAL;
1418 if (get_user_u32(val, optval_addr))
1419 return -TARGET_EFAULT;
1420 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1421 break;
1422 default:
1423 unimplemented:
1424 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1425 ret = -TARGET_ENOPROTOOPT;
1427 return ret;
1430 /* do_getsockopt() Must return target values and target errnos. */
1431 static abi_long do_getsockopt(int sockfd, int level, int optname,
1432 abi_ulong optval_addr, abi_ulong optlen)
1434 abi_long ret;
1435 int len, val;
1436 socklen_t lv;
1438 switch(level) {
1439 case TARGET_SOL_SOCKET:
1440 level = SOL_SOCKET;
1441 switch (optname) {
1442 /* These don't just return a single integer */
1443 case TARGET_SO_LINGER:
1444 case TARGET_SO_RCVTIMEO:
1445 case TARGET_SO_SNDTIMEO:
1446 case TARGET_SO_PEERNAME:
1447 goto unimplemented;
1448 case TARGET_SO_PEERCRED: {
1449 struct ucred cr;
1450 socklen_t crlen;
1451 struct target_ucred *tcr;
1453 if (get_user_u32(len, optlen)) {
1454 return -TARGET_EFAULT;
1456 if (len < 0) {
1457 return -TARGET_EINVAL;
1460 crlen = sizeof(cr);
1461 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1462 &cr, &crlen));
1463 if (ret < 0) {
1464 return ret;
1466 if (len > crlen) {
1467 len = crlen;
1469 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1470 return -TARGET_EFAULT;
1472 __put_user(cr.pid, &tcr->pid);
1473 __put_user(cr.uid, &tcr->uid);
1474 __put_user(cr.gid, &tcr->gid);
1475 unlock_user_struct(tcr, optval_addr, 1);
1476 if (put_user_u32(len, optlen)) {
1477 return -TARGET_EFAULT;
1479 break;
1481 /* Options with 'int' argument. */
1482 case TARGET_SO_DEBUG:
1483 optname = SO_DEBUG;
1484 goto int_case;
1485 case TARGET_SO_REUSEADDR:
1486 optname = SO_REUSEADDR;
1487 goto int_case;
1488 case TARGET_SO_TYPE:
1489 optname = SO_TYPE;
1490 goto int_case;
1491 case TARGET_SO_ERROR:
1492 optname = SO_ERROR;
1493 goto int_case;
1494 case TARGET_SO_DONTROUTE:
1495 optname = SO_DONTROUTE;
1496 goto int_case;
1497 case TARGET_SO_BROADCAST:
1498 optname = SO_BROADCAST;
1499 goto int_case;
1500 case TARGET_SO_SNDBUF:
1501 optname = SO_SNDBUF;
1502 goto int_case;
1503 case TARGET_SO_RCVBUF:
1504 optname = SO_RCVBUF;
1505 goto int_case;
1506 case TARGET_SO_KEEPALIVE:
1507 optname = SO_KEEPALIVE;
1508 goto int_case;
1509 case TARGET_SO_OOBINLINE:
1510 optname = SO_OOBINLINE;
1511 goto int_case;
1512 case TARGET_SO_NO_CHECK:
1513 optname = SO_NO_CHECK;
1514 goto int_case;
1515 case TARGET_SO_PRIORITY:
1516 optname = SO_PRIORITY;
1517 goto int_case;
1518 #ifdef SO_BSDCOMPAT
1519 case TARGET_SO_BSDCOMPAT:
1520 optname = SO_BSDCOMPAT;
1521 goto int_case;
1522 #endif
1523 case TARGET_SO_PASSCRED:
1524 optname = SO_PASSCRED;
1525 goto int_case;
1526 case TARGET_SO_TIMESTAMP:
1527 optname = SO_TIMESTAMP;
1528 goto int_case;
1529 case TARGET_SO_RCVLOWAT:
1530 optname = SO_RCVLOWAT;
1531 goto int_case;
1532 default:
1533 goto int_case;
1535 break;
1536 case SOL_TCP:
1537 /* TCP options all take an 'int' value. */
1538 int_case:
1539 if (get_user_u32(len, optlen))
1540 return -TARGET_EFAULT;
1541 if (len < 0)
1542 return -TARGET_EINVAL;
1543 lv = sizeof(lv);
1544 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1545 if (ret < 0)
1546 return ret;
1547 if (len > lv)
1548 len = lv;
1549 if (len == 4) {
1550 if (put_user_u32(val, optval_addr))
1551 return -TARGET_EFAULT;
1552 } else {
1553 if (put_user_u8(val, optval_addr))
1554 return -TARGET_EFAULT;
1556 if (put_user_u32(len, optlen))
1557 return -TARGET_EFAULT;
1558 break;
1559 case SOL_IP:
1560 switch(optname) {
1561 case IP_TOS:
1562 case IP_TTL:
1563 case IP_HDRINCL:
1564 case IP_ROUTER_ALERT:
1565 case IP_RECVOPTS:
1566 case IP_RETOPTS:
1567 case IP_PKTINFO:
1568 case IP_MTU_DISCOVER:
1569 case IP_RECVERR:
1570 case IP_RECVTOS:
1571 #ifdef IP_FREEBIND
1572 case IP_FREEBIND:
1573 #endif
1574 case IP_MULTICAST_TTL:
1575 case IP_MULTICAST_LOOP:
1576 if (get_user_u32(len, optlen))
1577 return -TARGET_EFAULT;
1578 if (len < 0)
1579 return -TARGET_EINVAL;
1580 lv = sizeof(lv);
1581 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1582 if (ret < 0)
1583 return ret;
1584 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1585 len = 1;
1586 if (put_user_u32(len, optlen)
1587 || put_user_u8(val, optval_addr))
1588 return -TARGET_EFAULT;
1589 } else {
1590 if (len > sizeof(int))
1591 len = sizeof(int);
1592 if (put_user_u32(len, optlen)
1593 || put_user_u32(val, optval_addr))
1594 return -TARGET_EFAULT;
1596 break;
1597 default:
1598 ret = -TARGET_ENOPROTOOPT;
1599 break;
1601 break;
1602 default:
1603 unimplemented:
1604 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1605 level, optname);
1606 ret = -TARGET_EOPNOTSUPP;
1607 break;
1609 return ret;
1612 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1613 int count, int copy)
1615 struct target_iovec *target_vec;
1616 struct iovec *vec;
1617 abi_ulong total_len, max_len;
1618 int i;
1620 if (count == 0) {
1621 errno = 0;
1622 return NULL;
1624 if (count < 0 || count > IOV_MAX) {
1625 errno = EINVAL;
1626 return NULL;
1629 vec = calloc(count, sizeof(struct iovec));
1630 if (vec == NULL) {
1631 errno = ENOMEM;
1632 return NULL;
1635 target_vec = lock_user(VERIFY_READ, target_addr,
1636 count * sizeof(struct target_iovec), 1);
1637 if (target_vec == NULL) {
1638 errno = EFAULT;
1639 goto fail2;
1642 /* ??? If host page size > target page size, this will result in a
1643 value larger than what we can actually support. */
1644 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1645 total_len = 0;
1647 for (i = 0; i < count; i++) {
1648 abi_ulong base = tswapal(target_vec[i].iov_base);
1649 abi_long len = tswapal(target_vec[i].iov_len);
1651 if (len < 0) {
1652 errno = EINVAL;
1653 goto fail;
1654 } else if (len == 0) {
1655 /* Zero length pointer is ignored. */
1656 vec[i].iov_base = 0;
1657 } else {
1658 vec[i].iov_base = lock_user(type, base, len, copy);
1659 if (!vec[i].iov_base) {
1660 errno = EFAULT;
1661 goto fail;
1663 if (len > max_len - total_len) {
1664 len = max_len - total_len;
1667 vec[i].iov_len = len;
1668 total_len += len;
1671 unlock_user(target_vec, target_addr, 0);
1672 return vec;
1674 fail:
1675 free(vec);
1676 fail2:
1677 unlock_user(target_vec, target_addr, 0);
1678 return NULL;
1681 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1682 int count, int copy)
1684 struct target_iovec *target_vec;
1685 int i;
1687 target_vec = lock_user(VERIFY_READ, target_addr,
1688 count * sizeof(struct target_iovec), 1);
1689 if (target_vec) {
1690 for (i = 0; i < count; i++) {
1691 abi_ulong base = tswapal(target_vec[i].iov_base);
1692 abi_long len = tswapal(target_vec[i].iov_len);
1693 if (len < 0) {
1694 break;
1696 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1698 unlock_user(target_vec, target_addr, 0);
1701 free(vec);
1704 static inline void target_to_host_sock_type(int *type)
1706 int host_type = 0;
1707 int target_type = *type;
1709 switch (target_type & TARGET_SOCK_TYPE_MASK) {
1710 case TARGET_SOCK_DGRAM:
1711 host_type = SOCK_DGRAM;
1712 break;
1713 case TARGET_SOCK_STREAM:
1714 host_type = SOCK_STREAM;
1715 break;
1716 default:
1717 host_type = target_type & TARGET_SOCK_TYPE_MASK;
1718 break;
1720 if (target_type & TARGET_SOCK_CLOEXEC) {
1721 host_type |= SOCK_CLOEXEC;
1723 if (target_type & TARGET_SOCK_NONBLOCK) {
1724 host_type |= SOCK_NONBLOCK;
1726 *type = host_type;
1729 /* do_socket() Must return target values and target errnos. */
1730 static abi_long do_socket(int domain, int type, int protocol)
1732 target_to_host_sock_type(&type);
1734 if (domain == PF_NETLINK)
1735 return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1736 return get_errno(socket(domain, type, protocol));
1739 /* do_bind() Must return target values and target errnos. */
1740 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1741 socklen_t addrlen)
1743 void *addr;
1744 abi_long ret;
1746 if ((int)addrlen < 0) {
1747 return -TARGET_EINVAL;
1750 addr = alloca(addrlen+1);
1752 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1753 if (ret)
1754 return ret;
1756 return get_errno(bind(sockfd, addr, addrlen));
1759 /* do_connect() Must return target values and target errnos. */
1760 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1761 socklen_t addrlen)
1763 void *addr;
1764 abi_long ret;
1766 if ((int)addrlen < 0) {
1767 return -TARGET_EINVAL;
1770 addr = alloca(addrlen);
1772 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1773 if (ret)
1774 return ret;
1776 return get_errno(connect(sockfd, addr, addrlen));
1779 /* do_sendrecvmsg() Must return target values and target errnos. */
1780 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1781 int flags, int send)
1783 abi_long ret, len;
1784 struct target_msghdr *msgp;
1785 struct msghdr msg;
1786 int count;
1787 struct iovec *vec;
1788 abi_ulong target_vec;
1790 /* FIXME */
1791 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1792 msgp,
1793 target_msg,
1794 send ? 1 : 0))
1795 return -TARGET_EFAULT;
1796 if (msgp->msg_name) {
1797 msg.msg_namelen = tswap32(msgp->msg_namelen);
1798 msg.msg_name = alloca(msg.msg_namelen);
1799 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1800 msg.msg_namelen);
1801 if (ret) {
1802 goto out2;
1804 } else {
1805 msg.msg_name = NULL;
1806 msg.msg_namelen = 0;
1808 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1809 msg.msg_control = alloca(msg.msg_controllen);
1810 msg.msg_flags = tswap32(msgp->msg_flags);
1812 count = tswapal(msgp->msg_iovlen);
1813 target_vec = tswapal(msgp->msg_iov);
1814 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1815 target_vec, count, send);
1816 if (vec == NULL) {
1817 ret = -host_to_target_errno(errno);
1818 goto out2;
1820 msg.msg_iovlen = count;
1821 msg.msg_iov = vec;
1823 if (send) {
1824 ret = target_to_host_cmsg(&msg, msgp);
1825 if (ret == 0)
1826 ret = get_errno(sendmsg(fd, &msg, flags));
1827 } else {
1828 ret = get_errno(recvmsg(fd, &msg, flags));
1829 if (!is_error(ret)) {
1830 len = ret;
1831 ret = host_to_target_cmsg(msgp, &msg);
1832 if (!is_error(ret)) {
1833 msgp->msg_namelen = tswap32(msg.msg_namelen);
1834 if (msg.msg_name != NULL) {
1835 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1836 msg.msg_name, msg.msg_namelen);
1837 if (ret) {
1838 goto out;
1842 ret = len;
1847 out:
1848 unlock_iovec(vec, target_vec, count, !send);
1849 out2:
1850 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1851 return ret;
1854 /* If we don't have a system accept4() then just call accept.
1855 * The callsites to do_accept4() will ensure that they don't
1856 * pass a non-zero flags argument in this config.
1858 #ifndef CONFIG_ACCEPT4
1859 static inline int accept4(int sockfd, struct sockaddr *addr,
1860 socklen_t *addrlen, int flags)
1862 assert(flags == 0);
1863 return accept(sockfd, addr, addrlen);
1865 #endif
1867 /* do_accept4() Must return target values and target errnos. */
1868 static abi_long do_accept4(int fd, abi_ulong target_addr,
1869 abi_ulong target_addrlen_addr, int flags)
1871 socklen_t addrlen;
1872 void *addr;
1873 abi_long ret;
1875 if (target_addr == 0) {
1876 return get_errno(accept4(fd, NULL, NULL, flags));
1879 /* linux returns EINVAL if addrlen pointer is invalid */
1880 if (get_user_u32(addrlen, target_addrlen_addr))
1881 return -TARGET_EINVAL;
1883 if ((int)addrlen < 0) {
1884 return -TARGET_EINVAL;
1887 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1888 return -TARGET_EINVAL;
1890 addr = alloca(addrlen);
1892 ret = get_errno(accept4(fd, addr, &addrlen, flags));
1893 if (!is_error(ret)) {
1894 host_to_target_sockaddr(target_addr, addr, addrlen);
1895 if (put_user_u32(addrlen, target_addrlen_addr))
1896 ret = -TARGET_EFAULT;
1898 return ret;
1901 /* do_getpeername() Must return target values and target errnos. */
1902 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1903 abi_ulong target_addrlen_addr)
1905 socklen_t addrlen;
1906 void *addr;
1907 abi_long ret;
1909 if (get_user_u32(addrlen, target_addrlen_addr))
1910 return -TARGET_EFAULT;
1912 if ((int)addrlen < 0) {
1913 return -TARGET_EINVAL;
1916 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1917 return -TARGET_EFAULT;
1919 addr = alloca(addrlen);
1921 ret = get_errno(getpeername(fd, addr, &addrlen));
1922 if (!is_error(ret)) {
1923 host_to_target_sockaddr(target_addr, addr, addrlen);
1924 if (put_user_u32(addrlen, target_addrlen_addr))
1925 ret = -TARGET_EFAULT;
1927 return ret;
1930 /* do_getsockname() Must return target values and target errnos. */
1931 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1932 abi_ulong target_addrlen_addr)
1934 socklen_t addrlen;
1935 void *addr;
1936 abi_long ret;
1938 if (get_user_u32(addrlen, target_addrlen_addr))
1939 return -TARGET_EFAULT;
1941 if ((int)addrlen < 0) {
1942 return -TARGET_EINVAL;
1945 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1946 return -TARGET_EFAULT;
1948 addr = alloca(addrlen);
1950 ret = get_errno(getsockname(fd, addr, &addrlen));
1951 if (!is_error(ret)) {
1952 host_to_target_sockaddr(target_addr, addr, addrlen);
1953 if (put_user_u32(addrlen, target_addrlen_addr))
1954 ret = -TARGET_EFAULT;
1956 return ret;
1959 /* do_socketpair() Must return target values and target errnos. */
1960 static abi_long do_socketpair(int domain, int type, int protocol,
1961 abi_ulong target_tab_addr)
1963 int tab[2];
1964 abi_long ret;
1966 target_to_host_sock_type(&type);
1968 ret = get_errno(socketpair(domain, type, protocol, tab));
1969 if (!is_error(ret)) {
1970 if (put_user_s32(tab[0], target_tab_addr)
1971 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1972 ret = -TARGET_EFAULT;
1974 return ret;
1977 /* do_sendto() Must return target values and target errnos. */
1978 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1979 abi_ulong target_addr, socklen_t addrlen)
1981 void *addr;
1982 void *host_msg;
1983 abi_long ret;
1985 if ((int)addrlen < 0) {
1986 return -TARGET_EINVAL;
1989 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1990 if (!host_msg)
1991 return -TARGET_EFAULT;
1992 if (target_addr) {
1993 addr = alloca(addrlen);
1994 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1995 if (ret) {
1996 unlock_user(host_msg, msg, 0);
1997 return ret;
1999 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2000 } else {
2001 ret = get_errno(send(fd, host_msg, len, flags));
2003 unlock_user(host_msg, msg, 0);
2004 return ret;
2007 /* do_recvfrom() Must return target values and target errnos. */
2008 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2009 abi_ulong target_addr,
2010 abi_ulong target_addrlen)
2012 socklen_t addrlen;
2013 void *addr;
2014 void *host_msg;
2015 abi_long ret;
2017 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2018 if (!host_msg)
2019 return -TARGET_EFAULT;
2020 if (target_addr) {
2021 if (get_user_u32(addrlen, target_addrlen)) {
2022 ret = -TARGET_EFAULT;
2023 goto fail;
2025 if ((int)addrlen < 0) {
2026 ret = -TARGET_EINVAL;
2027 goto fail;
2029 addr = alloca(addrlen);
2030 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2031 } else {
2032 addr = NULL; /* To keep compiler quiet. */
2033 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2035 if (!is_error(ret)) {
2036 if (target_addr) {
2037 host_to_target_sockaddr(target_addr, addr, addrlen);
2038 if (put_user_u32(addrlen, target_addrlen)) {
2039 ret = -TARGET_EFAULT;
2040 goto fail;
2043 unlock_user(host_msg, msg, len);
2044 } else {
2045 fail:
2046 unlock_user(host_msg, msg, 0);
2048 return ret;
2051 #ifdef TARGET_NR_socketcall
2052 /* do_socketcall() must return target values and target errnos. */
2053 static abi_long do_socketcall(int num, abi_ulong vptr)
2055 abi_long ret;
2056 const int n = sizeof(abi_ulong);
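/* socketcall packs its arguments as consecutive abi_ulong words at vptr,
 * so each case below reads argument k with get_user_ual(..., vptr + k * n). */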
2058 switch(num) {
2059 case SOCKOP_socket:
2061 abi_ulong domain, type, protocol;
2063 if (get_user_ual(domain, vptr)
2064 || get_user_ual(type, vptr + n)
2065 || get_user_ual(protocol, vptr + 2 * n))
2066 return -TARGET_EFAULT;
2068 ret = do_socket(domain, type, protocol);
2070 break;
2071 case SOCKOP_bind:
2073 abi_ulong sockfd;
2074 abi_ulong target_addr;
2075 socklen_t addrlen;
2077 if (get_user_ual(sockfd, vptr)
2078 || get_user_ual(target_addr, vptr + n)
2079 || get_user_ual(addrlen, vptr + 2 * n))
2080 return -TARGET_EFAULT;
2082 ret = do_bind(sockfd, target_addr, addrlen);
2084 break;
2085 case SOCKOP_connect:
2087 abi_ulong sockfd;
2088 abi_ulong target_addr;
2089 socklen_t addrlen;
2091 if (get_user_ual(sockfd, vptr)
2092 || get_user_ual(target_addr, vptr + n)
2093 || get_user_ual(addrlen, vptr + 2 * n))
2094 return -TARGET_EFAULT;
2096 ret = do_connect(sockfd, target_addr, addrlen);
2098 break;
2099 case SOCKOP_listen:
2101 abi_ulong sockfd, backlog;
2103 if (get_user_ual(sockfd, vptr)
2104 || get_user_ual(backlog, vptr + n))
2105 return -TARGET_EFAULT;
2107 ret = get_errno(listen(sockfd, backlog));
2109 break;
2110 case SOCKOP_accept:
2112 abi_ulong sockfd;
2113 abi_ulong target_addr, target_addrlen;
2115 if (get_user_ual(sockfd, vptr)
2116 || get_user_ual(target_addr, vptr + n)
2117 || get_user_ual(target_addrlen, vptr + 2 * n))
2118 return -TARGET_EFAULT;
2120 ret = do_accept4(sockfd, target_addr, target_addrlen, 0);
2122 break;
2123 case SOCKOP_getsockname:
2125 abi_ulong sockfd;
2126 abi_ulong target_addr, target_addrlen;
2128 if (get_user_ual(sockfd, vptr)
2129 || get_user_ual(target_addr, vptr + n)
2130 || get_user_ual(target_addrlen, vptr + 2 * n))
2131 return -TARGET_EFAULT;
2133 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2135 break;
2136 case SOCKOP_getpeername:
2138 abi_ulong sockfd;
2139 abi_ulong target_addr, target_addrlen;
2141 if (get_user_ual(sockfd, vptr)
2142 || get_user_ual(target_addr, vptr + n)
2143 || get_user_ual(target_addrlen, vptr + 2 * n))
2144 return -TARGET_EFAULT;
2146 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2148 break;
2149 case SOCKOP_socketpair:
2151 abi_ulong domain, type, protocol;
2152 abi_ulong tab;
2154 if (get_user_ual(domain, vptr)
2155 || get_user_ual(type, vptr + n)
2156 || get_user_ual(protocol, vptr + 2 * n)
2157 || get_user_ual(tab, vptr + 3 * n))
2158 return -TARGET_EFAULT;
2160 ret = do_socketpair(domain, type, protocol, tab);
2162 break;
2163 case SOCKOP_send:
2165 abi_ulong sockfd;
2166 abi_ulong msg;
2167 size_t len;
2168 abi_ulong flags;
2170 if (get_user_ual(sockfd, vptr)
2171 || get_user_ual(msg, vptr + n)
2172 || get_user_ual(len, vptr + 2 * n)
2173 || get_user_ual(flags, vptr + 3 * n))
2174 return -TARGET_EFAULT;
2176 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2178 break;
2179 case SOCKOP_recv:
2181 abi_ulong sockfd;
2182 abi_ulong msg;
2183 size_t len;
2184 abi_ulong flags;
2186 if (get_user_ual(sockfd, vptr)
2187 || get_user_ual(msg, vptr + n)
2188 || get_user_ual(len, vptr + 2 * n)
2189 || get_user_ual(flags, vptr + 3 * n))
2190 return -TARGET_EFAULT;
2192 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2194 break;
2195 case SOCKOP_sendto:
2197 abi_ulong sockfd;
2198 abi_ulong msg;
2199 size_t len;
2200 abi_ulong flags;
2201 abi_ulong addr;
2202 socklen_t addrlen;
2204 if (get_user_ual(sockfd, vptr)
2205 || get_user_ual(msg, vptr + n)
2206 || get_user_ual(len, vptr + 2 * n)
2207 || get_user_ual(flags, vptr + 3 * n)
2208 || get_user_ual(addr, vptr + 4 * n)
2209 || get_user_ual(addrlen, vptr + 5 * n))
2210 return -TARGET_EFAULT;
2212 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2214 break;
2215 case SOCKOP_recvfrom:
2217 abi_ulong sockfd;
2218 abi_ulong msg;
2219 size_t len;
2220 abi_ulong flags;
2221 abi_ulong addr;
2222 socklen_t addrlen;
2224 if (get_user_ual(sockfd, vptr)
2225 || get_user_ual(msg, vptr + n)
2226 || get_user_ual(len, vptr + 2 * n)
2227 || get_user_ual(flags, vptr + 3 * n)
2228 || get_user_ual(addr, vptr + 4 * n)
2229 || get_user_ual(addrlen, vptr + 5 * n))
2230 return -TARGET_EFAULT;
2232 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2234 break;
2235 case SOCKOP_shutdown:
2237 abi_ulong sockfd, how;
2239 if (get_user_ual(sockfd, vptr)
2240 || get_user_ual(how, vptr + n))
2241 return -TARGET_EFAULT;
2243 ret = get_errno(shutdown(sockfd, how));
2245 break;
2246 case SOCKOP_sendmsg:
2247 case SOCKOP_recvmsg:
2249 abi_ulong fd;
2250 abi_ulong target_msg;
2251 abi_ulong flags;
2253 if (get_user_ual(fd, vptr)
2254 || get_user_ual(target_msg, vptr + n)
2255 || get_user_ual(flags, vptr + 2 * n))
2256 return -TARGET_EFAULT;
2258 ret = do_sendrecvmsg(fd, target_msg, flags,
2259 (num == SOCKOP_sendmsg));
2261 break;
2262 case SOCKOP_setsockopt:
2264 abi_ulong sockfd;
2265 abi_ulong level;
2266 abi_ulong optname;
2267 abi_ulong optval;
2268 socklen_t optlen;
2270 if (get_user_ual(sockfd, vptr)
2271 || get_user_ual(level, vptr + n)
2272 || get_user_ual(optname, vptr + 2 * n)
2273 || get_user_ual(optval, vptr + 3 * n)
2274 || get_user_ual(optlen, vptr + 4 * n))
2275 return -TARGET_EFAULT;
2277 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2279 break;
2280 case SOCKOP_getsockopt:
2282 abi_ulong sockfd;
2283 abi_ulong level;
2284 abi_ulong optname;
2285 abi_ulong optval;
2286 socklen_t optlen;
2288 if (get_user_ual(sockfd, vptr)
2289 || get_user_ual(level, vptr + n)
2290 || get_user_ual(optname, vptr + 2 * n)
2291 || get_user_ual(optval, vptr + 3 * n)
2292 || get_user_ual(optlen, vptr + 4 * n))
2293 return -TARGET_EFAULT;
2295 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2297 break;
2298 default:
2299 gemu_log("Unsupported socketcall: %d\n", num);
2300 ret = -TARGET_ENOSYS;
2301 break;
2303 return ret;
2305 #endif
2307 #define N_SHM_REGIONS 32
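/* Remember the guest address and size of each attached SysV shared memory
 * segment so that do_shmdt() can clear the page flags for the right range. */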
2309 static struct shm_region {
2310 abi_ulong start;
2311 abi_ulong size;
2312 } shm_regions[N_SHM_REGIONS];
2314 struct target_ipc_perm
2316 abi_long __key;
2317 abi_ulong uid;
2318 abi_ulong gid;
2319 abi_ulong cuid;
2320 abi_ulong cgid;
2321 unsigned short int mode;
2322 unsigned short int __pad1;
2323 unsigned short int __seq;
2324 unsigned short int __pad2;
2325 abi_ulong __unused1;
2326 abi_ulong __unused2;
2329 struct target_semid_ds
2331 struct target_ipc_perm sem_perm;
2332 abi_ulong sem_otime;
2333 abi_ulong __unused1;
2334 abi_ulong sem_ctime;
2335 abi_ulong __unused2;
2336 abi_ulong sem_nsems;
2337 abi_ulong __unused3;
2338 abi_ulong __unused4;
2341 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2342 abi_ulong target_addr)
2344 struct target_ipc_perm *target_ip;
2345 struct target_semid_ds *target_sd;
2347 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2348 return -TARGET_EFAULT;
2349 target_ip = &(target_sd->sem_perm);
2350 host_ip->__key = tswapal(target_ip->__key);
2351 host_ip->uid = tswapal(target_ip->uid);
2352 host_ip->gid = tswapal(target_ip->gid);
2353 host_ip->cuid = tswapal(target_ip->cuid);
2354 host_ip->cgid = tswapal(target_ip->cgid);
2355 host_ip->mode = tswap16(target_ip->mode);
2356 unlock_user_struct(target_sd, target_addr, 0);
2357 return 0;
2360 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2361 struct ipc_perm *host_ip)
2363 struct target_ipc_perm *target_ip;
2364 struct target_semid_ds *target_sd;
2366 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2367 return -TARGET_EFAULT;
2368 target_ip = &(target_sd->sem_perm);
2369 target_ip->__key = tswapal(host_ip->__key);
2370 target_ip->uid = tswapal(host_ip->uid);
2371 target_ip->gid = tswapal(host_ip->gid);
2372 target_ip->cuid = tswapal(host_ip->cuid);
2373 target_ip->cgid = tswapal(host_ip->cgid);
2374 target_ip->mode = tswap16(host_ip->mode);
2375 unlock_user_struct(target_sd, target_addr, 1);
2376 return 0;
2379 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2380 abi_ulong target_addr)
2382 struct target_semid_ds *target_sd;
2384 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2385 return -TARGET_EFAULT;
2386 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2387 return -TARGET_EFAULT;
2388 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2389 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2390 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2391 unlock_user_struct(target_sd, target_addr, 0);
2392 return 0;
2395 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2396 struct semid_ds *host_sd)
2398 struct target_semid_ds *target_sd;
2400 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2401 return -TARGET_EFAULT;
2402 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2403 return -TARGET_EFAULT;
2404 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2405 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2406 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2407 unlock_user_struct(target_sd, target_addr, 1);
2408 return 0;
2411 struct target_seminfo {
2412 int semmap;
2413 int semmni;
2414 int semmns;
2415 int semmnu;
2416 int semmsl;
2417 int semopm;
2418 int semume;
2419 int semusz;
2420 int semvmx;
2421 int semaem;
2424 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2425 struct seminfo *host_seminfo)
2427 struct target_seminfo *target_seminfo;
2428 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2429 return -TARGET_EFAULT;
2430 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2431 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2432 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2433 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2434 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2435 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2436 __put_user(host_seminfo->semume, &target_seminfo->semume);
2437 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2438 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2439 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2440 unlock_user_struct(target_seminfo, target_addr, 1);
2441 return 0;
2444 union semun {
2445 int val;
2446 struct semid_ds *buf;
2447 unsigned short *array;
2448 struct seminfo *__buf;
2451 union target_semun {
2452 int val;
2453 abi_ulong buf;
2454 abi_ulong array;
2455 abi_ulong __buf;
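/* target_semun mirrors the host semun but carries guest addresses rather than
 * host pointers.  The two helpers below copy a GETALL/SETALL semaphore array
 * between guest memory and a host buffer: the first allocates *host_array and
 * fills it from the guest, the second writes it back and frees it. */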
2458 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2459 abi_ulong target_addr)
2461 int nsems;
2462 unsigned short *array;
2463 union semun semun;
2464 struct semid_ds semid_ds;
2465 int i, ret;
2467 semun.buf = &semid_ds;
2469 ret = semctl(semid, 0, IPC_STAT, semun);
2470 if (ret == -1)
2471 return get_errno(ret);
2473 nsems = semid_ds.sem_nsems;
2475 *host_array = malloc(nsems*sizeof(unsigned short));
2476 array = lock_user(VERIFY_READ, target_addr,
2477 nsems*sizeof(unsigned short), 1);
2478 if (!array)
2479 return -TARGET_EFAULT;
2481 for(i=0; i<nsems; i++) {
2482 __get_user((*host_array)[i], &array[i]);
2484 unlock_user(array, target_addr, 0);
2486 return 0;
2489 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2490 unsigned short **host_array)
2492 int nsems;
2493 unsigned short *array;
2494 union semun semun;
2495 struct semid_ds semid_ds;
2496 int i, ret;
2498 semun.buf = &semid_ds;
2500 ret = semctl(semid, 0, IPC_STAT, semun);
2501 if (ret == -1)
2502 return get_errno(ret);
2504 nsems = semid_ds.sem_nsems;
2506 array = lock_user(VERIFY_WRITE, target_addr,
2507 nsems*sizeof(unsigned short), 0);
2508 if (!array)
2509 return -TARGET_EFAULT;
2511 for(i=0; i<nsems; i++) {
2512 __put_user((*host_array)[i], &array[i]);
2514 free(*host_array);
2515 unlock_user(array, target_addr, 1);
2517 return 0;
2520 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2521 union target_semun target_su)
2523 union semun arg;
2524 struct semid_ds dsarg;
2525 unsigned short *array = NULL;
2526 struct seminfo seminfo;
2527 abi_long ret = -TARGET_EINVAL;
2528 abi_long err;
2529 cmd &= 0xff;
2531 switch( cmd ) {
2532 case GETVAL:
2533 case SETVAL:
2534 arg.val = tswap32(target_su.val);
2535 ret = get_errno(semctl(semid, semnum, cmd, arg));
2536 target_su.val = tswap32(arg.val);
2537 break;
2538 case GETALL:
2539 case SETALL:
2540 err = target_to_host_semarray(semid, &array, target_su.array);
2541 if (err)
2542 return err;
2543 arg.array = array;
2544 ret = get_errno(semctl(semid, semnum, cmd, arg));
2545 err = host_to_target_semarray(semid, target_su.array, &array);
2546 if (err)
2547 return err;
2548 break;
2549 case IPC_STAT:
2550 case IPC_SET:
2551 case SEM_STAT:
2552 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2553 if (err)
2554 return err;
2555 arg.buf = &dsarg;
2556 ret = get_errno(semctl(semid, semnum, cmd, arg));
2557 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2558 if (err)
2559 return err;
2560 break;
2561 case IPC_INFO:
2562 case SEM_INFO:
2563 arg.__buf = &seminfo;
2564 ret = get_errno(semctl(semid, semnum, cmd, arg));
2565 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2566 if (err)
2567 return err;
2568 break;
2569 case IPC_RMID:
2570 case GETPID:
2571 case GETNCNT:
2572 case GETZCNT:
2573 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2574 break;
2577 return ret;
2580 struct target_sembuf {
2581 unsigned short sem_num;
2582 short sem_op;
2583 short sem_flg;
2586 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2587 abi_ulong target_addr,
2588 unsigned nsops)
2590 struct target_sembuf *target_sembuf;
2591 int i;
2593 target_sembuf = lock_user(VERIFY_READ, target_addr,
2594 nsops*sizeof(struct target_sembuf), 1);
2595 if (!target_sembuf)
2596 return -TARGET_EFAULT;
2598 for(i=0; i<nsops; i++) {
2599 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2600 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2601 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2604 unlock_user(target_sembuf, target_addr, 0);
2606 return 0;
2609 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2611 struct sembuf sops[nsops];
2613 if (target_to_host_sembuf(sops, ptr, nsops))
2614 return -TARGET_EFAULT;
2616 return get_errno(semop(semid, sops, nsops));
2619 struct target_msqid_ds
2621 struct target_ipc_perm msg_perm;
2622 abi_ulong msg_stime;
2623 #if TARGET_ABI_BITS == 32
2624 abi_ulong __unused1;
2625 #endif
2626 abi_ulong msg_rtime;
2627 #if TARGET_ABI_BITS == 32
2628 abi_ulong __unused2;
2629 #endif
2630 abi_ulong msg_ctime;
2631 #if TARGET_ABI_BITS == 32
2632 abi_ulong __unused3;
2633 #endif
2634 abi_ulong __msg_cbytes;
2635 abi_ulong msg_qnum;
2636 abi_ulong msg_qbytes;
2637 abi_ulong msg_lspid;
2638 abi_ulong msg_lrpid;
2639 abi_ulong __unused4;
2640 abi_ulong __unused5;
2643 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2644 abi_ulong target_addr)
2646 struct target_msqid_ds *target_md;
2648 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2649 return -TARGET_EFAULT;
2650 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2651 return -TARGET_EFAULT;
2652 host_md->msg_stime = tswapal(target_md->msg_stime);
2653 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2654 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2655 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2656 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2657 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2658 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2659 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2660 unlock_user_struct(target_md, target_addr, 0);
2661 return 0;
2664 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2665 struct msqid_ds *host_md)
2667 struct target_msqid_ds *target_md;
2669 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2670 return -TARGET_EFAULT;
2671 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2672 return -TARGET_EFAULT;
2673 target_md->msg_stime = tswapal(host_md->msg_stime);
2674 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2675 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2676 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2677 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2678 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2679 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2680 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2681 unlock_user_struct(target_md, target_addr, 1);
2682 return 0;
2685 struct target_msginfo {
2686 int msgpool;
2687 int msgmap;
2688 int msgmax;
2689 int msgmnb;
2690 int msgmni;
2691 int msgssz;
2692 int msgtql;
2693 unsigned short int msgseg;
2696 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2697 struct msginfo *host_msginfo)
2699 struct target_msginfo *target_msginfo;
2700 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2701 return -TARGET_EFAULT;
2702 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2703 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2704 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2705 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2706 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2707 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2708 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2709 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2710 unlock_user_struct(target_msginfo, target_addr, 1);
2711 return 0;
2714 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2716 struct msqid_ds dsarg;
2717 struct msginfo msginfo;
2718 abi_long ret = -TARGET_EINVAL;
2720 cmd &= 0xff;
2722 switch (cmd) {
2723 case IPC_STAT:
2724 case IPC_SET:
2725 case MSG_STAT:
2726 if (target_to_host_msqid_ds(&dsarg,ptr))
2727 return -TARGET_EFAULT;
2728 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2729 if (host_to_target_msqid_ds(ptr,&dsarg))
2730 return -TARGET_EFAULT;
2731 break;
2732 case IPC_RMID:
2733 ret = get_errno(msgctl(msgid, cmd, NULL));
2734 break;
2735 case IPC_INFO:
2736 case MSG_INFO:
2737 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2738 if (host_to_target_msginfo(ptr, &msginfo))
2739 return -TARGET_EFAULT;
2740 break;
2743 return ret;
2746 struct target_msgbuf {
2747 abi_long mtype;
2748 char mtext[1];
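/* The guest msgbuf starts with an abi_long mtype followed by the message text,
 * so the helpers below rebuild a host struct msgbuf with a host 'long' mtype
 * before calling msgsnd()/msgrcv(). */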
2751 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2752 unsigned int msgsz, int msgflg)
2754 struct target_msgbuf *target_mb;
2755 struct msgbuf *host_mb;
2756 abi_long ret = 0;
2758 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2759 return -TARGET_EFAULT;
2760 host_mb = malloc(msgsz+sizeof(long));
2761 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2762 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2763 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2764 free(host_mb);
2765 unlock_user_struct(target_mb, msgp, 0);
2767 return ret;
2770 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2771 unsigned int msgsz, abi_long msgtyp,
2772 int msgflg)
2774 struct target_msgbuf *target_mb;
2775 char *target_mtext;
2776 struct msgbuf *host_mb;
2777 abi_long ret = 0;
2779 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2780 return -TARGET_EFAULT;
2782 host_mb = g_malloc(msgsz+sizeof(long));
2783 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2785 if (ret > 0) {
2786 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2787 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2788 if (!target_mtext) {
2789 ret = -TARGET_EFAULT;
2790 goto end;
2792 memcpy(target_mb->mtext, host_mb->mtext, ret);
2793 unlock_user(target_mtext, target_mtext_addr, ret);
2796 target_mb->mtype = tswapal(host_mb->mtype);
2798 end:
2799 if (target_mb)
2800 unlock_user_struct(target_mb, msgp, 1);
2801 g_free(host_mb);
2802 return ret;
2805 struct target_shmid_ds
2807 struct target_ipc_perm shm_perm;
2808 abi_ulong shm_segsz;
2809 abi_ulong shm_atime;
2810 #if TARGET_ABI_BITS == 32
2811 abi_ulong __unused1;
2812 #endif
2813 abi_ulong shm_dtime;
2814 #if TARGET_ABI_BITS == 32
2815 abi_ulong __unused2;
2816 #endif
2817 abi_ulong shm_ctime;
2818 #if TARGET_ABI_BITS == 32
2819 abi_ulong __unused3;
2820 #endif
2821 int shm_cpid;
2822 int shm_lpid;
2823 abi_ulong shm_nattch;
2824 unsigned long int __unused4;
2825 unsigned long int __unused5;
2828 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2829 abi_ulong target_addr)
2831 struct target_shmid_ds *target_sd;
2833 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2834 return -TARGET_EFAULT;
2835 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2836 return -TARGET_EFAULT;
2837 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2838 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2839 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2840 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2841 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2842 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2843 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2844 unlock_user_struct(target_sd, target_addr, 0);
2845 return 0;
2848 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2849 struct shmid_ds *host_sd)
2851 struct target_shmid_ds *target_sd;
2853 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2854 return -TARGET_EFAULT;
2855 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2856 return -TARGET_EFAULT;
2857 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2858 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2859 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2860 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2861 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2862 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2863 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2864 unlock_user_struct(target_sd, target_addr, 1);
2865 return 0;
2868 struct target_shminfo {
2869 abi_ulong shmmax;
2870 abi_ulong shmmin;
2871 abi_ulong shmmni;
2872 abi_ulong shmseg;
2873 abi_ulong shmall;
2876 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2877 struct shminfo *host_shminfo)
2879 struct target_shminfo *target_shminfo;
2880 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2881 return -TARGET_EFAULT;
2882 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2883 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2884 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2885 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2886 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2887 unlock_user_struct(target_shminfo, target_addr, 1);
2888 return 0;
2891 struct target_shm_info {
2892 int used_ids;
2893 abi_ulong shm_tot;
2894 abi_ulong shm_rss;
2895 abi_ulong shm_swp;
2896 abi_ulong swap_attempts;
2897 abi_ulong swap_successes;
2900 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2901 struct shm_info *host_shm_info)
2903 struct target_shm_info *target_shm_info;
2904 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2905 return -TARGET_EFAULT;
2906 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2907 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2908 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2909 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2910 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2911 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2912 unlock_user_struct(target_shm_info, target_addr, 1);
2913 return 0;
2916 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2918 struct shmid_ds dsarg;
2919 struct shminfo shminfo;
2920 struct shm_info shm_info;
2921 abi_long ret = -TARGET_EINVAL;
2923 cmd &= 0xff;
2925 switch(cmd) {
2926 case IPC_STAT:
2927 case IPC_SET:
2928 case SHM_STAT:
2929 if (target_to_host_shmid_ds(&dsarg, buf))
2930 return -TARGET_EFAULT;
2931 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2932 if (host_to_target_shmid_ds(buf, &dsarg))
2933 return -TARGET_EFAULT;
2934 break;
2935 case IPC_INFO:
2936 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2937 if (host_to_target_shminfo(buf, &shminfo))
2938 return -TARGET_EFAULT;
2939 break;
2940 case SHM_INFO:
2941 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2942 if (host_to_target_shm_info(buf, &shm_info))
2943 return -TARGET_EFAULT;
2944 break;
2945 case IPC_RMID:
2946 case SHM_LOCK:
2947 case SHM_UNLOCK:
2948 ret = get_errno(shmctl(shmid, cmd, NULL));
2949 break;
2952 return ret;
2955 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2957 abi_long raddr;
2958 void *host_raddr;
2959 struct shmid_ds shm_info;
2960 int i,ret;
2962 /* find out the length of the shared memory segment */
2963 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2964 if (is_error(ret)) {
2965 /* can't get length, bail out */
2966 return ret;
2969 mmap_lock();
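/* If the guest supplied no address, pick a free spot in the guest address
 * space with mmap_find_vma() and force the host mapping there with SHM_REMAP,
 * so the guest and host views of the segment stay in sync. */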
2971 if (shmaddr)
2972 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2973 else {
2974 abi_ulong mmap_start;
2976 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2978 if (mmap_start == -1) {
2979 errno = ENOMEM;
2980 host_raddr = (void *)-1;
2981 } else
2982 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2985 if (host_raddr == (void *)-1) {
2986 mmap_unlock();
2987 return get_errno((long)host_raddr);
2989 raddr=h2g((unsigned long)host_raddr);
2991 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2992 PAGE_VALID | PAGE_READ |
2993 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2995 for (i = 0; i < N_SHM_REGIONS; i++) {
2996 if (shm_regions[i].start == 0) {
2997 shm_regions[i].start = raddr;
2998 shm_regions[i].size = shm_info.shm_segsz;
2999 break;
3003 mmap_unlock();
3004 return raddr;
3008 static inline abi_long do_shmdt(abi_ulong shmaddr)
3010 int i;
3012 for (i = 0; i < N_SHM_REGIONS; ++i) {
3013 if (shm_regions[i].start == shmaddr) {
3014 shm_regions[i].start = 0;
3015 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3016 break;
3020 return get_errno(shmdt(g2h(shmaddr)));
3023 #ifdef TARGET_NR_ipc
3024 /* ??? This only works with linear mappings. */
3025 /* do_ipc() must return target values and target errnos. */
3026 static abi_long do_ipc(unsigned int call, int first,
3027 int second, int third,
3028 abi_long ptr, abi_long fifth)
3030 int version;
3031 abi_long ret = 0;
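/* The ipc() multiplexer encodes an interface version in the upper 16 bits of
 * the call number; split it out before dispatching on the call itself. */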
3033 version = call >> 16;
3034 call &= 0xffff;
3036 switch (call) {
3037 case IPCOP_semop:
3038 ret = do_semop(first, ptr, second);
3039 break;
3041 case IPCOP_semget:
3042 ret = get_errno(semget(first, second, third));
3043 break;
3045 case IPCOP_semctl:
3046 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3047 break;
3049 case IPCOP_msgget:
3050 ret = get_errno(msgget(first, second));
3051 break;
3053 case IPCOP_msgsnd:
3054 ret = do_msgsnd(first, ptr, second, third);
3055 break;
3057 case IPCOP_msgctl:
3058 ret = do_msgctl(first, second, ptr);
3059 break;
3061 case IPCOP_msgrcv:
3062 switch (version) {
3063 case 0:
3065 struct target_ipc_kludge {
3066 abi_long msgp;
3067 abi_long msgtyp;
3068 } *tmp;
3070 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3071 ret = -TARGET_EFAULT;
3072 break;
3075 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3077 unlock_user_struct(tmp, ptr, 0);
3078 break;
3080 default:
3081 ret = do_msgrcv(first, ptr, second, fifth, third);
3083 break;
3085 case IPCOP_shmat:
3086 switch (version) {
3087 default:
3089 abi_ulong raddr;
3090 raddr = do_shmat(first, ptr, second);
3091 if (is_error(raddr))
3092 return get_errno(raddr);
3093 if (put_user_ual(raddr, third))
3094 return -TARGET_EFAULT;
3095 break;
3097 case 1:
3098 ret = -TARGET_EINVAL;
3099 break;
3101 break;
3102 case IPCOP_shmdt:
3103 ret = do_shmdt(ptr);
3104 break;
3106 case IPCOP_shmget:
3107 /* IPC_* flag values are the same on all Linux platforms */
3108 ret = get_errno(shmget(first, second, third));
3109 break;
3111 /* IPC_* and SHM_* command values are the same on all Linux platforms */
3112 case IPCOP_shmctl:
3113 ret = do_shmctl(first, second, third);
3114 break;
3115 default:
3116 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3117 ret = -TARGET_ENOSYS;
3118 break;
3120 return ret;
3122 #endif
3124 /* kernel structure types definitions */
3126 #define STRUCT(name, ...) STRUCT_ ## name,
3127 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3128 enum {
3129 #include "syscall_types.h"
3131 #undef STRUCT
3132 #undef STRUCT_SPECIAL
3134 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3135 #define STRUCT_SPECIAL(name)
3136 #include "syscall_types.h"
3137 #undef STRUCT
3138 #undef STRUCT_SPECIAL
3140 typedef struct IOCTLEntry IOCTLEntry;
3142 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3143 int fd, abi_long cmd, abi_long arg);
3145 struct IOCTLEntry {
3146 unsigned int target_cmd;
3147 unsigned int host_cmd;
3148 const char *name;
3149 int access;
3150 do_ioctl_fn *do_ioctl;
3151 const argtype arg_type[5];
3154 #define IOC_R 0x0001
3155 #define IOC_W 0x0002
3156 #define IOC_RW (IOC_R | IOC_W)
3158 #define MAX_STRUCT_SIZE 4096
3160 #ifdef CONFIG_FIEMAP
3161 /* So fiemap access checks don't overflow on 32 bit systems.
3162 * This is very slightly smaller than the limit imposed by
3163 * the underlying kernel.
3165 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3166 / sizeof(struct fiemap_extent))
3168 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3169 int fd, abi_long cmd, abi_long arg)
3171 /* The parameter for this ioctl is a struct fiemap followed
3172 * by an array of struct fiemap_extent whose size is set
3173 * in fiemap->fm_extent_count. The array is filled in by the
3174 * ioctl.
3176 int target_size_in, target_size_out;
3177 struct fiemap *fm;
3178 const argtype *arg_type = ie->arg_type;
3179 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3180 void *argptr, *p;
3181 abi_long ret;
3182 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3183 uint32_t outbufsz;
3184 int free_fm = 0;
3186 assert(arg_type[0] == TYPE_PTR);
3187 assert(ie->access == IOC_RW);
3188 arg_type++;
3189 target_size_in = thunk_type_size(arg_type, 0);
3190 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3191 if (!argptr) {
3192 return -TARGET_EFAULT;
3194 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3195 unlock_user(argptr, arg, 0);
3196 fm = (struct fiemap *)buf_temp;
3197 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3198 return -TARGET_EINVAL;
3201 outbufsz = sizeof (*fm) +
3202 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3204 if (outbufsz > MAX_STRUCT_SIZE) {
3205 /* We can't fit all the extents into the fixed size buffer.
3206 * Allocate one that is large enough and use it instead.
3208 fm = malloc(outbufsz);
3209 if (!fm) {
3210 return -TARGET_ENOMEM;
3212 memcpy(fm, buf_temp, sizeof(struct fiemap));
3213 free_fm = 1;
3215 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3216 if (!is_error(ret)) {
3217 target_size_out = target_size_in;
3218 /* An extent_count of 0 means we were only counting the extents
3219 * so there are no structs to copy
3221 if (fm->fm_extent_count != 0) {
3222 target_size_out += fm->fm_mapped_extents * extent_size;
3224 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3225 if (!argptr) {
3226 ret = -TARGET_EFAULT;
3227 } else {
3228 /* Convert the struct fiemap */
3229 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3230 if (fm->fm_extent_count != 0) {
3231 p = argptr + target_size_in;
3232 /* ...and then all the struct fiemap_extents */
3233 for (i = 0; i < fm->fm_mapped_extents; i++) {
3234 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3235 THUNK_TARGET);
3236 p += extent_size;
3239 unlock_user(argptr, arg, target_size_out);
3242 if (free_fm) {
3243 free(fm);
3245 return ret;
3247 #endif
3249 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3250 int fd, abi_long cmd, abi_long arg)
3252 const argtype *arg_type = ie->arg_type;
3253 int target_size;
3254 void *argptr;
3255 int ret;
3256 struct ifconf *host_ifconf;
3257 uint32_t outbufsz;
3258 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3259 int target_ifreq_size;
3260 int nb_ifreq;
3261 int free_buf = 0;
3262 int i;
3263 int target_ifc_len;
3264 abi_long target_ifc_buf;
3265 int host_ifc_len;
3266 char *host_ifc_buf;
3268 assert(arg_type[0] == TYPE_PTR);
3269 assert(ie->access == IOC_RW);
3271 arg_type++;
3272 target_size = thunk_type_size(arg_type, 0);
3274 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3275 if (!argptr)
3276 return -TARGET_EFAULT;
3277 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3278 unlock_user(argptr, arg, 0);
3280 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3281 target_ifc_len = host_ifconf->ifc_len;
3282 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3284 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3285 nb_ifreq = target_ifc_len / target_ifreq_size;
3286 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3288 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3289 if (outbufsz > MAX_STRUCT_SIZE) {
3290 /* We can't fit all the ifreq entries into the fixed size buffer.
3291 * Allocate one that is large enough and use it instead.
3293 host_ifconf = malloc(outbufsz);
3294 if (!host_ifconf) {
3295 return -TARGET_ENOMEM;
3297 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3298 free_buf = 1;
3300 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3302 host_ifconf->ifc_len = host_ifc_len;
3303 host_ifconf->ifc_buf = host_ifc_buf;
3305 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3306 if (!is_error(ret)) {
3307 /* convert host ifc_len to target ifc_len */
3309 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3310 target_ifc_len = nb_ifreq * target_ifreq_size;
3311 host_ifconf->ifc_len = target_ifc_len;
3313 /* restore target ifc_buf */
3315 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3317 /* copy struct ifconf to target user */
3319 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3320 if (!argptr)
3321 return -TARGET_EFAULT;
3322 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3323 unlock_user(argptr, arg, target_size);
3325 /* copy ifreq[] to target user */
3327 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3328 for (i = 0; i < nb_ifreq ; i++) {
3329 thunk_convert(argptr + i * target_ifreq_size,
3330 host_ifc_buf + i * sizeof(struct ifreq),
3331 ifreq_arg_type, THUNK_TARGET);
3333 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3336 if (free_buf) {
3337 free(host_ifconf);
3340 return ret;
3343 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3344 abi_long cmd, abi_long arg)
3346 void *argptr;
3347 struct dm_ioctl *host_dm;
3348 abi_long guest_data;
3349 uint32_t guest_data_size;
3350 int target_size;
3351 const argtype *arg_type = ie->arg_type;
3352 abi_long ret;
3353 void *big_buf = NULL;
3354 char *host_data;
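/* Device-mapper ioctls carry a variable-sized payload that follows the fixed
 * struct dm_ioctl header at offset data_start.  The header is converted with
 * the generic thunk code; the payload is converted by hand per command below. */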
3356 arg_type++;
3357 target_size = thunk_type_size(arg_type, 0);
3358 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3359 if (!argptr) {
3360 ret = -TARGET_EFAULT;
3361 goto out;
3363 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3364 unlock_user(argptr, arg, 0);
3366 /* buf_temp is too small, so fetch things into a bigger buffer */
3367 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3368 memcpy(big_buf, buf_temp, target_size);
3369 buf_temp = big_buf;
3370 host_dm = big_buf;
3372 guest_data = arg + host_dm->data_start;
3373 if ((guest_data - arg) < 0) {
3374 ret = -EINVAL;
3375 goto out;
3377 guest_data_size = host_dm->data_size - host_dm->data_start;
3378 host_data = (char*)host_dm + host_dm->data_start;
3380 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3381 switch (ie->host_cmd) {
3382 case DM_REMOVE_ALL:
3383 case DM_LIST_DEVICES:
3384 case DM_DEV_CREATE:
3385 case DM_DEV_REMOVE:
3386 case DM_DEV_SUSPEND:
3387 case DM_DEV_STATUS:
3388 case DM_DEV_WAIT:
3389 case DM_TABLE_STATUS:
3390 case DM_TABLE_CLEAR:
3391 case DM_TABLE_DEPS:
3392 case DM_LIST_VERSIONS:
3393 /* no input data */
3394 break;
3395 case DM_DEV_RENAME:
3396 case DM_DEV_SET_GEOMETRY:
3397 /* data contains only strings */
3398 memcpy(host_data, argptr, guest_data_size);
3399 break;
3400 case DM_TARGET_MSG:
3401 memcpy(host_data, argptr, guest_data_size);
3402 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3403 break;
3404 case DM_TABLE_LOAD:
3406 void *gspec = argptr;
3407 void *cur_data = host_data;
3408 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3409 int spec_size = thunk_type_size(arg_type, 0);
3410 int i;
3412 for (i = 0; i < host_dm->target_count; i++) {
3413 struct dm_target_spec *spec = cur_data;
3414 uint32_t next;
3415 int slen;
3417 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3418 slen = strlen((char*)gspec + spec_size) + 1;
3419 next = spec->next;
3420 spec->next = sizeof(*spec) + slen;
3421 strcpy((char*)&spec[1], gspec + spec_size);
3422 gspec += next;
3423 cur_data += spec->next;
3425 break;
3427 default:
3428 ret = -TARGET_EINVAL;
3429 goto out;
3431 unlock_user(argptr, guest_data, 0);
3433 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3434 if (!is_error(ret)) {
3435 guest_data = arg + host_dm->data_start;
3436 guest_data_size = host_dm->data_size - host_dm->data_start;
3437 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3438 switch (ie->host_cmd) {
3439 case DM_REMOVE_ALL:
3440 case DM_DEV_CREATE:
3441 case DM_DEV_REMOVE:
3442 case DM_DEV_RENAME:
3443 case DM_DEV_SUSPEND:
3444 case DM_DEV_STATUS:
3445 case DM_TABLE_LOAD:
3446 case DM_TABLE_CLEAR:
3447 case DM_TARGET_MSG:
3448 case DM_DEV_SET_GEOMETRY:
3449 /* no return data */
3450 break;
3451 case DM_LIST_DEVICES:
3453 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3454 uint32_t remaining_data = guest_data_size;
3455 void *cur_data = argptr;
3456 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3457 int nl_size = 12; /* can't use thunk_size due to alignment */
3459 while (1) {
3460 uint32_t next = nl->next;
3461 if (next) {
3462 nl->next = nl_size + (strlen(nl->name) + 1);
3464 if (remaining_data < nl->next) {
3465 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3466 break;
3468 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3469 strcpy(cur_data + nl_size, nl->name);
3470 cur_data += nl->next;
3471 remaining_data -= nl->next;
3472 if (!next) {
3473 break;
3475 nl = (void*)nl + next;
3477 break;
3479 case DM_DEV_WAIT:
3480 case DM_TABLE_STATUS:
3482 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3483 void *cur_data = argptr;
3484 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3485 int spec_size = thunk_type_size(arg_type, 0);
3486 int i;
3488 for (i = 0; i < host_dm->target_count; i++) {
3489 uint32_t next = spec->next;
3490 int slen = strlen((char*)&spec[1]) + 1;
3491 spec->next = (cur_data - argptr) + spec_size + slen;
3492 if (guest_data_size < spec->next) {
3493 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3494 break;
3496 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3497 strcpy(cur_data + spec_size, (char*)&spec[1]);
3498 cur_data = argptr + spec->next;
3499 spec = (void*)host_dm + host_dm->data_start + next;
3501 break;
3503 case DM_TABLE_DEPS:
3505 void *hdata = (void*)host_dm + host_dm->data_start;
3506 int count = *(uint32_t*)hdata;
3507 uint64_t *hdev = hdata + 8;
3508 uint64_t *gdev = argptr + 8;
3509 int i;
3511 *(uint32_t*)argptr = tswap32(count);
3512 for (i = 0; i < count; i++) {
3513 *gdev = tswap64(*hdev);
3514 gdev++;
3515 hdev++;
3517 break;
3519 case DM_LIST_VERSIONS:
3521 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3522 uint32_t remaining_data = guest_data_size;
3523 void *cur_data = argptr;
3524 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3525 int vers_size = thunk_type_size(arg_type, 0);
3527 while (1) {
3528 uint32_t next = vers->next;
3529 if (next) {
3530 vers->next = vers_size + (strlen(vers->name) + 1);
3532 if (remaining_data < vers->next) {
3533 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3534 break;
3536 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3537 strcpy(cur_data + vers_size, vers->name);
3538 cur_data += vers->next;
3539 remaining_data -= vers->next;
3540 if (!next) {
3541 break;
3543 vers = (void*)vers + next;
3545 break;
3547 default:
3548 ret = -TARGET_EINVAL;
3549 goto out;
3551 unlock_user(argptr, guest_data, guest_data_size);
3553 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3554 if (!argptr) {
3555 ret = -TARGET_EFAULT;
3556 goto out;
3558 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3559 unlock_user(argptr, arg, target_size);
3561 out:
3562 g_free(big_buf);
3563 return ret;
3566 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3567 int fd, abi_long cmd, abi_long arg)
3569 const argtype *arg_type = ie->arg_type;
3570 const StructEntry *se;
3571 const argtype *field_types;
3572 const int *dst_offsets, *src_offsets;
3573 int target_size;
3574 void *argptr;
3575 abi_ulong *target_rt_dev_ptr;
3576 unsigned long *host_rt_dev_ptr;
3577 abi_long ret;
3578 int i;
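/* The routing ioctls handled here take a struct rtentry whose rt_dev field is
 * a guest pointer to a device name string.  The generic thunk conversion
 * cannot follow that pointer, so walk the fields by hand and lock the string
 * for the host ioctl, unlocking it again afterwards. */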
3580 assert(ie->access == IOC_W);
3581 assert(*arg_type == TYPE_PTR);
3582 arg_type++;
3583 assert(*arg_type == TYPE_STRUCT);
3584 target_size = thunk_type_size(arg_type, 0);
3585 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3586 if (!argptr) {
3587 return -TARGET_EFAULT;
3589 arg_type++;
3590 assert(*arg_type == (int)STRUCT_rtentry);
3591 se = struct_entries + *arg_type++;
3592 assert(se->convert[0] == NULL);
3593 /* convert struct here to be able to catch rt_dev string */
3594 field_types = se->field_types;
3595 dst_offsets = se->field_offsets[THUNK_HOST];
3596 src_offsets = se->field_offsets[THUNK_TARGET];
3597 for (i = 0; i < se->nb_fields; i++) {
3598 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3599 assert(*field_types == TYPE_PTRVOID);
3600 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3601 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3602 if (*target_rt_dev_ptr != 0) {
3603 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3604 tswapal(*target_rt_dev_ptr));
3605 if (!*host_rt_dev_ptr) {
3606 unlock_user(argptr, arg, 0);
3607 return -TARGET_EFAULT;
3609 } else {
3610 *host_rt_dev_ptr = 0;
3612 field_types++;
3613 continue;
3615 field_types = thunk_convert(buf_temp + dst_offsets[i],
3616 argptr + src_offsets[i],
3617 field_types, THUNK_HOST);
3619 unlock_user(argptr, arg, 0);
3621 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3622 if (*host_rt_dev_ptr != 0) {
3623 unlock_user((void *)*host_rt_dev_ptr,
3624 *target_rt_dev_ptr, 0);
3626 return ret;
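/* The ioctl translation table is generated from ioctls.h: each IOCTL() entry
 * maps a target command number to the host command plus an argument type
 * description, IOCTL_SPECIAL() additionally names a custom handler, and the
 * all-zero entry terminates the table. */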
3629 static IOCTLEntry ioctl_entries[] = {
3630 #define IOCTL(cmd, access, ...) \
3631 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3632 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3633 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3634 #include "ioctls.h"
3635 { 0, 0, },
3638 /* ??? Implement proper locking for ioctls. */
3639 /* do_ioctl() must return target values and target errnos. */
3640 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3642 const IOCTLEntry *ie;
3643 const argtype *arg_type;
3644 abi_long ret;
3645 uint8_t buf_temp[MAX_STRUCT_SIZE];
3646 int target_size;
3647 void *argptr;
3649 ie = ioctl_entries;
3650 for(;;) {
3651 if (ie->target_cmd == 0) {
3652 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3653 return -TARGET_ENOSYS;
3655 if (ie->target_cmd == cmd)
3656 break;
3657 ie++;
3659 arg_type = ie->arg_type;
3660 #if defined(DEBUG)
3661 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3662 #endif
3663 if (ie->do_ioctl) {
3664 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3667 switch(arg_type[0]) {
3668 case TYPE_NULL:
3669 /* no argument */
3670 ret = get_errno(ioctl(fd, ie->host_cmd));
3671 break;
3672 case TYPE_PTRVOID:
3673 case TYPE_INT:
3674 /* int argument */
3675 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3676 break;
3677 case TYPE_PTR:
3678 arg_type++;
3679 target_size = thunk_type_size(arg_type, 0);
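/* Pointer arguments go through buf_temp: for IOC_W/IOC_RW ioctls the target
 * struct is copied in and converted first, then the host ioctl runs, and for
 * IOC_R/IOC_RW ioctls the result is converted back into guest memory. */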
3680 switch(ie->access) {
3681 case IOC_R:
3682 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3683 if (!is_error(ret)) {
3684 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3685 if (!argptr)
3686 return -TARGET_EFAULT;
3687 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3688 unlock_user(argptr, arg, target_size);
3690 break;
3691 case IOC_W:
3692 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3693 if (!argptr)
3694 return -TARGET_EFAULT;
3695 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3696 unlock_user(argptr, arg, 0);
3697 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3698 break;
3699 default:
3700 case IOC_RW:
3701 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3702 if (!argptr)
3703 return -TARGET_EFAULT;
3704 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3705 unlock_user(argptr, arg, 0);
3706 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3707 if (!is_error(ret)) {
3708 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3709 if (!argptr)
3710 return -TARGET_EFAULT;
3711 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3712 unlock_user(argptr, arg, target_size);
3714 break;
3716 break;
3717 default:
3718 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3719 (long)cmd, arg_type[0]);
3720 ret = -TARGET_ENOSYS;
3721 break;
3723 return ret;
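/* Each bitmask_transtbl row is { target_mask, target_bits, host_mask,
 * host_bits }: target_to_host_bitmask() and host_to_target_bitmask() use the
 * tables below to translate flag bits whose numeric values differ between
 * guest and host (termios flags here, mmap flags further down). */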
3726 static const bitmask_transtbl iflag_tbl[] = {
3727 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3728 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3729 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3730 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3731 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3732 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3733 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3734 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3735 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3736 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3737 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3738 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3739 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3740 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3741 { 0, 0, 0, 0 }
3744 static const bitmask_transtbl oflag_tbl[] = {
3745 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3746 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3747 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3748 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3749 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3750 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3751 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3752 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3753 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3754 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3755 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3756 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3757 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3758 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3759 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3760 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3761 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3762 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3763 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3764 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3765 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3766 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3767 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3768 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3769 { 0, 0, 0, 0 }
3772 static const bitmask_transtbl cflag_tbl[] = {
3773 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3774 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3775 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3776 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3777 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3778 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3779 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3780 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3781 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3782 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3783 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3784 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3785 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3786 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3787 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3788 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3789 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3790 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3791 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3792 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3793 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3794 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3795 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3796 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3797 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3798 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3799 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3800 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3801 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3802 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3803 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3804 { 0, 0, 0, 0 }
3807 static const bitmask_transtbl lflag_tbl[] = {
3808 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3809 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3810 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3811 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3812 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3813 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3814 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3815 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3816 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3817 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3818 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3819 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3820 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3821 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3822 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3823 { 0, 0, 0, 0 }
3826 static void target_to_host_termios (void *dst, const void *src)
3828 struct host_termios *host = dst;
3829 const struct target_termios *target = src;
3831 host->c_iflag =
3832 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3833 host->c_oflag =
3834 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3835 host->c_cflag =
3836 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3837 host->c_lflag =
3838 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3839 host->c_line = target->c_line;
3841 memset(host->c_cc, 0, sizeof(host->c_cc));
3842 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3843 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3844 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3845 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3846 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3847 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3848 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3849 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3850 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3851 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3852 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3853 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3854 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3855 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3856 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3857 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3858 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3861 static void host_to_target_termios (void *dst, const void *src)
3863 struct target_termios *target = dst;
3864 const struct host_termios *host = src;
3866 target->c_iflag =
3867 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3868 target->c_oflag =
3869 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3870 target->c_cflag =
3871 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3872 target->c_lflag =
3873 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3874 target->c_line = host->c_line;
3876 memset(target->c_cc, 0, sizeof(target->c_cc));
3877 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3878 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3879 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3880 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3881 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3882 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3883 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3884 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3885 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3886 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3887 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3888 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3889 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3890 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3891 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3892 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3893 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3896 static const StructEntry struct_termios_def = {
3897 .convert = { host_to_target_termios, target_to_host_termios },
3898 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3899 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3902 static bitmask_transtbl mmap_flags_tbl[] = {
3903 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3904 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3905 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3906 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3907 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3908 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3909 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3910 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3911 { 0, 0, 0, 0 }
3914 #if defined(TARGET_I386)
3916 /* NOTE: there is really one LDT for all the threads */
3917 static uint8_t *ldt_table;
3919 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3921 int size;
3922 void *p;
3924 if (!ldt_table)
3925 return 0;
3926 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3927 if (size > bytecount)
3928 size = bytecount;
3929 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3930 if (!p)
3931 return -TARGET_EFAULT;
3932 /* ??? Should this be byteswapped? */
3933 memcpy(p, ldt_table, size);
3934 unlock_user(p, ptr, size);
3935 return size;
3938 /* XXX: add locking support */
3939 static abi_long write_ldt(CPUX86State *env,
3940 abi_ulong ptr, unsigned long bytecount, int oldmode)
3942 struct target_modify_ldt_ldt_s ldt_info;
3943 struct target_modify_ldt_ldt_s *target_ldt_info;
3944 int seg_32bit, contents, read_exec_only, limit_in_pages;
3945 int seg_not_present, useable, lm;
3946 uint32_t *lp, entry_1, entry_2;
3948 if (bytecount != sizeof(ldt_info))
3949 return -TARGET_EINVAL;
3950 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3951 return -TARGET_EFAULT;
3952 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3953 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3954 ldt_info.limit = tswap32(target_ldt_info->limit);
3955 ldt_info.flags = tswap32(target_ldt_info->flags);
3956 unlock_user_struct(target_ldt_info, ptr, 0);
3958 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3959 return -TARGET_EINVAL;
3960 seg_32bit = ldt_info.flags & 1;
3961 contents = (ldt_info.flags >> 1) & 3;
3962 read_exec_only = (ldt_info.flags >> 3) & 1;
3963 limit_in_pages = (ldt_info.flags >> 4) & 1;
3964 seg_not_present = (ldt_info.flags >> 5) & 1;
3965 useable = (ldt_info.flags >> 6) & 1;
3966 #ifdef TARGET_ABI32
3967 lm = 0;
3968 #else
3969 lm = (ldt_info.flags >> 7) & 1;
3970 #endif
3971 if (contents == 3) {
3972 if (oldmode)
3973 return -TARGET_EINVAL;
3974 if (seg_not_present == 0)
3975 return -TARGET_EINVAL;
3977 /* allocate the LDT */
3978 if (!ldt_table) {
3979 env->ldt.base = target_mmap(0,
3980 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3981 PROT_READ|PROT_WRITE,
3982 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3983 if (env->ldt.base == -1)
3984 return -TARGET_ENOMEM;
3985 memset(g2h(env->ldt.base), 0,
3986 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3987 env->ldt.limit = 0xffff;
3988 ldt_table = g2h(env->ldt.base);
3991 /* NOTE: same code as Linux kernel */
3992 /* Allow LDTs to be cleared by the user. */
3993 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3994 if (oldmode ||
3995 (contents == 0 &&
3996 read_exec_only == 1 &&
3997 seg_32bit == 0 &&
3998 limit_in_pages == 0 &&
3999 seg_not_present == 1 &&
4000 useable == 0 )) {
4001 entry_1 = 0;
4002 entry_2 = 0;
4003 goto install;
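/* Pack the descriptor the same way the kernel does: entry_1 holds
   base[15:0] in its upper half and limit[15:0] in its lower half;
   entry_2 holds base[31:24], base[23:16], limit[19:16] and the
   access/flag bits, with 0x7000 setting S=1 (code/data segment) and
   DPL=3. */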
4007 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4008 (ldt_info.limit & 0x0ffff);
4009 entry_2 = (ldt_info.base_addr & 0xff000000) |
4010 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4011 (ldt_info.limit & 0xf0000) |
4012 ((read_exec_only ^ 1) << 9) |
4013 (contents << 10) |
4014 ((seg_not_present ^ 1) << 15) |
4015 (seg_32bit << 22) |
4016 (limit_in_pages << 23) |
4017 (lm << 21) |
4018 0x7000;
4019 if (!oldmode)
4020 entry_2 |= (useable << 20);
4022 /* Install the new entry ... */
4023 install:
4024 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4025 lp[0] = tswap32(entry_1);
4026 lp[1] = tswap32(entry_2);
4027 return 0;
4030 /* specific and weird i386 syscalls */
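/* do_modify_ldt(): func 0 reads the LDT, func 1 writes an entry using the
   old interface and func 0x11 writes using the new one; anything else
   fails with -TARGET_ENOSYS. */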
4031 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4032 unsigned long bytecount)
4034 abi_long ret;
4036 switch (func) {
4037 case 0:
4038 ret = read_ldt(ptr, bytecount);
4039 break;
4040 case 1:
4041 ret = write_ldt(env, ptr, bytecount, 1);
4042 break;
4043 case 0x11:
4044 ret = write_ldt(env, ptr, bytecount, 0);
4045 break;
4046 default:
4047 ret = -TARGET_ENOSYS;
4048 break;
4050 return ret;
4053 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4054 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4056 uint64_t *gdt_table = g2h(env->gdt.base);
4057 struct target_modify_ldt_ldt_s ldt_info;
4058 struct target_modify_ldt_ldt_s *target_ldt_info;
4059 int seg_32bit, contents, read_exec_only, limit_in_pages;
4060 int seg_not_present, useable, lm;
4061 uint32_t *lp, entry_1, entry_2;
4062 int i;
4064 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4065 if (!target_ldt_info)
4066 return -TARGET_EFAULT;
4067 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4068 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4069 ldt_info.limit = tswap32(target_ldt_info->limit);
4070 ldt_info.flags = tswap32(target_ldt_info->flags);
4071 if (ldt_info.entry_number == -1) {
4072 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4073 if (gdt_table[i] == 0) {
4074 ldt_info.entry_number = i;
4075 target_ldt_info->entry_number = tswap32(i);
4076 break;
4080 unlock_user_struct(target_ldt_info, ptr, 1);
4082 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4083 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4084 return -TARGET_EINVAL;
4085 seg_32bit = ldt_info.flags & 1;
4086 contents = (ldt_info.flags >> 1) & 3;
4087 read_exec_only = (ldt_info.flags >> 3) & 1;
4088 limit_in_pages = (ldt_info.flags >> 4) & 1;
4089 seg_not_present = (ldt_info.flags >> 5) & 1;
4090 useable = (ldt_info.flags >> 6) & 1;
4091 #ifdef TARGET_ABI32
4092 lm = 0;
4093 #else
4094 lm = (ldt_info.flags >> 7) & 1;
4095 #endif
4097 if (contents == 3) {
4098 if (seg_not_present == 0)
4099 return -TARGET_EINVAL;
4102 /* NOTE: same code as Linux kernel */
4103 /* Allow LDTs to be cleared by the user. */
4104 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4105 if ((contents == 0 &&
4106 read_exec_only == 1 &&
4107 seg_32bit == 0 &&
4108 limit_in_pages == 0 &&
4109 seg_not_present == 1 &&
4110 useable == 0 )) {
4111 entry_1 = 0;
4112 entry_2 = 0;
4113 goto install;
4117 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4118 (ldt_info.limit & 0x0ffff);
4119 entry_2 = (ldt_info.base_addr & 0xff000000) |
4120 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4121 (ldt_info.limit & 0xf0000) |
4122 ((read_exec_only ^ 1) << 9) |
4123 (contents << 10) |
4124 ((seg_not_present ^ 1) << 15) |
4125 (seg_32bit << 22) |
4126 (limit_in_pages << 23) |
4127 (useable << 20) |
4128 (lm << 21) |
4129 0x7000;
4131 /* Install the new entry ... */
4132 install:
4133 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4134 lp[0] = tswap32(entry_1);
4135 lp[1] = tswap32(entry_2);
4136 return 0;
4139 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4141 struct target_modify_ldt_ldt_s *target_ldt_info;
4142 uint64_t *gdt_table = g2h(env->gdt.base);
4143 uint32_t base_addr, limit, flags;
4144 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4145 int seg_not_present, useable, lm;
4146 uint32_t *lp, entry_1, entry_2;
4148 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4149 if (!target_ldt_info)
4150 return -TARGET_EFAULT;
4151 idx = tswap32(target_ldt_info->entry_number);
4152 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4153 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4154 unlock_user_struct(target_ldt_info, ptr, 1);
4155 return -TARGET_EINVAL;
4157 lp = (uint32_t *)(gdt_table + idx);
4158 entry_1 = tswap32(lp[0]);
4159 entry_2 = tswap32(lp[1]);
4161 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4162 contents = (entry_2 >> 10) & 3;
4163 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4164 seg_32bit = (entry_2 >> 22) & 1;
4165 limit_in_pages = (entry_2 >> 23) & 1;
4166 useable = (entry_2 >> 20) & 1;
4167 #ifdef TARGET_ABI32
4168 lm = 0;
4169 #else
4170 lm = (entry_2 >> 21) & 1;
4171 #endif
4172 flags = (seg_32bit << 0) | (contents << 1) |
4173 (read_exec_only << 3) | (limit_in_pages << 4) |
4174 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4175 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4176 base_addr = (entry_1 >> 16) |
4177 (entry_2 & 0xff000000) |
4178 ((entry_2 & 0xff) << 16);
4179 target_ldt_info->base_addr = tswapal(base_addr);
4180 target_ldt_info->limit = tswap32(limit);
4181 target_ldt_info->flags = tswap32(flags);
4182 unlock_user_struct(target_ldt_info, ptr, 1);
4183 return 0;
4185 #endif /* TARGET_I386 && TARGET_ABI32 */
4187 #ifndef TARGET_ABI32
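/* do_arch_prctl(): x86-64 arch_prctl() emulation. SET_FS/SET_GS load a
   null selector and store the requested base directly in the segment
   state; GET_FS/GET_GS write the current base back to guest memory. */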
4188 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4190 abi_long ret = 0;
4191 abi_ulong val;
4192 int idx;
4194 switch(code) {
4195 case TARGET_ARCH_SET_GS:
4196 case TARGET_ARCH_SET_FS:
4197 if (code == TARGET_ARCH_SET_GS)
4198 idx = R_GS;
4199 else
4200 idx = R_FS;
4201 cpu_x86_load_seg(env, idx, 0);
4202 env->segs[idx].base = addr;
4203 break;
4204 case TARGET_ARCH_GET_GS:
4205 case TARGET_ARCH_GET_FS:
4206 if (code == TARGET_ARCH_GET_GS)
4207 idx = R_GS;
4208 else
4209 idx = R_FS;
4210 val = env->segs[idx].base;
4211 if (put_user(val, addr, abi_ulong))
4212 ret = -TARGET_EFAULT;
4213 break;
4214 default:
4215 ret = -TARGET_EINVAL;
4216 break;
4218 return ret;
4220 #endif
4222 #endif /* defined(TARGET_I386) */
4224 #define NEW_STACK_SIZE 0x40000
4227 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4228 typedef struct {
4229 CPUArchState *env;
4230 pthread_mutex_t mutex;
4231 pthread_cond_t cond;
4232 pthread_t thread;
4233 uint32_t tid;
4234 abi_ulong child_tidptr;
4235 abi_ulong parent_tidptr;
4236 sigset_t sigmask;
4237 } new_thread_info;
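/* clone_func() runs in the newly created host thread: it records the host
   TID, stores it through the child/parent tid pointers if requested,
   restores the signal mask saved by the parent, wakes the parent via the
   condition variable, then waits on clone_lock until the parent has
   finished setting up the TLS state before entering cpu_loop(), which
   never returns. */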
4239 static void *clone_func(void *arg)
4241 new_thread_info *info = arg;
4242 CPUArchState *env;
4243 CPUState *cpu;
4244 TaskState *ts;
4246 env = info->env;
4247 cpu = ENV_GET_CPU(env);
4248 thread_cpu = cpu;
4249 ts = (TaskState *)env->opaque;
4250 info->tid = gettid();
4251 cpu->host_tid = info->tid;
4252 task_settid(ts);
4253 if (info->child_tidptr)
4254 put_user_u32(info->tid, info->child_tidptr);
4255 if (info->parent_tidptr)
4256 put_user_u32(info->tid, info->parent_tidptr);
4257 /* Enable signals. */
4258 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4259 /* Signal to the parent that we're ready. */
4260 pthread_mutex_lock(&info->mutex);
4261 pthread_cond_broadcast(&info->cond);
4262 pthread_mutex_unlock(&info->mutex);
4263 /* Wait until the parent has finished initializing the TLS state. */
4264 pthread_mutex_lock(&clone_lock);
4265 pthread_mutex_unlock(&clone_lock);
4266 cpu_loop(env);
4267 /* never exits */
4268 return NULL;
4271 /* do_fork() must return host values and target errnos (unlike most
4272 do_*() functions). */
4273 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4274 abi_ulong parent_tidptr, target_ulong newtls,
4275 abi_ulong child_tidptr)
4277 int ret;
4278 TaskState *ts;
4279 CPUArchState *new_env;
4280 unsigned int nptl_flags;
4281 sigset_t sigmask;
4283 /* Emulate vfork() with fork() */
4284 if (flags & CLONE_VFORK)
4285 flags &= ~(CLONE_VFORK | CLONE_VM);
4287 if (flags & CLONE_VM) {
4288 TaskState *parent_ts = (TaskState *)env->opaque;
4289 new_thread_info info;
4290 pthread_attr_t attr;
4292 ts = g_malloc0(sizeof(TaskState));
4293 init_task_state(ts);
4294 /* we create a new CPU instance. */
4295 new_env = cpu_copy(env);
4296 /* Init regs that differ from the parent. */
4297 cpu_clone_regs(new_env, newsp);
4298 new_env->opaque = ts;
4299 ts->bprm = parent_ts->bprm;
4300 ts->info = parent_ts->info;
4301 nptl_flags = flags;
4302 flags &= ~CLONE_NPTL_FLAGS2;
4304 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4305 ts->child_tidptr = child_tidptr;
4308 if (nptl_flags & CLONE_SETTLS)
4309 cpu_set_tls (new_env, newtls);
4311 /* Grab a mutex so that thread setup appears atomic. */
4312 pthread_mutex_lock(&clone_lock);
4314 memset(&info, 0, sizeof(info));
4315 pthread_mutex_init(&info.mutex, NULL);
4316 pthread_mutex_lock(&info.mutex);
4317 pthread_cond_init(&info.cond, NULL);
4318 info.env = new_env;
4319 if (nptl_flags & CLONE_CHILD_SETTID)
4320 info.child_tidptr = child_tidptr;
4321 if (nptl_flags & CLONE_PARENT_SETTID)
4322 info.parent_tidptr = parent_tidptr;
4324 ret = pthread_attr_init(&attr);
4325 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4326 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4327 /* It is not safe to deliver signals until the child has finished
4328 initializing, so temporarily block all signals. */
4329 sigfillset(&sigmask);
4330 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4332 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4333 /* TODO: Free new CPU state if thread creation failed. */
4335 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4336 pthread_attr_destroy(&attr);
4337 if (ret == 0) {
4338 /* Wait for the child to initialize. */
4339 pthread_cond_wait(&info.cond, &info.mutex);
4340 ret = info.tid;
4341 if (flags & CLONE_PARENT_SETTID)
4342 put_user_u32(ret, parent_tidptr);
4343 } else {
4344 ret = -1;
4346 pthread_mutex_unlock(&info.mutex);
4347 pthread_cond_destroy(&info.cond);
4348 pthread_mutex_destroy(&info.mutex);
4349 pthread_mutex_unlock(&clone_lock);
4350 } else {
4351 /* if there is no CLONE_VM, we consider it a fork */
4352 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4353 return -EINVAL;
4354 fork_start();
4355 ret = fork();
4356 if (ret == 0) {
4357 /* Child Process. */
4358 cpu_clone_regs(env, newsp);
4359 fork_end(1);
4360 /* There is a race condition here. The parent process could
4361 theoretically read the TID in the child process before the child
4362 tid is set. This would require using either ptrace
4363 (not implemented) or having *_tidptr to point at a shared memory
4364 mapping. We can't repeat the spinlock hack used above because
4365 the child process gets its own copy of the lock. */
4366 if (flags & CLONE_CHILD_SETTID)
4367 put_user_u32(gettid(), child_tidptr);
4368 if (flags & CLONE_PARENT_SETTID)
4369 put_user_u32(gettid(), parent_tidptr);
4370 ts = (TaskState *)env->opaque;
4371 if (flags & CLONE_SETTLS)
4372 cpu_set_tls (env, newtls);
4373 if (flags & CLONE_CHILD_CLEARTID)
4374 ts->child_tidptr = child_tidptr;
4375 } else {
4376 fork_end(0);
4379 return ret;
4382 /* warning: doesn't handle Linux-specific flags... */
4383 static int target_to_host_fcntl_cmd(int cmd)
4385 switch(cmd) {
4386 case TARGET_F_DUPFD:
4387 case TARGET_F_GETFD:
4388 case TARGET_F_SETFD:
4389 case TARGET_F_GETFL:
4390 case TARGET_F_SETFL:
4391 return cmd;
4392 case TARGET_F_GETLK:
4393 return F_GETLK;
4394 case TARGET_F_SETLK:
4395 return F_SETLK;
4396 case TARGET_F_SETLKW:
4397 return F_SETLKW;
4398 case TARGET_F_GETOWN:
4399 return F_GETOWN;
4400 case TARGET_F_SETOWN:
4401 return F_SETOWN;
4402 case TARGET_F_GETSIG:
4403 return F_GETSIG;
4404 case TARGET_F_SETSIG:
4405 return F_SETSIG;
4406 #if TARGET_ABI_BITS == 32
4407 case TARGET_F_GETLK64:
4408 return F_GETLK64;
4409 case TARGET_F_SETLK64:
4410 return F_SETLK64;
4411 case TARGET_F_SETLKW64:
4412 return F_SETLKW64;
4413 #endif
4414 case TARGET_F_SETLEASE:
4415 return F_SETLEASE;
4416 case TARGET_F_GETLEASE:
4417 return F_GETLEASE;
4418 #ifdef F_DUPFD_CLOEXEC
4419 case TARGET_F_DUPFD_CLOEXEC:
4420 return F_DUPFD_CLOEXEC;
4421 #endif
4422 case TARGET_F_NOTIFY:
4423 return F_NOTIFY;
4424 default:
4425 return -TARGET_EINVAL;
4427 return -TARGET_EINVAL;
4430 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4431 static const bitmask_transtbl flock_tbl[] = {
4432 TRANSTBL_CONVERT(F_RDLCK),
4433 TRANSTBL_CONVERT(F_WRLCK),
4434 TRANSTBL_CONVERT(F_UNLCK),
4435 TRANSTBL_CONVERT(F_EXLCK),
4436 TRANSTBL_CONVERT(F_SHLCK),
4437 { 0, 0, 0, 0 }
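/* do_fcntl(): translate the command and, for the locking commands, the
   guest's struct flock/flock64 layout (including the lock-type values via
   flock_tbl) into the host's, call the host fcntl(), and convert any
   returned lock or F_GETFL flags back to the target representation. */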
4440 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4442 struct flock fl;
4443 struct target_flock *target_fl;
4444 struct flock64 fl64;
4445 struct target_flock64 *target_fl64;
4446 abi_long ret;
4447 int host_cmd = target_to_host_fcntl_cmd(cmd);
4449 if (host_cmd == -TARGET_EINVAL)
4450 return host_cmd;
4452 switch(cmd) {
4453 case TARGET_F_GETLK:
4454 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4455 return -TARGET_EFAULT;
4456 fl.l_type =
4457 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4458 fl.l_whence = tswap16(target_fl->l_whence);
4459 fl.l_start = tswapal(target_fl->l_start);
4460 fl.l_len = tswapal(target_fl->l_len);
4461 fl.l_pid = tswap32(target_fl->l_pid);
4462 unlock_user_struct(target_fl, arg, 0);
4463 ret = get_errno(fcntl(fd, host_cmd, &fl));
4464 if (ret == 0) {
4465 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4466 return -TARGET_EFAULT;
4467 target_fl->l_type =
4468 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4469 target_fl->l_whence = tswap16(fl.l_whence);
4470 target_fl->l_start = tswapal(fl.l_start);
4471 target_fl->l_len = tswapal(fl.l_len);
4472 target_fl->l_pid = tswap32(fl.l_pid);
4473 unlock_user_struct(target_fl, arg, 1);
4475 break;
4477 case TARGET_F_SETLK:
4478 case TARGET_F_SETLKW:
4479 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4480 return -TARGET_EFAULT;
4481 fl.l_type =
4482 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4483 fl.l_whence = tswap16(target_fl->l_whence);
4484 fl.l_start = tswapal(target_fl->l_start);
4485 fl.l_len = tswapal(target_fl->l_len);
4486 fl.l_pid = tswap32(target_fl->l_pid);
4487 unlock_user_struct(target_fl, arg, 0);
4488 ret = get_errno(fcntl(fd, host_cmd, &fl));
4489 break;
4491 case TARGET_F_GETLK64:
4492 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4493 return -TARGET_EFAULT;
4494 fl64.l_type =
4495 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4496 fl64.l_whence = tswap16(target_fl64->l_whence);
4497 fl64.l_start = tswap64(target_fl64->l_start);
4498 fl64.l_len = tswap64(target_fl64->l_len);
4499 fl64.l_pid = tswap32(target_fl64->l_pid);
4500 unlock_user_struct(target_fl64, arg, 0);
4501 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4502 if (ret == 0) {
4503 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4504 return -TARGET_EFAULT;
4505 target_fl64->l_type =
4506 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4507 target_fl64->l_whence = tswap16(fl64.l_whence);
4508 target_fl64->l_start = tswap64(fl64.l_start);
4509 target_fl64->l_len = tswap64(fl64.l_len);
4510 target_fl64->l_pid = tswap32(fl64.l_pid);
4511 unlock_user_struct(target_fl64, arg, 1);
4513 break;
4514 case TARGET_F_SETLK64:
4515 case TARGET_F_SETLKW64:
4516 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4517 return -TARGET_EFAULT;
4518 fl64.l_type =
4519 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4520 fl64.l_whence = tswap16(target_fl64->l_whence);
4521 fl64.l_start = tswap64(target_fl64->l_start);
4522 fl64.l_len = tswap64(target_fl64->l_len);
4523 fl64.l_pid = tswap32(target_fl64->l_pid);
4524 unlock_user_struct(target_fl64, arg, 0);
4525 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4526 break;
4528 case TARGET_F_GETFL:
4529 ret = get_errno(fcntl(fd, host_cmd, arg));
4530 if (ret >= 0) {
4531 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4533 break;
4535 case TARGET_F_SETFL:
4536 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4537 break;
4539 case TARGET_F_SETOWN:
4540 case TARGET_F_GETOWN:
4541 case TARGET_F_SETSIG:
4542 case TARGET_F_GETSIG:
4543 case TARGET_F_SETLEASE:
4544 case TARGET_F_GETLEASE:
4545 ret = get_errno(fcntl(fd, host_cmd, arg));
4546 break;
4548 default:
4549 ret = get_errno(fcntl(fd, cmd, arg));
4550 break;
4552 return ret;
4555 #ifdef USE_UID16
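/* On targets whose legacy syscalls use 16-bit uid_t/gid_t, host IDs above
   65535 are reported to the guest as the overflow value 65534, while a
   16-bit -1 coming from the guest is widened back to -1; e.g. a host uid
   of 100000 appears to the guest as 65534. */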
4557 static inline int high2lowuid(int uid)
4559 if (uid > 65535)
4560 return 65534;
4561 else
4562 return uid;
4565 static inline int high2lowgid(int gid)
4567 if (gid > 65535)
4568 return 65534;
4569 else
4570 return gid;
4573 static inline int low2highuid(int uid)
4575 if ((int16_t)uid == -1)
4576 return -1;
4577 else
4578 return uid;
4581 static inline int low2highgid(int gid)
4583 if ((int16_t)gid == -1)
4584 return -1;
4585 else
4586 return gid;
4588 static inline int tswapid(int id)
4590 return tswap16(id);
4592 #else /* !USE_UID16 */
4593 static inline int high2lowuid(int uid)
4595 return uid;
4597 static inline int high2lowgid(int gid)
4599 return gid;
4601 static inline int low2highuid(int uid)
4603 return uid;
4605 static inline int low2highgid(int gid)
4607 return gid;
4609 static inline int tswapid(int id)
4611 return tswap32(id);
4613 #endif /* USE_UID16 */
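/* syscall_init(): one-time setup. Registers the structure layouts from
   syscall_types.h with the thunk code, builds target_to_host_errno_table[]
   as the inverse of host_to_target_errno_table[], patches the size field
   of every ioctl request whose size could not be encoded statically
   (marked with TARGET_IOC_SIZEMASK) and, when host and target share the
   same architecture, cross-checks that target and host ioctl numbers
   agree. */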
4615 void syscall_init(void)
4617 IOCTLEntry *ie;
4618 const argtype *arg_type;
4619 int size;
4620 int i;
4622 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4623 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4624 #include "syscall_types.h"
4625 #undef STRUCT
4626 #undef STRUCT_SPECIAL
4628 /* Build target_to_host_errno_table[] from
4629 * host_to_target_errno_table[]. */
4630 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4631 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4634 /* we patch the ioctl size if necessary. We rely on the fact that
4635 no ioctl has all the bits set to '1' in the size field */
4636 ie = ioctl_entries;
4637 while (ie->target_cmd != 0) {
4638 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4639 TARGET_IOC_SIZEMASK) {
4640 arg_type = ie->arg_type;
4641 if (arg_type[0] != TYPE_PTR) {
4642 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4643 ie->target_cmd);
4644 exit(1);
4646 arg_type++;
4647 size = thunk_type_size(arg_type, 0);
4648 ie->target_cmd = (ie->target_cmd &
4649 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4650 (size << TARGET_IOC_SIZESHIFT);
4653 /* automatic consistency check when host and target arch are the same */
4654 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4655 (defined(__x86_64__) && defined(TARGET_X86_64))
4656 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4657 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4658 ie->name, ie->target_cmd, ie->host_cmd);
4660 #endif
4661 ie++;
4665 #if TARGET_ABI_BITS == 32
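/* On 32-bit ABIs a 64-bit file offset arrives split across two registers;
   target_offset64() reassembles it according to the target's word order
   (e.g. on a little-endian target word0 is the low half). On 64-bit ABIs
   the first word already contains the whole offset. */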
4666 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4668 #ifdef TARGET_WORDS_BIGENDIAN
4669 return ((uint64_t)word0 << 32) | word1;
4670 #else
4671 return ((uint64_t)word1 << 32) | word0;
4672 #endif
4674 #else /* TARGET_ABI_BITS == 32 */
4675 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4677 return word0;
4679 #endif /* TARGET_ABI_BITS != 32 */
4681 #ifdef TARGET_NR_truncate64
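/* Some ABIs require 64-bit register pairs to start in an even-numbered
   register, leaving an unused slot before the pair; when regpairs_aligned()
   reports this, the offset halves actually live in arg3/arg4, so the
   arguments are shifted down before being combined. */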
4682 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4683 abi_long arg2,
4684 abi_long arg3,
4685 abi_long arg4)
4687 if (regpairs_aligned(cpu_env)) {
4688 arg2 = arg3;
4689 arg3 = arg4;
4691 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4693 #endif
4695 #ifdef TARGET_NR_ftruncate64
4696 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4697 abi_long arg2,
4698 abi_long arg3,
4699 abi_long arg4)
4701 if (regpairs_aligned(cpu_env)) {
4702 arg2 = arg3;
4703 arg3 = arg4;
4705 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4707 #endif
4709 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4710 abi_ulong target_addr)
4712 struct target_timespec *target_ts;
4714 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4715 return -TARGET_EFAULT;
4716 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4717 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4718 unlock_user_struct(target_ts, target_addr, 0);
4719 return 0;
4722 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4723 struct timespec *host_ts)
4725 struct target_timespec *target_ts;
4727 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4728 return -TARGET_EFAULT;
4729 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4730 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4731 unlock_user_struct(target_ts, target_addr, 1);
4732 return 0;
4735 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
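/* host_to_target_stat64(): fill the guest's stat64 structure (or the ARM
   EABI variant, or plain struct target_stat on 64-bit non-Alpha targets)
   from the host struct stat, byte-swapping every field and zeroing
   anything that has no host counterpart. */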
4736 static inline abi_long host_to_target_stat64(void *cpu_env,
4737 abi_ulong target_addr,
4738 struct stat *host_st)
4740 #ifdef TARGET_ARM
4741 if (((CPUARMState *)cpu_env)->eabi) {
4742 struct target_eabi_stat64 *target_st;
4744 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4745 return -TARGET_EFAULT;
4746 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4747 __put_user(host_st->st_dev, &target_st->st_dev);
4748 __put_user(host_st->st_ino, &target_st->st_ino);
4749 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4750 __put_user(host_st->st_ino, &target_st->__st_ino);
4751 #endif
4752 __put_user(host_st->st_mode, &target_st->st_mode);
4753 __put_user(host_st->st_nlink, &target_st->st_nlink);
4754 __put_user(host_st->st_uid, &target_st->st_uid);
4755 __put_user(host_st->st_gid, &target_st->st_gid);
4756 __put_user(host_st->st_rdev, &target_st->st_rdev);
4757 __put_user(host_st->st_size, &target_st->st_size);
4758 __put_user(host_st->st_blksize, &target_st->st_blksize);
4759 __put_user(host_st->st_blocks, &target_st->st_blocks);
4760 __put_user(host_st->st_atime, &target_st->target_st_atime);
4761 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4762 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4763 unlock_user_struct(target_st, target_addr, 1);
4764 } else
4765 #endif
4767 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4768 struct target_stat *target_st;
4769 #else
4770 struct target_stat64 *target_st;
4771 #endif
4773 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4774 return -TARGET_EFAULT;
4775 memset(target_st, 0, sizeof(*target_st));
4776 __put_user(host_st->st_dev, &target_st->st_dev);
4777 __put_user(host_st->st_ino, &target_st->st_ino);
4778 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4779 __put_user(host_st->st_ino, &target_st->__st_ino);
4780 #endif
4781 __put_user(host_st->st_mode, &target_st->st_mode);
4782 __put_user(host_st->st_nlink, &target_st->st_nlink);
4783 __put_user(host_st->st_uid, &target_st->st_uid);
4784 __put_user(host_st->st_gid, &target_st->st_gid);
4785 __put_user(host_st->st_rdev, &target_st->st_rdev);
4786 /* XXX: better use of kernel struct */
4787 __put_user(host_st->st_size, &target_st->st_size);
4788 __put_user(host_st->st_blksize, &target_st->st_blksize);
4789 __put_user(host_st->st_blocks, &target_st->st_blocks);
4790 __put_user(host_st->st_atime, &target_st->target_st_atime);
4791 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4792 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4793 unlock_user_struct(target_st, target_addr, 1);
4796 return 0;
4798 #endif
4800 /* ??? Using host futex calls even when target atomic operations
4801 are not really atomic probably breaks things. However, implementing
4802 futexes locally would make futexes shared between multiple processes
4803 tricky, and they would probably be useless anyway because guest atomic
4804 operations won't work either. */
4805 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4806 target_ulong uaddr2, int val3)
4808 struct timespec ts, *pts;
4809 int base_op;
4811 /* ??? We assume FUTEX_* constants are the same on both host
4812 and target. */
4813 #ifdef FUTEX_CMD_MASK
4814 base_op = op & FUTEX_CMD_MASK;
4815 #else
4816 base_op = op;
4817 #endif
4818 switch (base_op) {
4819 case FUTEX_WAIT:
4820 case FUTEX_WAIT_BITSET:
4821 if (timeout) {
4822 pts = &ts;
4823 target_to_host_timespec(pts, timeout);
4824 } else {
4825 pts = NULL;
4827 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4828 pts, NULL, val3));
4829 case FUTEX_WAKE:
4830 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4831 case FUTEX_FD:
4832 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4833 case FUTEX_REQUEUE:
4834 case FUTEX_CMP_REQUEUE:
4835 case FUTEX_WAKE_OP:
4836 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4837 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4838 But the prototype takes a `struct timespec *'; insert casts
4839 to satisfy the compiler. We do not need to tswap TIMEOUT
4840 since it's not compared to guest memory. */
4841 pts = (struct timespec *)(uintptr_t) timeout;
4842 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4843 g2h(uaddr2),
4844 (base_op == FUTEX_CMP_REQUEUE
4845 ? tswap32(val3)
4846 : val3)));
4847 default:
4848 return -TARGET_ENOSYS;
4852 /* Map host to target signal numbers for the wait family of syscalls.
4853 Assume all other status bits are the same. */
4854 int host_to_target_waitstatus(int status)
4856 if (WIFSIGNALED(status)) {
4857 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4859 if (WIFSTOPPED(status)) {
4860 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4861 | (status & 0xff);
4863 return status;
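/* get_osversion(): parse the kernel release string "major.minor.patch"
   into (major << 16) | (minor << 8) | patch, e.g. "3.2.0" -> 0x030200.
   A release supplied on the command line (qemu_uname_release, the -r
   option) takes precedence over the host's uname(); the result is
   cached. */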
4866 int get_osversion(void)
4868 static int osversion;
4869 struct new_utsname buf;
4870 const char *s;
4871 int i, n, tmp;
4872 if (osversion)
4873 return osversion;
4874 if (qemu_uname_release && *qemu_uname_release) {
4875 s = qemu_uname_release;
4876 } else {
4877 if (sys_uname(&buf))
4878 return 0;
4879 s = buf.release;
4881 tmp = 0;
4882 for (i = 0; i < 3; i++) {
4883 n = 0;
4884 while (*s >= '0' && *s <= '9') {
4885 n *= 10;
4886 n += *s - '0';
4887 s++;
4889 tmp = (tmp << 8) + n;
4890 if (*s == '.')
4891 s++;
4893 osversion = tmp;
4894 return osversion;
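/* The open_self_*() helpers below synthesize the contents of the guest's
   /proc/self/{maps,stat,auxv}: maps is rebuilt from the host's
   /proc/self/maps with addresses translated back into guest space (plus a
   synthetic [stack] line on some targets), stat fakes a minimal 44-field
   line with only the pid, command name and stack start filled in, and
   auxv dumps the auxiliary vector saved when the binary was loaded. */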
4898 static int open_self_maps(void *cpu_env, int fd)
4900 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4901 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4902 #endif
4903 FILE *fp;
4904 char *line = NULL;
4905 size_t len = 0;
4906 ssize_t read;
4908 fp = fopen("/proc/self/maps", "r");
4909 if (fp == NULL) {
4910 return -EACCES;
4913 while ((read = getline(&line, &len, fp)) != -1) {
4914 int fields, dev_maj, dev_min, inode;
4915 uint64_t min, max, offset;
4916 char flag_r, flag_w, flag_x, flag_p;
4917 char path[512] = "";
4918 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4919 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4920 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4922 if ((fields < 10) || (fields > 11)) {
4923 continue;
4925 if (!strncmp(path, "[stack]", 7)) {
4926 continue;
4928 if (h2g_valid(min) && h2g_valid(max)) {
4929 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
4930 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
4931 h2g(min), h2g(max), flag_r, flag_w,
4932 flag_x, flag_p, offset, dev_maj, dev_min, inode,
4933 path[0] ? " " : "", path);
4937 free(line);
4938 fclose(fp);
4940 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4941 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4942 (unsigned long long)ts->info->stack_limit,
4943 (unsigned long long)(ts->info->start_stack +
4944 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
4945 (unsigned long long)0);
4946 #endif
4948 return 0;
4951 static int open_self_stat(void *cpu_env, int fd)
4953 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4954 abi_ulong start_stack = ts->info->start_stack;
4955 int i;
4957 for (i = 0; i < 44; i++) {
4958 char buf[128];
4959 int len;
4960 uint64_t val = 0;
4962 if (i == 0) {
4963 /* pid */
4964 val = getpid();
4965 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4966 } else if (i == 1) {
4967 /* app name */
4968 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
4969 } else if (i == 27) {
4970 /* stack bottom */
4971 val = start_stack;
4972 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4973 } else {
4974 /* all remaining fields are reported as 0 */
4975 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
4978 len = strlen(buf);
4979 if (write(fd, buf, len) != len) {
4980 return -1;
4984 return 0;
4987 static int open_self_auxv(void *cpu_env, int fd)
4989 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4990 abi_ulong auxv = ts->info->saved_auxv;
4991 abi_ulong len = ts->info->auxv_len;
4992 char *ptr;
4995 * The auxiliary vector is stored on the target process's stack;
4996 * read the whole vector and copy it out to the file.
4998 ptr = lock_user(VERIFY_READ, auxv, len, 0);
4999 if (ptr != NULL) {
5000 while (len > 0) {
5001 ssize_t r;
5002 r = write(fd, ptr, len);
5003 if (r <= 0) {
5004 break;
5006 len -= r;
5007 ptr += r;
5009 lseek(fd, 0, SEEK_SET);
5010 unlock_user(ptr, auxv, len);
5013 return 0;
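/* is_proc_myself(): return 1 if filename names the given entry under
   /proc/self/ or /proc/<pid>/ for our own pid, 0 otherwise. */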
5016 static int is_proc_myself(const char *filename, const char *entry)
5018 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5019 filename += strlen("/proc/");
5020 if (!strncmp(filename, "self/", strlen("self/"))) {
5021 filename += strlen("self/");
5022 } else if (*filename >= '1' && *filename <= '9') {
5023 char myself[80];
5024 snprintf(myself, sizeof(myself), "%d/", getpid());
5025 if (!strncmp(filename, myself, strlen(myself))) {
5026 filename += strlen(myself);
5027 } else {
5028 return 0;
5030 } else {
5031 return 0;
5033 if (!strcmp(filename, entry)) {
5034 return 1;
5037 return 0;
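/* do_open(): if the path refers to one of the emulated /proc entries
   above, write the synthesized contents into an unlinked temporary file
   and hand that descriptor to the guest; otherwise fall through to the
   host open() (path() applies the guest-root prefix remapping set with
   -L). */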
5040 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5042 struct fake_open {
5043 const char *filename;
5044 int (*fill)(void *cpu_env, int fd);
5046 const struct fake_open *fake_open;
5047 static const struct fake_open fakes[] = {
5048 { "maps", open_self_maps },
5049 { "stat", open_self_stat },
5050 { "auxv", open_self_auxv },
5051 { NULL, NULL }
5054 for (fake_open = fakes; fake_open->filename; fake_open++) {
5055 if (is_proc_myself(pathname, fake_open->filename)) {
5056 break;
5060 if (fake_open->filename) {
5061 const char *tmpdir;
5062 char filename[PATH_MAX];
5063 int fd, r;
5065 /* create a temporary file to hold the synthesized contents */
5066 tmpdir = getenv("TMPDIR");
5067 if (!tmpdir)
5068 tmpdir = "/tmp";
5069 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5070 fd = mkstemp(filename);
5071 if (fd < 0) {
5072 return fd;
5074 unlink(filename);
5076 if ((r = fake_open->fill(cpu_env, fd))) {
5077 close(fd);
5078 return r;
5080 lseek(fd, 0, SEEK_SET);
5082 return fd;
5085 return get_errno(open(path(pathname), flags, mode));
5088 /* do_syscall() should always have a single exit point at the end so
5089 that actions, such as logging of syscall results, can be performed.
5090 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5091 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5092 abi_long arg2, abi_long arg3, abi_long arg4,
5093 abi_long arg5, abi_long arg6, abi_long arg7,
5094 abi_long arg8)
5096 CPUState *cpu = ENV_GET_CPU(cpu_env);
5097 abi_long ret;
5098 struct stat st;
5099 struct statfs stfs;
5100 void *p;
5102 #ifdef DEBUG
5103 gemu_log("syscall %d", num);
5104 #endif
5105 if(do_strace)
5106 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5108 switch(num) {
5109 case TARGET_NR_exit:
5110 /* In old applications this may be used to implement _exit(2).
5111 However, in threaded applications it is used for thread termination,
5112 and _exit_group is used for application termination.
5113 Do thread termination if we have more than one thread. */
5114 /* FIXME: This probably breaks if a signal arrives. We should probably
5115 be disabling signals. */
5116 if (CPU_NEXT(first_cpu)) {
5117 TaskState *ts;
5119 cpu_list_lock();
5120 /* Remove the CPU from the list. */
5121 QTAILQ_REMOVE(&cpus, cpu, node);
5122 cpu_list_unlock();
5123 ts = ((CPUArchState *)cpu_env)->opaque;
5124 if (ts->child_tidptr) {
5125 put_user_u32(0, ts->child_tidptr);
5126 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5127 NULL, NULL, 0);
5129 thread_cpu = NULL;
5130 object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
5131 g_free(ts);
5132 pthread_exit(NULL);
5134 #ifdef TARGET_GPROF
5135 _mcleanup();
5136 #endif
5137 gdb_exit(cpu_env, arg1);
5138 _exit(arg1);
5139 ret = 0; /* avoid warning */
5140 break;
5141 case TARGET_NR_read:
5142 if (arg3 == 0)
5143 ret = 0;
5144 else {
5145 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5146 goto efault;
5147 ret = get_errno(read(arg1, p, arg3));
5148 unlock_user(p, arg2, ret);
5150 break;
5151 case TARGET_NR_write:
5152 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5153 goto efault;
5154 ret = get_errno(write(arg1, p, arg3));
5155 unlock_user(p, arg2, 0);
5156 break;
5157 case TARGET_NR_open:
5158 if (!(p = lock_user_string(arg1)))
5159 goto efault;
5160 ret = get_errno(do_open(cpu_env, p,
5161 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5162 arg3));
5163 unlock_user(p, arg1, 0);
5164 break;
5165 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5166 case TARGET_NR_openat:
5167 if (!(p = lock_user_string(arg2)))
5168 goto efault;
5169 ret = get_errno(sys_openat(arg1,
5170 path(p),
5171 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5172 arg4));
5173 unlock_user(p, arg2, 0);
5174 break;
5175 #endif
5176 case TARGET_NR_close:
5177 ret = get_errno(close(arg1));
5178 break;
5179 case TARGET_NR_brk:
5180 ret = do_brk(arg1);
5181 break;
5182 case TARGET_NR_fork:
5183 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5184 break;
5185 #ifdef TARGET_NR_waitpid
5186 case TARGET_NR_waitpid:
5188 int status;
5189 ret = get_errno(waitpid(arg1, &status, arg3));
5190 if (!is_error(ret) && arg2 && ret
5191 && put_user_s32(host_to_target_waitstatus(status), arg2))
5192 goto efault;
5194 break;
5195 #endif
5196 #ifdef TARGET_NR_waitid
5197 case TARGET_NR_waitid:
5199 siginfo_t info;
5200 info.si_pid = 0;
5201 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5202 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5203 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5204 goto efault;
5205 host_to_target_siginfo(p, &info);
5206 unlock_user(p, arg3, sizeof(target_siginfo_t));
5209 break;
5210 #endif
5211 #ifdef TARGET_NR_creat /* not on alpha */
5212 case TARGET_NR_creat:
5213 if (!(p = lock_user_string(arg1)))
5214 goto efault;
5215 ret = get_errno(creat(p, arg2));
5216 unlock_user(p, arg1, 0);
5217 break;
5218 #endif
5219 case TARGET_NR_link:
5221 void * p2;
5222 p = lock_user_string(arg1);
5223 p2 = lock_user_string(arg2);
5224 if (!p || !p2)
5225 ret = -TARGET_EFAULT;
5226 else
5227 ret = get_errno(link(p, p2));
5228 unlock_user(p2, arg2, 0);
5229 unlock_user(p, arg1, 0);
5231 break;
5232 #if defined(TARGET_NR_linkat)
5233 case TARGET_NR_linkat:
5235 void * p2 = NULL;
5236 if (!arg2 || !arg4)
5237 goto efault;
5238 p = lock_user_string(arg2);
5239 p2 = lock_user_string(arg4);
5240 if (!p || !p2)
5241 ret = -TARGET_EFAULT;
5242 else
5243 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5244 unlock_user(p, arg2, 0);
5245 unlock_user(p2, arg4, 0);
5247 break;
5248 #endif
5249 case TARGET_NR_unlink:
5250 if (!(p = lock_user_string(arg1)))
5251 goto efault;
5252 ret = get_errno(unlink(p));
5253 unlock_user(p, arg1, 0);
5254 break;
5255 #if defined(TARGET_NR_unlinkat)
5256 case TARGET_NR_unlinkat:
5257 if (!(p = lock_user_string(arg2)))
5258 goto efault;
5259 ret = get_errno(unlinkat(arg1, p, arg3));
5260 unlock_user(p, arg2, 0);
5261 break;
5262 #endif
5263 case TARGET_NR_execve:
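/* Count the guest argv/envp entries, lock every string into host memory
   and build host-side argv[]/envp[] arrays for the host execve(); all
   locked strings are released again on the execve_end path, and the
   combined string size is checked against MAX_ARG_PAGES to mimic the
   target's argument-size limit. */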
5265 char **argp, **envp;
5266 int argc, envc;
5267 abi_ulong gp;
5268 abi_ulong guest_argp;
5269 abi_ulong guest_envp;
5270 abi_ulong addr;
5271 char **q;
5272 int total_size = 0;
5274 argc = 0;
5275 guest_argp = arg2;
5276 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5277 if (get_user_ual(addr, gp))
5278 goto efault;
5279 if (!addr)
5280 break;
5281 argc++;
5283 envc = 0;
5284 guest_envp = arg3;
5285 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5286 if (get_user_ual(addr, gp))
5287 goto efault;
5288 if (!addr)
5289 break;
5290 envc++;
5293 argp = alloca((argc + 1) * sizeof(void *));
5294 envp = alloca((envc + 1) * sizeof(void *));
5296 for (gp = guest_argp, q = argp; gp;
5297 gp += sizeof(abi_ulong), q++) {
5298 if (get_user_ual(addr, gp))
5299 goto execve_efault;
5300 if (!addr)
5301 break;
5302 if (!(*q = lock_user_string(addr)))
5303 goto execve_efault;
5304 total_size += strlen(*q) + 1;
5306 *q = NULL;
5308 for (gp = guest_envp, q = envp; gp;
5309 gp += sizeof(abi_ulong), q++) {
5310 if (get_user_ual(addr, gp))
5311 goto execve_efault;
5312 if (!addr)
5313 break;
5314 if (!(*q = lock_user_string(addr)))
5315 goto execve_efault;
5316 total_size += strlen(*q) + 1;
5318 *q = NULL;
5320 /* This case will not be caught by the host's execve() if its
5321 page size is bigger than the target's. */
5322 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5323 ret = -TARGET_E2BIG;
5324 goto execve_end;
5326 if (!(p = lock_user_string(arg1)))
5327 goto execve_efault;
5328 ret = get_errno(execve(p, argp, envp));
5329 unlock_user(p, arg1, 0);
5331 goto execve_end;
5333 execve_efault:
5334 ret = -TARGET_EFAULT;
5336 execve_end:
5337 for (gp = guest_argp, q = argp; *q;
5338 gp += sizeof(abi_ulong), q++) {
5339 if (get_user_ual(addr, gp)
5340 || !addr)
5341 break;
5342 unlock_user(*q, addr, 0);
5344 for (gp = guest_envp, q = envp; *q;
5345 gp += sizeof(abi_ulong), q++) {
5346 if (get_user_ual(addr, gp)
5347 || !addr)
5348 break;
5349 unlock_user(*q, addr, 0);
5352 break;
5353 case TARGET_NR_chdir:
5354 if (!(p = lock_user_string(arg1)))
5355 goto efault;
5356 ret = get_errno(chdir(p));
5357 unlock_user(p, arg1, 0);
5358 break;
5359 #ifdef TARGET_NR_time
5360 case TARGET_NR_time:
5362 time_t host_time;
5363 ret = get_errno(time(&host_time));
5364 if (!is_error(ret)
5365 && arg1
5366 && put_user_sal(host_time, arg1))
5367 goto efault;
5369 break;
5370 #endif
5371 case TARGET_NR_mknod:
5372 if (!(p = lock_user_string(arg1)))
5373 goto efault;
5374 ret = get_errno(mknod(p, arg2, arg3));
5375 unlock_user(p, arg1, 0);
5376 break;
5377 #if defined(TARGET_NR_mknodat)
5378 case TARGET_NR_mknodat:
5379 if (!(p = lock_user_string(arg2)))
5380 goto efault;
5381 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5382 unlock_user(p, arg2, 0);
5383 break;
5384 #endif
5385 case TARGET_NR_chmod:
5386 if (!(p = lock_user_string(arg1)))
5387 goto efault;
5388 ret = get_errno(chmod(p, arg2));
5389 unlock_user(p, arg1, 0);
5390 break;
5391 #ifdef TARGET_NR_break
5392 case TARGET_NR_break:
5393 goto unimplemented;
5394 #endif
5395 #ifdef TARGET_NR_oldstat
5396 case TARGET_NR_oldstat:
5397 goto unimplemented;
5398 #endif
5399 case TARGET_NR_lseek:
5400 ret = get_errno(lseek(arg1, arg2, arg3));
5401 break;
5402 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5403 /* Alpha specific */
5404 case TARGET_NR_getxpid:
5405 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5406 ret = get_errno(getpid());
5407 break;
5408 #endif
5409 #ifdef TARGET_NR_getpid
5410 case TARGET_NR_getpid:
5411 ret = get_errno(getpid());
5412 break;
5413 #endif
5414 case TARGET_NR_mount:
5416 /* need to look at the data field */
5417 void *p2, *p3;
5418 p = lock_user_string(arg1);
5419 p2 = lock_user_string(arg2);
5420 p3 = lock_user_string(arg3);
5421 if (!p || !p2 || !p3)
5422 ret = -TARGET_EFAULT;
5423 else {
5424 /* FIXME - arg5 should be locked, but it isn't clear how to
5425 * do that since it's not guaranteed to be a NULL-terminated
5426 * string.
5428 if ( ! arg5 )
5429 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5430 else
5431 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5433 unlock_user(p, arg1, 0);
5434 unlock_user(p2, arg2, 0);
5435 unlock_user(p3, arg3, 0);
5436 break;
5438 #ifdef TARGET_NR_umount
5439 case TARGET_NR_umount:
5440 if (!(p = lock_user_string(arg1)))
5441 goto efault;
5442 ret = get_errno(umount(p));
5443 unlock_user(p, arg1, 0);
5444 break;
5445 #endif
5446 #ifdef TARGET_NR_stime /* not on alpha */
5447 case TARGET_NR_stime:
5449 time_t host_time;
5450 if (get_user_sal(host_time, arg1))
5451 goto efault;
5452 ret = get_errno(stime(&host_time));
5454 break;
5455 #endif
5456 case TARGET_NR_ptrace:
5457 goto unimplemented;
5458 #ifdef TARGET_NR_alarm /* not on alpha */
5459 case TARGET_NR_alarm:
5460 ret = alarm(arg1);
5461 break;
5462 #endif
5463 #ifdef TARGET_NR_oldfstat
5464 case TARGET_NR_oldfstat:
5465 goto unimplemented;
5466 #endif
5467 #ifdef TARGET_NR_pause /* not on alpha */
5468 case TARGET_NR_pause:
5469 ret = get_errno(pause());
5470 break;
5471 #endif
5472 #ifdef TARGET_NR_utime
5473 case TARGET_NR_utime:
5475 struct utimbuf tbuf, *host_tbuf;
5476 struct target_utimbuf *target_tbuf;
5477 if (arg2) {
5478 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5479 goto efault;
5480 tbuf.actime = tswapal(target_tbuf->actime);
5481 tbuf.modtime = tswapal(target_tbuf->modtime);
5482 unlock_user_struct(target_tbuf, arg2, 0);
5483 host_tbuf = &tbuf;
5484 } else {
5485 host_tbuf = NULL;
5487 if (!(p = lock_user_string(arg1)))
5488 goto efault;
5489 ret = get_errno(utime(p, host_tbuf));
5490 unlock_user(p, arg1, 0);
5492 break;
5493 #endif
5494 case TARGET_NR_utimes:
5496 struct timeval *tvp, tv[2];
5497 if (arg2) {
5498 if (copy_from_user_timeval(&tv[0], arg2)
5499 || copy_from_user_timeval(&tv[1],
5500 arg2 + sizeof(struct target_timeval)))
5501 goto efault;
5502 tvp = tv;
5503 } else {
5504 tvp = NULL;
5506 if (!(p = lock_user_string(arg1)))
5507 goto efault;
5508 ret = get_errno(utimes(p, tvp));
5509 unlock_user(p, arg1, 0);
5511 break;
5512 #if defined(TARGET_NR_futimesat)
5513 case TARGET_NR_futimesat:
5515 struct timeval *tvp, tv[2];
5516 if (arg3) {
5517 if (copy_from_user_timeval(&tv[0], arg3)
5518 || copy_from_user_timeval(&tv[1],
5519 arg3 + sizeof(struct target_timeval)))
5520 goto efault;
5521 tvp = tv;
5522 } else {
5523 tvp = NULL;
5525 if (!(p = lock_user_string(arg2)))
5526 goto efault;
5527 ret = get_errno(futimesat(arg1, path(p), tvp));
5528 unlock_user(p, arg2, 0);
5530 break;
5531 #endif
5532 #ifdef TARGET_NR_stty
5533 case TARGET_NR_stty:
5534 goto unimplemented;
5535 #endif
5536 #ifdef TARGET_NR_gtty
5537 case TARGET_NR_gtty:
5538 goto unimplemented;
5539 #endif
5540 case TARGET_NR_access:
5541 if (!(p = lock_user_string(arg1)))
5542 goto efault;
5543 ret = get_errno(access(path(p), arg2));
5544 unlock_user(p, arg1, 0);
5545 break;
5546 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5547 case TARGET_NR_faccessat:
5548 if (!(p = lock_user_string(arg2)))
5549 goto efault;
5550 ret = get_errno(faccessat(arg1, p, arg3, 0));
5551 unlock_user(p, arg2, 0);
5552 break;
5553 #endif
5554 #ifdef TARGET_NR_nice /* not on alpha */
5555 case TARGET_NR_nice:
5556 ret = get_errno(nice(arg1));
5557 break;
5558 #endif
5559 #ifdef TARGET_NR_ftime
5560 case TARGET_NR_ftime:
5561 goto unimplemented;
5562 #endif
5563 case TARGET_NR_sync:
5564 sync();
5565 ret = 0;
5566 break;
5567 case TARGET_NR_kill:
5568 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5569 break;
5570 case TARGET_NR_rename:
5572 void *p2;
5573 p = lock_user_string(arg1);
5574 p2 = lock_user_string(arg2);
5575 if (!p || !p2)
5576 ret = -TARGET_EFAULT;
5577 else
5578 ret = get_errno(rename(p, p2));
5579 unlock_user(p2, arg2, 0);
5580 unlock_user(p, arg1, 0);
5582 break;
5583 #if defined(TARGET_NR_renameat)
5584 case TARGET_NR_renameat:
5586 void *p2;
5587 p = lock_user_string(arg2);
5588 p2 = lock_user_string(arg4);
5589 if (!p || !p2)
5590 ret = -TARGET_EFAULT;
5591 else
5592 ret = get_errno(renameat(arg1, p, arg3, p2));
5593 unlock_user(p2, arg4, 0);
5594 unlock_user(p, arg2, 0);
5596 break;
5597 #endif
5598 case TARGET_NR_mkdir:
5599 if (!(p = lock_user_string(arg1)))
5600 goto efault;
5601 ret = get_errno(mkdir(p, arg2));
5602 unlock_user(p, arg1, 0);
5603 break;
5604 #if defined(TARGET_NR_mkdirat)
5605 case TARGET_NR_mkdirat:
5606 if (!(p = lock_user_string(arg2)))
5607 goto efault;
5608 ret = get_errno(mkdirat(arg1, p, arg3));
5609 unlock_user(p, arg2, 0);
5610 break;
5611 #endif
5612 case TARGET_NR_rmdir:
5613 if (!(p = lock_user_string(arg1)))
5614 goto efault;
5615 ret = get_errno(rmdir(p));
5616 unlock_user(p, arg1, 0);
5617 break;
5618 case TARGET_NR_dup:
5619 ret = get_errno(dup(arg1));
5620 break;
5621 case TARGET_NR_pipe:
5622 ret = do_pipe(cpu_env, arg1, 0, 0);
5623 break;
5624 #ifdef TARGET_NR_pipe2
5625 case TARGET_NR_pipe2:
5626 ret = do_pipe(cpu_env, arg1,
5627 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5628 break;
5629 #endif
5630 case TARGET_NR_times:
5632 struct target_tms *tmsp;
5633 struct tms tms;
5634 ret = get_errno(times(&tms));
5635 if (arg1) {
5636 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5637 if (!tmsp)
5638 goto efault;
5639 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5640 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5641 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5642 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5644 if (!is_error(ret))
5645 ret = host_to_target_clock_t(ret);
5647 break;
5648 #ifdef TARGET_NR_prof
5649 case TARGET_NR_prof:
5650 goto unimplemented;
5651 #endif
5652 #ifdef TARGET_NR_signal
5653 case TARGET_NR_signal:
5654 goto unimplemented;
5655 #endif
5656 case TARGET_NR_acct:
5657 if (arg1 == 0) {
5658 ret = get_errno(acct(NULL));
5659 } else {
5660 if (!(p = lock_user_string(arg1)))
5661 goto efault;
5662 ret = get_errno(acct(path(p)));
5663 unlock_user(p, arg1, 0);
5665 break;
5666 #ifdef TARGET_NR_umount2 /* not on alpha */
5667 case TARGET_NR_umount2:
5668 if (!(p = lock_user_string(arg1)))
5669 goto efault;
5670 ret = get_errno(umount2(p, arg2));
5671 unlock_user(p, arg1, 0);
5672 break;
5673 #endif
5674 #ifdef TARGET_NR_lock
5675 case TARGET_NR_lock:
5676 goto unimplemented;
5677 #endif
5678 case TARGET_NR_ioctl:
5679 ret = do_ioctl(arg1, arg2, arg3);
5680 break;
5681 case TARGET_NR_fcntl:
5682 ret = do_fcntl(arg1, arg2, arg3);
5683 break;
5684 #ifdef TARGET_NR_mpx
5685 case TARGET_NR_mpx:
5686 goto unimplemented;
5687 #endif
5688 case TARGET_NR_setpgid:
5689 ret = get_errno(setpgid(arg1, arg2));
5690 break;
5691 #ifdef TARGET_NR_ulimit
5692 case TARGET_NR_ulimit:
5693 goto unimplemented;
5694 #endif
5695 #ifdef TARGET_NR_oldolduname
5696 case TARGET_NR_oldolduname:
5697 goto unimplemented;
5698 #endif
5699 case TARGET_NR_umask:
5700 ret = get_errno(umask(arg1));
5701 break;
5702 case TARGET_NR_chroot:
5703 if (!(p = lock_user_string(arg1)))
5704 goto efault;
5705 ret = get_errno(chroot(p));
5706 unlock_user(p, arg1, 0);
5707 break;
5708 case TARGET_NR_ustat:
5709 goto unimplemented;
5710 case TARGET_NR_dup2:
5711 ret = get_errno(dup2(arg1, arg2));
5712 break;
5713 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5714 case TARGET_NR_dup3:
5715 ret = get_errno(dup3(arg1, arg2, arg3));
5716 break;
5717 #endif
5718 #ifdef TARGET_NR_getppid /* not on alpha */
5719 case TARGET_NR_getppid:
5720 ret = get_errno(getppid());
5721 break;
5722 #endif
5723 case TARGET_NR_getpgrp:
5724 ret = get_errno(getpgrp());
5725 break;
5726 case TARGET_NR_setsid:
5727 ret = get_errno(setsid());
5728 break;
5729 #ifdef TARGET_NR_sigaction
5730 case TARGET_NR_sigaction:
5732 #if defined(TARGET_ALPHA)
5733 struct target_sigaction act, oact, *pact = 0;
5734 struct target_old_sigaction *old_act;
5735 if (arg2) {
5736 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5737 goto efault;
5738 act._sa_handler = old_act->_sa_handler;
5739 target_siginitset(&act.sa_mask, old_act->sa_mask);
5740 act.sa_flags = old_act->sa_flags;
5741 act.sa_restorer = 0;
5742 unlock_user_struct(old_act, arg2, 0);
5743 pact = &act;
5745 ret = get_errno(do_sigaction(arg1, pact, &oact));
5746 if (!is_error(ret) && arg3) {
5747 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5748 goto efault;
5749 old_act->_sa_handler = oact._sa_handler;
5750 old_act->sa_mask = oact.sa_mask.sig[0];
5751 old_act->sa_flags = oact.sa_flags;
5752 unlock_user_struct(old_act, arg3, 1);
5754 #elif defined(TARGET_MIPS)
5755 struct target_sigaction act, oact, *pact, *old_act;
5757 if (arg2) {
5758 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5759 goto efault;
5760 act._sa_handler = old_act->_sa_handler;
5761 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5762 act.sa_flags = old_act->sa_flags;
5763 unlock_user_struct(old_act, arg2, 0);
5764 pact = &act;
5765 } else {
5766 pact = NULL;
5769 ret = get_errno(do_sigaction(arg1, pact, &oact));
5771 if (!is_error(ret) && arg3) {
5772 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5773 goto efault;
5774 old_act->_sa_handler = oact._sa_handler;
5775 old_act->sa_flags = oact.sa_flags;
5776 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5777 old_act->sa_mask.sig[1] = 0;
5778 old_act->sa_mask.sig[2] = 0;
5779 old_act->sa_mask.sig[3] = 0;
5780 unlock_user_struct(old_act, arg3, 1);
5782 #else
5783 struct target_old_sigaction *old_act;
5784 struct target_sigaction act, oact, *pact;
5785 if (arg2) {
5786 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5787 goto efault;
5788 act._sa_handler = old_act->_sa_handler;
5789 target_siginitset(&act.sa_mask, old_act->sa_mask);
5790 act.sa_flags = old_act->sa_flags;
5791 act.sa_restorer = old_act->sa_restorer;
5792 unlock_user_struct(old_act, arg2, 0);
5793 pact = &act;
5794 } else {
5795 pact = NULL;
5797 ret = get_errno(do_sigaction(arg1, pact, &oact));
5798 if (!is_error(ret) && arg3) {
5799 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5800 goto efault;
5801 old_act->_sa_handler = oact._sa_handler;
5802 old_act->sa_mask = oact.sa_mask.sig[0];
5803 old_act->sa_flags = oact.sa_flags;
5804 old_act->sa_restorer = oact.sa_restorer;
5805 unlock_user_struct(old_act, arg3, 1);
5807 #endif
5809 break;
5810 #endif
5811 case TARGET_NR_rt_sigaction:
5813 #if defined(TARGET_ALPHA)
5814 struct target_sigaction act, oact, *pact = 0;
5815 struct target_rt_sigaction *rt_act;
5816 /* ??? arg4 == sizeof(sigset_t). */
5817 if (arg2) {
5818 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5819 goto efault;
5820 act._sa_handler = rt_act->_sa_handler;
5821 act.sa_mask = rt_act->sa_mask;
5822 act.sa_flags = rt_act->sa_flags;
5823 act.sa_restorer = arg5;
5824 unlock_user_struct(rt_act, arg2, 0);
5825 pact = &act;
5827 ret = get_errno(do_sigaction(arg1, pact, &oact));
5828 if (!is_error(ret) && arg3) {
5829 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5830 goto efault;
5831 rt_act->_sa_handler = oact._sa_handler;
5832 rt_act->sa_mask = oact.sa_mask;
5833 rt_act->sa_flags = oact.sa_flags;
5834 unlock_user_struct(rt_act, arg3, 1);
5836 #else
5837 struct target_sigaction *act;
5838 struct target_sigaction *oact;
5840 if (arg2) {
5841 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5842 goto efault;
5843 } else
5844 act = NULL;
5845 if (arg3) {
5846 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5847 ret = -TARGET_EFAULT;
5848 goto rt_sigaction_fail;
5850 } else
5851 oact = NULL;
5852 ret = get_errno(do_sigaction(arg1, act, oact));
5853 rt_sigaction_fail:
5854 if (act)
5855 unlock_user_struct(act, arg2, 0);
5856 if (oact)
5857 unlock_user_struct(oact, arg3, 1);
5858 #endif
5860 break;
5861 #ifdef TARGET_NR_sgetmask /* not on alpha */
5862 case TARGET_NR_sgetmask:
5864 sigset_t cur_set;
5865 abi_ulong target_set;
5866 sigprocmask(0, NULL, &cur_set);
5867 host_to_target_old_sigset(&target_set, &cur_set);
5868 ret = target_set;
5870 break;
5871 #endif
5872 #ifdef TARGET_NR_ssetmask /* not on alpha */
5873 case TARGET_NR_ssetmask:
5875 sigset_t set, oset, cur_set;
5876 abi_ulong target_set = arg1;
5877 sigprocmask(0, NULL, &cur_set);
5878 target_to_host_old_sigset(&set, &target_set);
5879 sigorset(&set, &set, &cur_set);
5880 sigprocmask(SIG_SETMASK, &set, &oset);
5881 host_to_target_old_sigset(&target_set, &oset);
5882 ret = target_set;
5884 break;
5885 #endif
5886 #ifdef TARGET_NR_sigprocmask
5887 case TARGET_NR_sigprocmask:
5889 #if defined(TARGET_ALPHA)
5890 sigset_t set, oldset;
5891 abi_ulong mask;
5892 int how;
5894 switch (arg1) {
5895 case TARGET_SIG_BLOCK:
5896 how = SIG_BLOCK;
5897 break;
5898 case TARGET_SIG_UNBLOCK:
5899 how = SIG_UNBLOCK;
5900 break;
5901 case TARGET_SIG_SETMASK:
5902 how = SIG_SETMASK;
5903 break;
5904 default:
5905 ret = -TARGET_EINVAL;
5906 goto fail;
5908 mask = arg2;
5909 target_to_host_old_sigset(&set, &mask);
5911 ret = get_errno(sigprocmask(how, &set, &oldset));
5912 if (!is_error(ret)) {
5913 host_to_target_old_sigset(&mask, &oldset);
5914 ret = mask;
5915 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5917 #else
5918 sigset_t set, oldset, *set_ptr;
5919 int how;
5921 if (arg2) {
5922 switch (arg1) {
5923 case TARGET_SIG_BLOCK:
5924 how = SIG_BLOCK;
5925 break;
5926 case TARGET_SIG_UNBLOCK:
5927 how = SIG_UNBLOCK;
5928 break;
5929 case TARGET_SIG_SETMASK:
5930 how = SIG_SETMASK;
5931 break;
5932 default:
5933 ret = -TARGET_EINVAL;
5934 goto fail;
5936 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5937 goto efault;
5938 target_to_host_old_sigset(&set, p);
5939 unlock_user(p, arg2, 0);
5940 set_ptr = &set;
5941 } else {
5942 how = 0;
5943 set_ptr = NULL;
5945 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5946 if (!is_error(ret) && arg3) {
5947 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5948 goto efault;
5949 host_to_target_old_sigset(p, &oldset);
5950 unlock_user(p, arg3, sizeof(target_sigset_t));
5952 #endif
5954 break;
5955 #endif
5956 case TARGET_NR_rt_sigprocmask:
5958 int how = arg1;
5959 sigset_t set, oldset, *set_ptr;
5961 if (arg2) {
5962 switch(how) {
5963 case TARGET_SIG_BLOCK:
5964 how = SIG_BLOCK;
5965 break;
5966 case TARGET_SIG_UNBLOCK:
5967 how = SIG_UNBLOCK;
5968 break;
5969 case TARGET_SIG_SETMASK:
5970 how = SIG_SETMASK;
5971 break;
5972 default:
5973 ret = -TARGET_EINVAL;
5974 goto fail;
5976 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5977 goto efault;
5978 target_to_host_sigset(&set, p);
5979 unlock_user(p, arg2, 0);
5980 set_ptr = &set;
5981 } else {
5982 how = 0;
5983 set_ptr = NULL;
5985 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5986 if (!is_error(ret) && arg3) {
5987 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5988 goto efault;
5989 host_to_target_sigset(p, &oldset);
5990 unlock_user(p, arg3, sizeof(target_sigset_t));
5993 break;
5994 #ifdef TARGET_NR_sigpending
5995 case TARGET_NR_sigpending:
5997 sigset_t set;
5998 ret = get_errno(sigpending(&set));
5999 if (!is_error(ret)) {
6000 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6001 goto efault;
6002 host_to_target_old_sigset(p, &set);
6003 unlock_user(p, arg1, sizeof(target_sigset_t));
6006 break;
6007 #endif
6008 case TARGET_NR_rt_sigpending:
6010 sigset_t set;
6011 ret = get_errno(sigpending(&set));
6012 if (!is_error(ret)) {
6013 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6014 goto efault;
6015 host_to_target_sigset(p, &set);
6016 unlock_user(p, arg1, sizeof(target_sigset_t));
6019 break;
6020 #ifdef TARGET_NR_sigsuspend
6021 case TARGET_NR_sigsuspend:
6023 sigset_t set;
6024 #if defined(TARGET_ALPHA)
6025 abi_ulong mask = arg1;
6026 target_to_host_old_sigset(&set, &mask);
6027 #else
6028 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6029 goto efault;
6030 target_to_host_old_sigset(&set, p);
6031 unlock_user(p, arg1, 0);
6032 #endif
6033 ret = get_errno(sigsuspend(&set));
6035 break;
6036 #endif
6037 case TARGET_NR_rt_sigsuspend:
6039 sigset_t set;
6040 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6041 goto efault;
6042 target_to_host_sigset(&set, p);
6043 unlock_user(p, arg1, 0);
6044 ret = get_errno(sigsuspend(&set));
6046 break;
6047 case TARGET_NR_rt_sigtimedwait:
6049 sigset_t set;
6050 struct timespec uts, *puts;
6051 siginfo_t uinfo;
6053 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6054 goto efault;
6055 target_to_host_sigset(&set, p);
6056 unlock_user(p, arg1, 0);
6057 if (arg3) {
6058 puts = &uts;
6059 target_to_host_timespec(puts, arg3);
6060 } else {
6061 puts = NULL;
6063 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6064 if (!is_error(ret) && arg2) {
6065 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6066 goto efault;
6067 host_to_target_siginfo(p, &uinfo);
6068 unlock_user(p, arg2, sizeof(target_siginfo_t));
6071 break;
6072 case TARGET_NR_rt_sigqueueinfo:
6074 siginfo_t uinfo;
6075 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6076 goto efault;
6077 target_to_host_siginfo(&uinfo, p);
6078 unlock_user(p, arg3, 0);
6079 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6081 break;
6082 #ifdef TARGET_NR_sigreturn
6083 case TARGET_NR_sigreturn:
6084 /* NOTE: ret is eax, so no transcoding needs to be done */
6085 ret = do_sigreturn(cpu_env);
6086 break;
6087 #endif
6088 case TARGET_NR_rt_sigreturn:
6089 /* NOTE: ret is eax, so no transcoding needs to be done */
6090 ret = do_rt_sigreturn(cpu_env);
6091 break;
6092 case TARGET_NR_sethostname:
6093 if (!(p = lock_user_string(arg1)))
6094 goto efault;
6095 ret = get_errno(sethostname(p, arg2));
6096 unlock_user(p, arg1, 0);
6097 break;
6098 case TARGET_NR_setrlimit:
6100 int resource = target_to_host_resource(arg1);
6101 struct target_rlimit *target_rlim;
6102 struct rlimit rlim;
6103 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6104 goto efault;
6105 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6106 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6107 unlock_user_struct(target_rlim, arg2, 0);
6108 ret = get_errno(setrlimit(resource, &rlim));
6110 break;
6111 case TARGET_NR_getrlimit:
6113 int resource = target_to_host_resource(arg1);
6114 struct target_rlimit *target_rlim;
6115 struct rlimit rlim;
6117 ret = get_errno(getrlimit(resource, &rlim));
6118 if (!is_error(ret)) {
6119 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6120 goto efault;
6121 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6122 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6123 unlock_user_struct(target_rlim, arg2, 1);
6126 break;
6127 case TARGET_NR_getrusage:
6129 struct rusage rusage;
6130 ret = get_errno(getrusage(arg1, &rusage));
6131 if (!is_error(ret)) {
6132 host_to_target_rusage(arg2, &rusage);
6135 break;
6136 case TARGET_NR_gettimeofday:
6138 struct timeval tv;
6139 ret = get_errno(gettimeofday(&tv, NULL));
6140 if (!is_error(ret)) {
6141 if (copy_to_user_timeval(arg1, &tv))
6142 goto efault;
6145 break;
6146 case TARGET_NR_settimeofday:
6148 struct timeval tv;
6149 if (copy_from_user_timeval(&tv, arg1))
6150 goto efault;
6151 ret = get_errno(settimeofday(&tv, NULL));
6153 break;
6154 #if defined(TARGET_NR_select)
6155 case TARGET_NR_select:
6156 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6157 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6158 #else
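/* The classic single-argument select() passes a pointer to a
 * sel_arg_struct holding { n, inp, outp, exp, tvp }; unpack and
 * byte-swap those fields before handing them to do_select(). */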
6160 struct target_sel_arg_struct *sel;
6161 abi_ulong inp, outp, exp, tvp;
6162 long nsel;
6164 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6165 goto efault;
6166 nsel = tswapal(sel->n);
6167 inp = tswapal(sel->inp);
6168 outp = tswapal(sel->outp);
6169 exp = tswapal(sel->exp);
6170 tvp = tswapal(sel->tvp);
6171 unlock_user_struct(sel, arg1, 0);
6172 ret = do_select(nsel, inp, outp, exp, tvp);
6174 #endif
6175 break;
6176 #endif
6177 #ifdef TARGET_NR_pselect6
6178 case TARGET_NR_pselect6:
6180 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6181 fd_set rfds, wfds, efds;
6182 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6183 struct timespec ts, *ts_ptr;
6186 /* The 6th arg is actually two args smashed together,
6187 * so we cannot use the C library. */
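/* A sketch of that packed sixth argument (field names illustrative):
 *     struct { abi_ulong sigset_ptr; abi_ulong sigset_size; };
 * hence arg7[0]/arg7[1] are fetched and byte-swapped with tswapal()
 * below before the sigset itself is converted. */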
6189 sigset_t set;
6190 struct {
6191 sigset_t *set;
6192 size_t size;
6193 } sig, *sig_ptr;
6195 abi_ulong arg_sigset, arg_sigsize, *arg7;
6196 target_sigset_t *target_sigset;
6198 n = arg1;
6199 rfd_addr = arg2;
6200 wfd_addr = arg3;
6201 efd_addr = arg4;
6202 ts_addr = arg5;
6204 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6205 if (ret) {
6206 goto fail;
6208 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6209 if (ret) {
6210 goto fail;
6212 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6213 if (ret) {
6214 goto fail;
6218 /* This takes a timespec, and not a timeval, so we cannot
6219 * use the do_select() helper ... */
6221 if (ts_addr) {
6222 if (target_to_host_timespec(&ts, ts_addr)) {
6223 goto efault;
6225 ts_ptr = &ts;
6226 } else {
6227 ts_ptr = NULL;
6230 /* Extract the two packed args for the sigset */
6231 if (arg6) {
6232 sig_ptr = &sig;
6233 sig.size = _NSIG / 8;
6235 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6236 if (!arg7) {
6237 goto efault;
6239 arg_sigset = tswapal(arg7[0]);
6240 arg_sigsize = tswapal(arg7[1]);
6241 unlock_user(arg7, arg6, 0);
6243 if (arg_sigset) {
6244 sig.set = &set;
6245 if (arg_sigsize != sizeof(*target_sigset)) {
6246 /* Like the kernel, we enforce correct size sigsets */
6247 ret = -TARGET_EINVAL;
6248 goto fail;
6250 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6251 sizeof(*target_sigset), 1);
6252 if (!target_sigset) {
6253 goto efault;
6255 target_to_host_sigset(&set, target_sigset);
6256 unlock_user(target_sigset, arg_sigset, 0);
6257 } else {
6258 sig.set = NULL;
6260 } else {
6261 sig_ptr = NULL;
6264 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6265 ts_ptr, sig_ptr));
6267 if (!is_error(ret)) {
6268 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6269 goto efault;
6270 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6271 goto efault;
6272 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6273 goto efault;
6275 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6276 goto efault;
6279 break;
6280 #endif
6281 case TARGET_NR_symlink:
6283 void *p2;
6284 p = lock_user_string(arg1);
6285 p2 = lock_user_string(arg2);
6286 if (!p || !p2)
6287 ret = -TARGET_EFAULT;
6288 else
6289 ret = get_errno(symlink(p, p2));
6290 unlock_user(p2, arg2, 0);
6291 unlock_user(p, arg1, 0);
6293 break;
6294 #if defined(TARGET_NR_symlinkat)
6295 case TARGET_NR_symlinkat:
6297 void *p2;
6298 p = lock_user_string(arg1);
6299 p2 = lock_user_string(arg3);
6300 if (!p || !p2)
6301 ret = -TARGET_EFAULT;
6302 else
6303 ret = get_errno(symlinkat(p, arg2, p2));
6304 unlock_user(p2, arg3, 0);
6305 unlock_user(p, arg1, 0);
6307 break;
6308 #endif
6309 #ifdef TARGET_NR_oldlstat
6310 case TARGET_NR_oldlstat:
6311 goto unimplemented;
6312 #endif
6313 case TARGET_NR_readlink:
6315 void *p2;
6316 p = lock_user_string(arg1);
6317 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6318 if (!p || !p2) {
6319 ret = -TARGET_EFAULT;
6320 } else if (is_proc_myself((const char *)p, "exe")) {
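/* readlink on /proc/self/exe (or the emulated pid's entry) must name
 * the guest binary rather than the interpreter, so substitute
 * exec_path resolved through realpath() instead of forwarding the
 * call to the host. */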
6321 char real[PATH_MAX], *temp;
6322 temp = realpath(exec_path, real);
6323 ret = temp == NULL ? get_errno(-1) : strlen(real);
6324 snprintf((char *)p2, arg3, "%s", real);
6325 } else {
6326 ret = get_errno(readlink(path(p), p2, arg3));
6328 unlock_user(p2, arg2, ret);
6329 unlock_user(p, arg1, 0);
6331 break;
6332 #if defined(TARGET_NR_readlinkat)
6333 case TARGET_NR_readlinkat:
6335 void *p2;
6336 p = lock_user_string(arg2);
6337 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6338 if (!p || !p2) {
6339 ret = -TARGET_EFAULT;
6340 } else if (is_proc_myself((const char *)p, "exe")) {
6341 char real[PATH_MAX], *temp;
6342 temp = realpath(exec_path, real);
6343 ret = temp == NULL ? get_errno(-1) : strlen(real);
6344 snprintf((char *)p2, arg4, "%s", real);
6345 } else {
6346 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6348 unlock_user(p2, arg3, ret);
6349 unlock_user(p, arg2, 0);
6351 break;
6352 #endif
6353 #ifdef TARGET_NR_uselib
6354 case TARGET_NR_uselib:
6355 goto unimplemented;
6356 #endif
6357 #ifdef TARGET_NR_swapon
6358 case TARGET_NR_swapon:
6359 if (!(p = lock_user_string(arg1)))
6360 goto efault;
6361 ret = get_errno(swapon(p, arg2));
6362 unlock_user(p, arg1, 0);
6363 break;
6364 #endif
6365 case TARGET_NR_reboot:
6366 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6367 /* arg4 (the command string) is only used with LINUX_REBOOT_CMD_RESTART2 and must be ignored in all other cases */
6368 p = lock_user_string(arg4);
6369 if (!p) {
6370 goto efault;
6372 ret = get_errno(reboot(arg1, arg2, arg3, p));
6373 unlock_user(p, arg4, 0);
6374 } else {
6375 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6377 break;
6378 #ifdef TARGET_NR_readdir
6379 case TARGET_NR_readdir:
6380 goto unimplemented;
6381 #endif
6382 #ifdef TARGET_NR_mmap
6383 case TARGET_NR_mmap:
6384 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6385 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6386 || defined(TARGET_S390X)
6388 abi_ulong *v;
6389 abi_ulong v1, v2, v3, v4, v5, v6;
6390 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6391 goto efault;
6392 v1 = tswapal(v[0]);
6393 v2 = tswapal(v[1]);
6394 v3 = tswapal(v[2]);
6395 v4 = tswapal(v[3]);
6396 v5 = tswapal(v[4]);
6397 v6 = tswapal(v[5]);
6398 unlock_user(v, arg1, 0);
6399 ret = get_errno(target_mmap(v1, v2, v3,
6400 target_to_host_bitmask(v4, mmap_flags_tbl),
6401 v5, v6));
6403 #else
6404 ret = get_errno(target_mmap(arg1, arg2, arg3,
6405 target_to_host_bitmask(arg4, mmap_flags_tbl),
6406 arg5,
6407 arg6));
6408 #endif
6409 break;
6410 #endif
6411 #ifdef TARGET_NR_mmap2
6412 case TARGET_NR_mmap2:
6413 #ifndef MMAP_SHIFT
6414 #define MMAP_SHIFT 12
6415 #endif
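/* mmap2 passes the file offset in units of 1 << MMAP_SHIFT bytes
 * (4096 unless a target overrides it), so shift it back into a byte
 * offset for target_mmap(). */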
6416 ret = get_errno(target_mmap(arg1, arg2, arg3,
6417 target_to_host_bitmask(arg4, mmap_flags_tbl),
6418 arg5,
6419 arg6 << MMAP_SHIFT));
6420 break;
6421 #endif
6422 case TARGET_NR_munmap:
6423 ret = get_errno(target_munmap(arg1, arg2));
6424 break;
6425 case TARGET_NR_mprotect:
6427 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6428 /* Special hack to detect libc making the stack executable. */
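/* When libc asks to make part of the stack executable with
 * PROT_GROWSDOWN, a real kernel would extend the change down to the
 * bottom of the stack VMA; emulate that by widening the range to
 * start at stack_limit and dropping the flag, since the guest stack
 * here is an ordinary host mapping. */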
6429 if ((arg3 & PROT_GROWSDOWN)
6430 && arg1 >= ts->info->stack_limit
6431 && arg1 <= ts->info->start_stack) {
6432 arg3 &= ~PROT_GROWSDOWN;
6433 arg2 = arg2 + arg1 - ts->info->stack_limit;
6434 arg1 = ts->info->stack_limit;
6437 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6438 break;
6439 #ifdef TARGET_NR_mremap
6440 case TARGET_NR_mremap:
6441 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6442 break;
6443 #endif
6444 /* ??? msync/mlock/munlock are broken for softmmu. */
6445 #ifdef TARGET_NR_msync
6446 case TARGET_NR_msync:
6447 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6448 break;
6449 #endif
6450 #ifdef TARGET_NR_mlock
6451 case TARGET_NR_mlock:
6452 ret = get_errno(mlock(g2h(arg1), arg2));
6453 break;
6454 #endif
6455 #ifdef TARGET_NR_munlock
6456 case TARGET_NR_munlock:
6457 ret = get_errno(munlock(g2h(arg1), arg2));
6458 break;
6459 #endif
6460 #ifdef TARGET_NR_mlockall
6461 case TARGET_NR_mlockall:
6462 ret = get_errno(mlockall(arg1));
6463 break;
6464 #endif
6465 #ifdef TARGET_NR_munlockall
6466 case TARGET_NR_munlockall:
6467 ret = get_errno(munlockall());
6468 break;
6469 #endif
6470 case TARGET_NR_truncate:
6471 if (!(p = lock_user_string(arg1)))
6472 goto efault;
6473 ret = get_errno(truncate(p, arg2));
6474 unlock_user(p, arg1, 0);
6475 break;
6476 case TARGET_NR_ftruncate:
6477 ret = get_errno(ftruncate(arg1, arg2));
6478 break;
6479 case TARGET_NR_fchmod:
6480 ret = get_errno(fchmod(arg1, arg2));
6481 break;
6482 #if defined(TARGET_NR_fchmodat)
6483 case TARGET_NR_fchmodat:
6484 if (!(p = lock_user_string(arg2)))
6485 goto efault;
6486 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6487 unlock_user(p, arg2, 0);
6488 break;
6489 #endif
6490 case TARGET_NR_getpriority:
6491 /* Note that negative values are valid for getpriority, so we must
6492 differentiate based on errno settings. */
6493 errno = 0;
6494 ret = getpriority(arg1, arg2);
6495 if (ret == -1 && errno != 0) {
6496 ret = -host_to_target_errno(errno);
6497 break;
6499 #ifdef TARGET_ALPHA
6500 /* Return value is the unbiased priority. Signal no error. */
6501 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6502 #else
6503 /* Return value is a biased priority to avoid negative numbers. */
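/* e.g. a nice value of -20 comes back as 40 and 19 as 1, mirroring
 * the kernel's own sys_getpriority() encoding. */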
6504 ret = 20 - ret;
6505 #endif
6506 break;
6507 case TARGET_NR_setpriority:
6508 ret = get_errno(setpriority(arg1, arg2, arg3));
6509 break;
6510 #ifdef TARGET_NR_profil
6511 case TARGET_NR_profil:
6512 goto unimplemented;
6513 #endif
6514 case TARGET_NR_statfs:
6515 if (!(p = lock_user_string(arg1)))
6516 goto efault;
6517 ret = get_errno(statfs(path(p), &stfs));
6518 unlock_user(p, arg1, 0);
6519 convert_statfs:
6520 if (!is_error(ret)) {
6521 struct target_statfs *target_stfs;
6523 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6524 goto efault;
6525 __put_user(stfs.f_type, &target_stfs->f_type);
6526 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6527 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6528 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6529 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6530 __put_user(stfs.f_files, &target_stfs->f_files);
6531 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6532 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6533 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6534 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6535 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6536 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6537 unlock_user_struct(target_stfs, arg2, 1);
6539 break;
6540 case TARGET_NR_fstatfs:
6541 ret = get_errno(fstatfs(arg1, &stfs));
6542 goto convert_statfs;
6543 #ifdef TARGET_NR_statfs64
6544 case TARGET_NR_statfs64:
6545 if (!(p = lock_user_string(arg1)))
6546 goto efault;
6547 ret = get_errno(statfs(path(p), &stfs));
6548 unlock_user(p, arg1, 0);
6549 convert_statfs64:
6550 if (!is_error(ret)) {
6551 struct target_statfs64 *target_stfs;
6553 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6554 goto efault;
6555 __put_user(stfs.f_type, &target_stfs->f_type);
6556 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6557 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6558 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6559 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6560 __put_user(stfs.f_files, &target_stfs->f_files);
6561 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6562 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6563 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6564 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6565 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6566 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6567 unlock_user_struct(target_stfs, arg3, 1);
6569 break;
6570 case TARGET_NR_fstatfs64:
6571 ret = get_errno(fstatfs(arg1, &stfs));
6572 goto convert_statfs64;
6573 #endif
6574 #ifdef TARGET_NR_ioperm
6575 case TARGET_NR_ioperm:
6576 goto unimplemented;
6577 #endif
6578 #ifdef TARGET_NR_socketcall
6579 case TARGET_NR_socketcall:
6580 ret = do_socketcall(arg1, arg2);
6581 break;
6582 #endif
6583 #ifdef TARGET_NR_accept
6584 case TARGET_NR_accept:
6585 ret = do_accept4(arg1, arg2, arg3, 0);
6586 break;
6587 #endif
6588 #ifdef TARGET_NR_accept4
6589 case TARGET_NR_accept4:
6590 #ifdef CONFIG_ACCEPT4
6591 ret = do_accept4(arg1, arg2, arg3, arg4);
6592 #else
6593 goto unimplemented;
6594 #endif
6595 break;
6596 #endif
6597 #ifdef TARGET_NR_bind
6598 case TARGET_NR_bind:
6599 ret = do_bind(arg1, arg2, arg3);
6600 break;
6601 #endif
6602 #ifdef TARGET_NR_connect
6603 case TARGET_NR_connect:
6604 ret = do_connect(arg1, arg2, arg3);
6605 break;
6606 #endif
6607 #ifdef TARGET_NR_getpeername
6608 case TARGET_NR_getpeername:
6609 ret = do_getpeername(arg1, arg2, arg3);
6610 break;
6611 #endif
6612 #ifdef TARGET_NR_getsockname
6613 case TARGET_NR_getsockname:
6614 ret = do_getsockname(arg1, arg2, arg3);
6615 break;
6616 #endif
6617 #ifdef TARGET_NR_getsockopt
6618 case TARGET_NR_getsockopt:
6619 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6620 break;
6621 #endif
6622 #ifdef TARGET_NR_listen
6623 case TARGET_NR_listen:
6624 ret = get_errno(listen(arg1, arg2));
6625 break;
6626 #endif
6627 #ifdef TARGET_NR_recv
6628 case TARGET_NR_recv:
6629 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6630 break;
6631 #endif
6632 #ifdef TARGET_NR_recvfrom
6633 case TARGET_NR_recvfrom:
6634 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6635 break;
6636 #endif
6637 #ifdef TARGET_NR_recvmsg
6638 case TARGET_NR_recvmsg:
6639 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6640 break;
6641 #endif
6642 #ifdef TARGET_NR_send
6643 case TARGET_NR_send:
6644 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6645 break;
6646 #endif
6647 #ifdef TARGET_NR_sendmsg
6648 case TARGET_NR_sendmsg:
6649 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6650 break;
6651 #endif
6652 #ifdef TARGET_NR_sendto
6653 case TARGET_NR_sendto:
6654 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6655 break;
6656 #endif
6657 #ifdef TARGET_NR_shutdown
6658 case TARGET_NR_shutdown:
6659 ret = get_errno(shutdown(arg1, arg2));
6660 break;
6661 #endif
6662 #ifdef TARGET_NR_socket
6663 case TARGET_NR_socket:
6664 ret = do_socket(arg1, arg2, arg3);
6665 break;
6666 #endif
6667 #ifdef TARGET_NR_socketpair
6668 case TARGET_NR_socketpair:
6669 ret = do_socketpair(arg1, arg2, arg3, arg4);
6670 break;
6671 #endif
6672 #ifdef TARGET_NR_setsockopt
6673 case TARGET_NR_setsockopt:
6674 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6675 break;
6676 #endif
6678 case TARGET_NR_syslog:
6679 if (!(p = lock_user_string(arg2)))
6680 goto efault;
6681 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6682 unlock_user(p, arg2, 0);
6683 break;
6685 case TARGET_NR_setitimer:
6687 struct itimerval value, ovalue, *pvalue;
6689 if (arg2) {
6690 pvalue = &value;
6691 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6692 || copy_from_user_timeval(&pvalue->it_value,
6693 arg2 + sizeof(struct target_timeval)))
6694 goto efault;
6695 } else {
6696 pvalue = NULL;
6698 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6699 if (!is_error(ret) && arg3) {
6700 if (copy_to_user_timeval(arg3,
6701 &ovalue.it_interval)
6702 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6703 &ovalue.it_value))
6704 goto efault;
6707 break;
6708 case TARGET_NR_getitimer:
6710 struct itimerval value;
6712 ret = get_errno(getitimer(arg1, &value));
6713 if (!is_error(ret) && arg2) {
6714 if (copy_to_user_timeval(arg2,
6715 &value.it_interval)
6716 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6717 &value.it_value))
6718 goto efault;
6721 break;
6722 case TARGET_NR_stat:
6723 if (!(p = lock_user_string(arg1)))
6724 goto efault;
6725 ret = get_errno(stat(path(p), &st));
6726 unlock_user(p, arg1, 0);
6727 goto do_stat;
6728 case TARGET_NR_lstat:
6729 if (!(p = lock_user_string(arg1)))
6730 goto efault;
6731 ret = get_errno(lstat(path(p), &st));
6732 unlock_user(p, arg1, 0);
6733 goto do_stat;
6734 case TARGET_NR_fstat:
6736 ret = get_errno(fstat(arg1, &st));
6737 do_stat:
6738 if (!is_error(ret)) {
6739 struct target_stat *target_st;
6741 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6742 goto efault;
6743 memset(target_st, 0, sizeof(*target_st));
6744 __put_user(st.st_dev, &target_st->st_dev);
6745 __put_user(st.st_ino, &target_st->st_ino);
6746 __put_user(st.st_mode, &target_st->st_mode);
6747 __put_user(st.st_uid, &target_st->st_uid);
6748 __put_user(st.st_gid, &target_st->st_gid);
6749 __put_user(st.st_nlink, &target_st->st_nlink);
6750 __put_user(st.st_rdev, &target_st->st_rdev);
6751 __put_user(st.st_size, &target_st->st_size);
6752 __put_user(st.st_blksize, &target_st->st_blksize);
6753 __put_user(st.st_blocks, &target_st->st_blocks);
6754 __put_user(st.st_atime, &target_st->target_st_atime);
6755 __put_user(st.st_mtime, &target_st->target_st_mtime);
6756 __put_user(st.st_ctime, &target_st->target_st_ctime);
6757 unlock_user_struct(target_st, arg2, 1);
6760 break;
6761 #ifdef TARGET_NR_olduname
6762 case TARGET_NR_olduname:
6763 goto unimplemented;
6764 #endif
6765 #ifdef TARGET_NR_iopl
6766 case TARGET_NR_iopl:
6767 goto unimplemented;
6768 #endif
6769 case TARGET_NR_vhangup:
6770 ret = get_errno(vhangup());
6771 break;
6772 #ifdef TARGET_NR_idle
6773 case TARGET_NR_idle:
6774 goto unimplemented;
6775 #endif
6776 #ifdef TARGET_NR_syscall
6777 case TARGET_NR_syscall:
6778 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6779 arg6, arg7, arg8, 0);
6780 break;
6781 #endif
6782 case TARGET_NR_wait4:
6784 int status;
6785 abi_long status_ptr = arg2;
6786 struct rusage rusage, *rusage_ptr;
6787 abi_ulong target_rusage = arg4;
6788 if (target_rusage)
6789 rusage_ptr = &rusage;
6790 else
6791 rusage_ptr = NULL;
6792 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6793 if (!is_error(ret)) {
6794 if (status_ptr && ret) {
6795 status = host_to_target_waitstatus(status);
6796 if (put_user_s32(status, status_ptr))
6797 goto efault;
6799 if (target_rusage)
6800 host_to_target_rusage(target_rusage, &rusage);
6803 break;
6804 #ifdef TARGET_NR_swapoff
6805 case TARGET_NR_swapoff:
6806 if (!(p = lock_user_string(arg1)))
6807 goto efault;
6808 ret = get_errno(swapoff(p));
6809 unlock_user(p, arg1, 0);
6810 break;
6811 #endif
6812 case TARGET_NR_sysinfo:
6814 struct target_sysinfo *target_value;
6815 struct sysinfo value;
6816 ret = get_errno(sysinfo(&value));
6817 if (!is_error(ret) && arg1)
6819 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6820 goto efault;
6821 __put_user(value.uptime, &target_value->uptime);
6822 __put_user(value.loads[0], &target_value->loads[0]);
6823 __put_user(value.loads[1], &target_value->loads[1]);
6824 __put_user(value.loads[2], &target_value->loads[2]);
6825 __put_user(value.totalram, &target_value->totalram);
6826 __put_user(value.freeram, &target_value->freeram);
6827 __put_user(value.sharedram, &target_value->sharedram);
6828 __put_user(value.bufferram, &target_value->bufferram);
6829 __put_user(value.totalswap, &target_value->totalswap);
6830 __put_user(value.freeswap, &target_value->freeswap);
6831 __put_user(value.procs, &target_value->procs);
6832 __put_user(value.totalhigh, &target_value->totalhigh);
6833 __put_user(value.freehigh, &target_value->freehigh);
6834 __put_user(value.mem_unit, &target_value->mem_unit);
6835 unlock_user_struct(target_value, arg1, 1);
6838 break;
6839 #ifdef TARGET_NR_ipc
6840 case TARGET_NR_ipc:
6841 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6842 break;
6843 #endif
6844 #ifdef TARGET_NR_semget
6845 case TARGET_NR_semget:
6846 ret = get_errno(semget(arg1, arg2, arg3));
6847 break;
6848 #endif
6849 #ifdef TARGET_NR_semop
6850 case TARGET_NR_semop:
6851 ret = do_semop(arg1, arg2, arg3);
6852 break;
6853 #endif
6854 #ifdef TARGET_NR_semctl
6855 case TARGET_NR_semctl:
6856 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6857 break;
6858 #endif
6859 #ifdef TARGET_NR_msgctl
6860 case TARGET_NR_msgctl:
6861 ret = do_msgctl(arg1, arg2, arg3);
6862 break;
6863 #endif
6864 #ifdef TARGET_NR_msgget
6865 case TARGET_NR_msgget:
6866 ret = get_errno(msgget(arg1, arg2));
6867 break;
6868 #endif
6869 #ifdef TARGET_NR_msgrcv
6870 case TARGET_NR_msgrcv:
6871 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6872 break;
6873 #endif
6874 #ifdef TARGET_NR_msgsnd
6875 case TARGET_NR_msgsnd:
6876 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6877 break;
6878 #endif
6879 #ifdef TARGET_NR_shmget
6880 case TARGET_NR_shmget:
6881 ret = get_errno(shmget(arg1, arg2, arg3));
6882 break;
6883 #endif
6884 #ifdef TARGET_NR_shmctl
6885 case TARGET_NR_shmctl:
6886 ret = do_shmctl(arg1, arg2, arg3);
6887 break;
6888 #endif
6889 #ifdef TARGET_NR_shmat
6890 case TARGET_NR_shmat:
6891 ret = do_shmat(arg1, arg2, arg3);
6892 break;
6893 #endif
6894 #ifdef TARGET_NR_shmdt
6895 case TARGET_NR_shmdt:
6896 ret = do_shmdt(arg1);
6897 break;
6898 #endif
6899 case TARGET_NR_fsync:
6900 ret = get_errno(fsync(arg1));
6901 break;
6902 case TARGET_NR_clone:
6903 /* Linux manages to have three different orderings for its
6904 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
6905 * match the kernel's CONFIG_CLONE_* settings.
6906 * Microblaze is further special in that it uses a sixth
6907 * implicit argument to clone for the TLS pointer. */
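/* A sketch of the guest argument orders, assuming do_fork(env, flags,
 * newsp, parent_tidptr, newtls, child_tidptr):
 *   default:          clone(flags, newsp, parent_tidptr, child_tidptr, tls)
 *   CLONE_BACKWARDS:  clone(flags, newsp, parent_tidptr, tls, child_tidptr)
 *   CLONE_BACKWARDS2: clone(newsp, flags, parent_tidptr, child_tidptr, tls)
 * which is why the argN values are shuffled differently below. */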
6909 #if defined(TARGET_MICROBLAZE)
6910 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6911 #elif defined(TARGET_CLONE_BACKWARDS)
6912 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6913 #elif defined(TARGET_CLONE_BACKWARDS2)
6914 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6915 #else
6916 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6917 #endif
6918 break;
6919 #ifdef __NR_exit_group
6920 /* new thread calls */
6921 case TARGET_NR_exit_group:
6922 #ifdef TARGET_GPROF
6923 _mcleanup();
6924 #endif
6925 gdb_exit(cpu_env, arg1);
6926 ret = get_errno(exit_group(arg1));
6927 break;
6928 #endif
6929 case TARGET_NR_setdomainname:
6930 if (!(p = lock_user_string(arg1)))
6931 goto efault;
6932 ret = get_errno(setdomainname(p, arg2));
6933 unlock_user(p, arg1, 0);
6934 break;
6935 case TARGET_NR_uname:
6936 /* no need to transcode because we use the linux syscall */
6938 struct new_utsname * buf;
6940 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6941 goto efault;
6942 ret = get_errno(sys_uname(buf));
6943 if (!is_error(ret)) {
6944 /* Overwrite the native machine name with whatever is being
6945 emulated. */
6946 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6947 /* Allow the user to override the reported release. */
6948 if (qemu_uname_release && *qemu_uname_release)
6949 strcpy (buf->release, qemu_uname_release);
6951 unlock_user_struct(buf, arg1, 1);
6953 break;
6954 #ifdef TARGET_I386
6955 case TARGET_NR_modify_ldt:
6956 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6957 break;
6958 #if !defined(TARGET_X86_64)
6959 case TARGET_NR_vm86old:
6960 goto unimplemented;
6961 case TARGET_NR_vm86:
6962 ret = do_vm86(cpu_env, arg1, arg2);
6963 break;
6964 #endif
6965 #endif
6966 case TARGET_NR_adjtimex:
6967 goto unimplemented;
6968 #ifdef TARGET_NR_create_module
6969 case TARGET_NR_create_module:
6970 #endif
6971 case TARGET_NR_init_module:
6972 case TARGET_NR_delete_module:
6973 #ifdef TARGET_NR_get_kernel_syms
6974 case TARGET_NR_get_kernel_syms:
6975 #endif
6976 goto unimplemented;
6977 case TARGET_NR_quotactl:
6978 goto unimplemented;
6979 case TARGET_NR_getpgid:
6980 ret = get_errno(getpgid(arg1));
6981 break;
6982 case TARGET_NR_fchdir:
6983 ret = get_errno(fchdir(arg1));
6984 break;
6985 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6986 case TARGET_NR_bdflush:
6987 goto unimplemented;
6988 #endif
6989 #ifdef TARGET_NR_sysfs
6990 case TARGET_NR_sysfs:
6991 goto unimplemented;
6992 #endif
6993 case TARGET_NR_personality:
6994 ret = get_errno(personality(arg1));
6995 break;
6996 #ifdef TARGET_NR_afs_syscall
6997 case TARGET_NR_afs_syscall:
6998 goto unimplemented;
6999 #endif
7000 #ifdef TARGET_NR__llseek /* Not on alpha */
7001 case TARGET_NR__llseek:
7003 int64_t res;
7004 #if !defined(__NR_llseek)
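/* Hosts without __NR_llseek (typically 64-bit ones) can use a plain
 * lseek(): combine the high (arg2) and low (arg3) halves into one
 * 64-bit offset; the result is stored back through arg4 below. */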
7005 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7006 if (res == -1) {
7007 ret = get_errno(res);
7008 } else {
7009 ret = 0;
7011 #else
7012 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7013 #endif
7014 if ((ret == 0) && put_user_s64(res, arg4)) {
7015 goto efault;
7018 break;
7019 #endif
7020 case TARGET_NR_getdents:
7021 #ifdef __NR_getdents
7022 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7024 struct target_dirent *target_dirp;
7025 struct linux_dirent *dirp;
7026 abi_long count = arg3;
7028 dirp = malloc(count);
7029 if (!dirp) {
7030 ret = -TARGET_ENOMEM;
7031 goto fail;
7034 ret = get_errno(sys_getdents(arg1, dirp, count));
7035 if (!is_error(ret)) {
7036 struct linux_dirent *de;
7037 struct target_dirent *tde;
7038 int len = ret;
7039 int reclen, treclen;
7040 int count1, tnamelen;
7042 count1 = 0;
7043 de = dirp;
7044 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7045 goto efault;
7046 tde = target_dirp;
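/* Repack each host linux_dirent into a (smaller) target_dirent: the
 * name area (reclen minus the host header size) is copied verbatim
 * while d_ino/d_off/d_reclen are byte-swapped into the target layout. */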
7047 while (len > 0) {
7048 reclen = de->d_reclen;
7049 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7050 assert(tnamelen >= 0);
7051 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7052 assert(count1 + treclen <= count);
7053 tde->d_reclen = tswap16(treclen);
7054 tde->d_ino = tswapal(de->d_ino);
7055 tde->d_off = tswapal(de->d_off);
7056 memcpy(tde->d_name, de->d_name, tnamelen);
7057 de = (struct linux_dirent *)((char *)de + reclen);
7058 len -= reclen;
7059 tde = (struct target_dirent *)((char *)tde + treclen);
7060 count1 += treclen;
7062 ret = count1;
7063 unlock_user(target_dirp, arg2, ret);
7065 free(dirp);
7067 #else
7069 struct linux_dirent *dirp;
7070 abi_long count = arg3;
7072 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7073 goto efault;
7074 ret = get_errno(sys_getdents(arg1, dirp, count));
7075 if (!is_error(ret)) {
7076 struct linux_dirent *de;
7077 int len = ret;
7078 int reclen;
7079 de = dirp;
7080 while (len > 0) {
7081 reclen = de->d_reclen;
7082 if (reclen > len)
7083 break;
7084 de->d_reclen = tswap16(reclen);
7085 tswapls(&de->d_ino);
7086 tswapls(&de->d_off);
7087 de = (struct linux_dirent *)((char *)de + reclen);
7088 len -= reclen;
7091 unlock_user(dirp, arg2, ret);
7093 #endif
7094 #else
7095 /* Implement getdents in terms of getdents64 */
7097 struct linux_dirent64 *dirp;
7098 abi_long count = arg3;
7100 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7101 if (!dirp) {
7102 goto efault;
7104 ret = get_errno(sys_getdents64(arg1, dirp, count));
7105 if (!is_error(ret)) {
7106 /* Convert the dirent64 structs to target dirent. We do this
7107 * in-place, since we can guarantee that a target_dirent is no
7108 * larger than a dirent64; however this means we have to be
7109 * careful to read everything before writing in the new format. */
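/* The conversion shrinks each record: d_ino and d_off go from 64 bits
 * to abi_long and the separate d_type byte is folded into the last
 * byte of the record, so every source byte is read before anything
 * can overwrite it. */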
7111 struct linux_dirent64 *de;
7112 struct target_dirent *tde;
7113 int len = ret;
7114 int tlen = 0;
7116 de = dirp;
7117 tde = (struct target_dirent *)dirp;
7118 while (len > 0) {
7119 int namelen, treclen;
7120 int reclen = de->d_reclen;
7121 uint64_t ino = de->d_ino;
7122 int64_t off = de->d_off;
7123 uint8_t type = de->d_type;
7125 namelen = strlen(de->d_name);
7126 treclen = offsetof(struct target_dirent, d_name)
7127 + namelen + 2;
7128 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7130 memmove(tde->d_name, de->d_name, namelen + 1);
7131 tde->d_ino = tswapal(ino);
7132 tde->d_off = tswapal(off);
7133 tde->d_reclen = tswap16(treclen);
7134 /* The target_dirent type is in what was formerly a padding
7135 * byte at the end of the structure: */
7137 *(((char *)tde) + treclen - 1) = type;
7139 de = (struct linux_dirent64 *)((char *)de + reclen);
7140 tde = (struct target_dirent *)((char *)tde + treclen);
7141 len -= reclen;
7142 tlen += treclen;
7144 ret = tlen;
7146 unlock_user(dirp, arg2, ret);
7148 #endif
7149 break;
7150 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7151 case TARGET_NR_getdents64:
7153 struct linux_dirent64 *dirp;
7154 abi_long count = arg3;
7155 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7156 goto efault;
7157 ret = get_errno(sys_getdents64(arg1, dirp, count));
7158 if (!is_error(ret)) {
7159 struct linux_dirent64 *de;
7160 int len = ret;
7161 int reclen;
7162 de = dirp;
7163 while (len > 0) {
7164 reclen = de->d_reclen;
7165 if (reclen > len)
7166 break;
7167 de->d_reclen = tswap16(reclen);
7168 tswap64s((uint64_t *)&de->d_ino);
7169 tswap64s((uint64_t *)&de->d_off);
7170 de = (struct linux_dirent64 *)((char *)de + reclen);
7171 len -= reclen;
7174 unlock_user(dirp, arg2, ret);
7176 break;
7177 #endif /* TARGET_NR_getdents64 */
7178 #if defined(TARGET_NR__newselect)
7179 case TARGET_NR__newselect:
7180 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7181 break;
7182 #endif
7183 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7184 # ifdef TARGET_NR_poll
7185 case TARGET_NR_poll:
7186 # endif
7187 # ifdef TARGET_NR_ppoll
7188 case TARGET_NR_ppoll:
7189 # endif
7191 struct target_pollfd *target_pfd;
7192 unsigned int nfds = arg2;
7193 int timeout = arg3;
7194 struct pollfd *pfd;
7195 unsigned int i;
7197 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7198 if (!target_pfd)
7199 goto efault;
7201 pfd = alloca(sizeof(struct pollfd) * nfds);
7202 for(i = 0; i < nfds; i++) {
7203 pfd[i].fd = tswap32(target_pfd[i].fd);
7204 pfd[i].events = tswap16(target_pfd[i].events);
7207 # ifdef TARGET_NR_ppoll
7208 if (num == TARGET_NR_ppoll) {
7209 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7210 target_sigset_t *target_set;
7211 sigset_t _set, *set = &_set;
7213 if (arg3) {
7214 if (target_to_host_timespec(timeout_ts, arg3)) {
7215 unlock_user(target_pfd, arg1, 0);
7216 goto efault;
7218 } else {
7219 timeout_ts = NULL;
7222 if (arg4) {
7223 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7224 if (!target_set) {
7225 unlock_user(target_pfd, arg1, 0);
7226 goto efault;
7228 target_to_host_sigset(set, target_set);
7229 } else {
7230 set = NULL;
7233 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7235 if (!is_error(ret) && arg3) {
7236 host_to_target_timespec(arg3, timeout_ts);
7238 if (arg4) {
7239 unlock_user(target_set, arg4, 0);
7241 } else
7242 # endif
7243 ret = get_errno(poll(pfd, nfds, timeout));
7245 if (!is_error(ret)) {
7246 for(i = 0; i < nfds; i++) {
7247 target_pfd[i].revents = tswap16(pfd[i].revents);
7250 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7252 break;
7253 #endif
7254 case TARGET_NR_flock:
7255 /* NOTE: the flock constant seems to be the same for every
7256 Linux platform */
7257 ret = get_errno(flock(arg1, arg2));
7258 break;
7259 case TARGET_NR_readv:
7261 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7262 if (vec != NULL) {
7263 ret = get_errno(readv(arg1, vec, arg3));
7264 unlock_iovec(vec, arg2, arg3, 1);
7265 } else {
7266 ret = -host_to_target_errno(errno);
7269 break;
7270 case TARGET_NR_writev:
7272 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7273 if (vec != NULL) {
7274 ret = get_errno(writev(arg1, vec, arg3));
7275 unlock_iovec(vec, arg2, arg3, 0);
7276 } else {
7277 ret = -host_to_target_errno(errno);
7280 break;
7281 case TARGET_NR_getsid:
7282 ret = get_errno(getsid(arg1));
7283 break;
7284 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7285 case TARGET_NR_fdatasync:
7286 ret = get_errno(fdatasync(arg1));
7287 break;
7288 #endif
7289 case TARGET_NR__sysctl:
7290 /* We don't implement this, but ENOTDIR is always a safe
7291 return value. */
7292 ret = -TARGET_ENOTDIR;
7293 break;
7294 case TARGET_NR_sched_getaffinity:
7296 unsigned int mask_size;
7297 unsigned long *mask;
7300 /* sched_getaffinity needs multiples of ulong, so we need to take
7301 * care of mismatches between target ulong and host ulong sizes. */
7303 if (arg2 & (sizeof(abi_ulong) - 1)) {
7304 ret = -TARGET_EINVAL;
7305 break;
7307 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
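/* e.g. for a 32-bit guest on a 64-bit host, arg2 == 4 passes the
 * abi_ulong check above and rounds up here to (4 + 7) & ~7 == 8,
 * the host's sizeof(unsigned long). */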
7309 mask = alloca(mask_size);
7310 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7312 if (!is_error(ret)) {
7313 if (copy_to_user(arg3, mask, ret)) {
7314 goto efault;
7318 break;
7319 case TARGET_NR_sched_setaffinity:
7321 unsigned int mask_size;
7322 unsigned long *mask;
7325 /* sched_setaffinity needs multiples of ulong, so we need to take
7326 * care of mismatches between target ulong and host ulong sizes. */
7328 if (arg2 & (sizeof(abi_ulong) - 1)) {
7329 ret = -TARGET_EINVAL;
7330 break;
7332 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7334 mask = alloca(mask_size);
7335 if (!(p = lock_user(VERIFY_READ, arg3, arg2, 1))) {
7336 goto efault;
7338 memcpy(mask, p, arg2);
7339 unlock_user(p, arg3, 0);
7341 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7343 break;
7344 case TARGET_NR_sched_setparam:
7346 struct sched_param *target_schp;
7347 struct sched_param schp;
7349 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7350 goto efault;
7351 schp.sched_priority = tswap32(target_schp->sched_priority);
7352 unlock_user_struct(target_schp, arg2, 0);
7353 ret = get_errno(sched_setparam(arg1, &schp));
7355 break;
7356 case TARGET_NR_sched_getparam:
7358 struct sched_param *target_schp;
7359 struct sched_param schp;
7360 ret = get_errno(sched_getparam(arg1, &schp));
7361 if (!is_error(ret)) {
7362 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7363 goto efault;
7364 target_schp->sched_priority = tswap32(schp.sched_priority);
7365 unlock_user_struct(target_schp, arg2, 1);
7368 break;
7369 case TARGET_NR_sched_setscheduler:
7371 struct sched_param *target_schp;
7372 struct sched_param schp;
7373 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7374 goto efault;
7375 schp.sched_priority = tswap32(target_schp->sched_priority);
7376 unlock_user_struct(target_schp, arg3, 0);
7377 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7379 break;
7380 case TARGET_NR_sched_getscheduler:
7381 ret = get_errno(sched_getscheduler(arg1));
7382 break;
7383 case TARGET_NR_sched_yield:
7384 ret = get_errno(sched_yield());
7385 break;
7386 case TARGET_NR_sched_get_priority_max:
7387 ret = get_errno(sched_get_priority_max(arg1));
7388 break;
7389 case TARGET_NR_sched_get_priority_min:
7390 ret = get_errno(sched_get_priority_min(arg1));
7391 break;
7392 case TARGET_NR_sched_rr_get_interval:
7394 struct timespec ts;
7395 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7396 if (!is_error(ret)) {
7397 host_to_target_timespec(arg2, &ts);
7400 break;
7401 case TARGET_NR_nanosleep:
7403 struct timespec req, rem;
7404 target_to_host_timespec(&req, arg1);
7405 ret = get_errno(nanosleep(&req, &rem));
7406 if (is_error(ret) && arg2) {
7407 host_to_target_timespec(arg2, &rem);
7410 break;
7411 #ifdef TARGET_NR_query_module
7412 case TARGET_NR_query_module:
7413 goto unimplemented;
7414 #endif
7415 #ifdef TARGET_NR_nfsservctl
7416 case TARGET_NR_nfsservctl:
7417 goto unimplemented;
7418 #endif
7419 case TARGET_NR_prctl:
7420 switch (arg1) {
7421 case PR_GET_PDEATHSIG:
7423 int deathsig;
7424 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7425 if (!is_error(ret) && arg2
7426 && put_user_ual(deathsig, arg2)) {
7427 goto efault;
7429 break;
7431 #ifdef PR_GET_NAME
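/* The kernel's task comm name is at most 16 bytes including the
 * trailing NUL (TASK_COMM_LEN), hence the fixed 16-byte windows
 * locked below. */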
7432 case PR_GET_NAME:
7434 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7435 if (!name) {
7436 goto efault;
7438 ret = get_errno(prctl(arg1, (unsigned long)name,
7439 arg3, arg4, arg5));
7440 unlock_user(name, arg2, 16);
7441 break;
7443 case PR_SET_NAME:
7445 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7446 if (!name) {
7447 goto efault;
7449 ret = get_errno(prctl(arg1, (unsigned long)name,
7450 arg3, arg4, arg5));
7451 unlock_user(name, arg2, 0);
7452 break;
7454 #endif
7455 default:
7456 /* Most prctl options have no pointer arguments */
7457 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7458 break;
7460 break;
7461 #ifdef TARGET_NR_arch_prctl
7462 case TARGET_NR_arch_prctl:
7463 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7464 ret = do_arch_prctl(cpu_env, arg1, arg2);
7465 break;
7466 #else
7467 goto unimplemented;
7468 #endif
7469 #endif
7470 #ifdef TARGET_NR_pread64
7471 case TARGET_NR_pread64:
7472 if (regpairs_aligned(cpu_env)) {
7473 arg4 = arg5;
7474 arg5 = arg6;
7476 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7477 goto efault;
7478 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7479 unlock_user(p, arg2, ret);
7480 break;
7481 case TARGET_NR_pwrite64:
7482 if (regpairs_aligned(cpu_env)) {
7483 arg4 = arg5;
7484 arg5 = arg6;
7486 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7487 goto efault;
7488 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7489 unlock_user(p, arg2, 0);
7490 break;
7491 #endif
7492 case TARGET_NR_getcwd:
7493 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7494 goto efault;
7495 ret = get_errno(sys_getcwd1(p, arg2));
7496 unlock_user(p, arg1, ret);
7497 break;
7498 case TARGET_NR_capget:
7499 goto unimplemented;
7500 case TARGET_NR_capset:
7501 goto unimplemented;
7502 case TARGET_NR_sigaltstack:
7503 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7504 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7505 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7506 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7507 break;
7508 #else
7509 goto unimplemented;
7510 #endif
7512 #ifdef CONFIG_SENDFILE
7513 case TARGET_NR_sendfile:
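/* sendfile(2) advances the offset through its pointer argument, so
 * when arg3 is non-zero the guest offset is read before the call and
 * the updated value is written back afterwards. */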
7515 off_t *offp = NULL;
7516 off_t off;
7517 if (arg3) {
7518 ret = get_user_sal(off, arg3);
7519 if (is_error(ret)) {
7520 break;
7522 offp = &off;
7524 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7525 if (!is_error(ret) && arg3) {
7526 abi_long ret2 = put_user_sal(off, arg3);
7527 if (is_error(ret2)) {
7528 ret = ret2;
7531 break;
7533 #ifdef TARGET_NR_sendfile64
7534 case TARGET_NR_sendfile64:
7536 off_t *offp = NULL;
7537 off_t off;
7538 if (arg3) {
7539 ret = get_user_s64(off, arg3);
7540 if (is_error(ret)) {
7541 break;
7543 offp = &off;
7545 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7546 if (!is_error(ret) && arg3) {
7547 abi_long ret2 = put_user_s64(off, arg3);
7548 if (is_error(ret2)) {
7549 ret = ret2;
7552 break;
7554 #endif
7555 #else
7556 case TARGET_NR_sendfile:
7557 #ifdef TARGET_NR_sendfile64
7558 case TARGET_NR_sendfile64:
7559 #endif
7560 goto unimplemented;
7561 #endif
7563 #ifdef TARGET_NR_getpmsg
7564 case TARGET_NR_getpmsg:
7565 goto unimplemented;
7566 #endif
7567 #ifdef TARGET_NR_putpmsg
7568 case TARGET_NR_putpmsg:
7569 goto unimplemented;
7570 #endif
7571 #ifdef TARGET_NR_vfork
7572 case TARGET_NR_vfork:
7573 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7574 0, 0, 0, 0));
7575 break;
7576 #endif
7577 #ifdef TARGET_NR_ugetrlimit
7578 case TARGET_NR_ugetrlimit:
7580 struct rlimit rlim;
7581 int resource = target_to_host_resource(arg1);
7582 ret = get_errno(getrlimit(resource, &rlim));
7583 if (!is_error(ret)) {
7584 struct target_rlimit *target_rlim;
7585 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7586 goto efault;
7587 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7588 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7589 unlock_user_struct(target_rlim, arg2, 1);
7591 break;
7593 #endif
7594 #ifdef TARGET_NR_truncate64
7595 case TARGET_NR_truncate64:
7596 if (!(p = lock_user_string(arg1)))
7597 goto efault;
7598 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7599 unlock_user(p, arg1, 0);
7600 break;
7601 #endif
7602 #ifdef TARGET_NR_ftruncate64
7603 case TARGET_NR_ftruncate64:
7604 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7605 break;
7606 #endif
7607 #ifdef TARGET_NR_stat64
7608 case TARGET_NR_stat64:
7609 if (!(p = lock_user_string(arg1)))
7610 goto efault;
7611 ret = get_errno(stat(path(p), &st));
7612 unlock_user(p, arg1, 0);
7613 if (!is_error(ret))
7614 ret = host_to_target_stat64(cpu_env, arg2, &st);
7615 break;
7616 #endif
7617 #ifdef TARGET_NR_lstat64
7618 case TARGET_NR_lstat64:
7619 if (!(p = lock_user_string(arg1)))
7620 goto efault;
7621 ret = get_errno(lstat(path(p), &st));
7622 unlock_user(p, arg1, 0);
7623 if (!is_error(ret))
7624 ret = host_to_target_stat64(cpu_env, arg2, &st);
7625 break;
7626 #endif
7627 #ifdef TARGET_NR_fstat64
7628 case TARGET_NR_fstat64:
7629 ret = get_errno(fstat(arg1, &st));
7630 if (!is_error(ret))
7631 ret = host_to_target_stat64(cpu_env, arg2, &st);
7632 break;
7633 #endif
7634 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7635 #ifdef TARGET_NR_fstatat64
7636 case TARGET_NR_fstatat64:
7637 #endif
7638 #ifdef TARGET_NR_newfstatat
7639 case TARGET_NR_newfstatat:
7640 #endif
7641 if (!(p = lock_user_string(arg2)))
7642 goto efault;
7643 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
7644 if (!is_error(ret))
7645 ret = host_to_target_stat64(cpu_env, arg3, &st);
7646 break;
7647 #endif
7648 case TARGET_NR_lchown:
7649 if (!(p = lock_user_string(arg1)))
7650 goto efault;
7651 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7652 unlock_user(p, arg1, 0);
7653 break;
7654 #ifdef TARGET_NR_getuid
7655 case TARGET_NR_getuid:
7656 ret = get_errno(high2lowuid(getuid()));
7657 break;
7658 #endif
7659 #ifdef TARGET_NR_getgid
7660 case TARGET_NR_getgid:
7661 ret = get_errno(high2lowgid(getgid()));
7662 break;
7663 #endif
7664 #ifdef TARGET_NR_geteuid
7665 case TARGET_NR_geteuid:
7666 ret = get_errno(high2lowuid(geteuid()));
7667 break;
7668 #endif
7669 #ifdef TARGET_NR_getegid
7670 case TARGET_NR_getegid:
7671 ret = get_errno(high2lowgid(getegid()));
7672 break;
7673 #endif
7674 case TARGET_NR_setreuid:
7675 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7676 break;
7677 case TARGET_NR_setregid:
7678 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7679 break;
7680 case TARGET_NR_getgroups:
7682 int gidsetsize = arg1;
7683 target_id *target_grouplist;
7684 gid_t *grouplist;
7685 int i;
7687 grouplist = alloca(gidsetsize * sizeof(gid_t));
7688 ret = get_errno(getgroups(gidsetsize, grouplist));
7689 if (gidsetsize == 0)
7690 break;
7691 if (!is_error(ret)) {
7692 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
7693 if (!target_grouplist)
7694 goto efault;
7695 for(i = 0;i < ret; i++)
7696 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7697 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
7700 break;
7701 case TARGET_NR_setgroups:
7703 int gidsetsize = arg1;
7704 target_id *target_grouplist;
7705 gid_t *grouplist = NULL;
7706 int i;
7707 if (gidsetsize) {
7708 grouplist = alloca(gidsetsize * sizeof(gid_t));
7709 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
7710 if (!target_grouplist) {
7711 ret = -TARGET_EFAULT;
7712 goto fail;
7714 for (i = 0; i < gidsetsize; i++) {
7715 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7717 unlock_user(target_grouplist, arg2, 0);
7719 ret = get_errno(setgroups(gidsetsize, grouplist));
7721 break;
7722 case TARGET_NR_fchown:
7723 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7724 break;
7725 #if defined(TARGET_NR_fchownat)
7726 case TARGET_NR_fchownat:
7727 if (!(p = lock_user_string(arg2)))
7728 goto efault;
7729 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
7730 low2highgid(arg4), arg5));
7731 unlock_user(p, arg2, 0);
7732 break;
7733 #endif
7734 #ifdef TARGET_NR_setresuid
7735 case TARGET_NR_setresuid:
7736 ret = get_errno(setresuid(low2highuid(arg1),
7737 low2highuid(arg2),
7738 low2highuid(arg3)));
7739 break;
7740 #endif
7741 #ifdef TARGET_NR_getresuid
7742 case TARGET_NR_getresuid:
7744 uid_t ruid, euid, suid;
7745 ret = get_errno(getresuid(&ruid, &euid, &suid));
7746 if (!is_error(ret)) {
7747 if (put_user_u16(high2lowuid(ruid), arg1)
7748 || put_user_u16(high2lowuid(euid), arg2)
7749 || put_user_u16(high2lowuid(suid), arg3))
7750 goto efault;
7753 break;
7754 #endif
7755 #ifdef TARGET_NR_getresgid
7756 case TARGET_NR_setresgid:
7757 ret = get_errno(setresgid(low2highgid(arg1),
7758 low2highgid(arg2),
7759 low2highgid(arg3)));
7760 break;
7761 #endif
7762 #ifdef TARGET_NR_getresgid
7763 case TARGET_NR_getresgid:
7765 gid_t rgid, egid, sgid;
7766 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7767 if (!is_error(ret)) {
7768 if (put_user_u16(high2lowgid(rgid), arg1)
7769 || put_user_u16(high2lowgid(egid), arg2)
7770 || put_user_u16(high2lowgid(sgid), arg3))
7771 goto efault;
7774 break;
7775 #endif
7776 case TARGET_NR_chown:
7777 if (!(p = lock_user_string(arg1)))
7778 goto efault;
7779 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7780 unlock_user(p, arg1, 0);
7781 break;
7782 case TARGET_NR_setuid:
7783 ret = get_errno(setuid(low2highuid(arg1)));
7784 break;
7785 case TARGET_NR_setgid:
7786 ret = get_errno(setgid(low2highgid(arg1)));
7787 break;
7788 case TARGET_NR_setfsuid:
7789 ret = get_errno(setfsuid(arg1));
7790 break;
7791 case TARGET_NR_setfsgid:
7792 ret = get_errno(setfsgid(arg1));
7793 break;
7795 #ifdef TARGET_NR_lchown32
7796 case TARGET_NR_lchown32:
7797 if (!(p = lock_user_string(arg1)))
7798 goto efault;
7799 ret = get_errno(lchown(p, arg2, arg3));
7800 unlock_user(p, arg1, 0);
7801 break;
7802 #endif
7803 #ifdef TARGET_NR_getuid32
7804 case TARGET_NR_getuid32:
7805 ret = get_errno(getuid());
7806 break;
7807 #endif
7809 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7810 /* Alpha specific */
7811 case TARGET_NR_getxuid:
7813 uid_t euid;
7814 euid=geteuid();
7815 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7817 ret = get_errno(getuid());
7818 break;
7819 #endif
7820 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7821 /* Alpha specific */
7822 case TARGET_NR_getxgid:
7824 uid_t egid;
7825 egid=getegid();
7826 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7828 ret = get_errno(getgid());
7829 break;
7830 #endif
7831 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7832 /* Alpha specific */
7833 case TARGET_NR_osf_getsysinfo:
7834 ret = -TARGET_EOPNOTSUPP;
7835 switch (arg1) {
7836 case TARGET_GSI_IEEE_FP_CONTROL:
7838 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7840 /* Copied from linux ieee_fpcr_to_swcr. */
7841 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7842 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7843 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7844 | SWCR_TRAP_ENABLE_DZE
7845 | SWCR_TRAP_ENABLE_OVF);
7846 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7847 | SWCR_TRAP_ENABLE_INE);
7848 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7849 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7851 if (put_user_u64 (swcr, arg2))
7852 goto efault;
7853 ret = 0;
7855 break;
7857 /* case GSI_IEEE_STATE_AT_SIGNAL:
7858 -- Not implemented in linux kernel.
7859 case GSI_UACPROC:
7860 -- Retrieves current unaligned access state; not much used.
7861 case GSI_PROC_TYPE:
7862 -- Retrieves implver information; surely not used.
7863 case GSI_GET_HWRPB:
7864 -- Grabs a copy of the HWRPB; surely not used. */
7867 break;
7868 #endif
7869 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7870 /* Alpha specific */
7871 case TARGET_NR_osf_setsysinfo:
7872 ret = -TARGET_EOPNOTSUPP;
7873 switch (arg1) {
7874 case TARGET_SSI_IEEE_FP_CONTROL:
7876 uint64_t swcr, fpcr, orig_fpcr;
7878 if (get_user_u64 (swcr, arg2)) {
7879 goto efault;
7881 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7882 fpcr = orig_fpcr & FPCR_DYN_MASK;
7884 /* Copied from linux ieee_swcr_to_fpcr. */
7885 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7886 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7887 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7888 | SWCR_TRAP_ENABLE_DZE
7889 | SWCR_TRAP_ENABLE_OVF)) << 48;
7890 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7891 | SWCR_TRAP_ENABLE_INE)) << 57;
7892 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7893 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7895 cpu_alpha_store_fpcr(cpu_env, fpcr);
7896 ret = 0;
7898 break;
7900 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7902 uint64_t exc, fpcr, orig_fpcr;
7903 int si_code;
7905 if (get_user_u64(exc, arg2)) {
7906 goto efault;
7909 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7911 /* We only add to the exception status here. */
7912 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7914 cpu_alpha_store_fpcr(cpu_env, fpcr);
7915 ret = 0;
7917 /* Old exceptions are not signaled. */
7918 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7920 /* If any exceptions are set by this call,
7921 and are unmasked, send a signal. */
7922 si_code = 0;
7923 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7924 si_code = TARGET_FPE_FLTRES;
7926 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7927 si_code = TARGET_FPE_FLTUND;
7929 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7930 si_code = TARGET_FPE_FLTOVF;
7932 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7933 si_code = TARGET_FPE_FLTDIV;
7935 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7936 si_code = TARGET_FPE_FLTINV;
7938 if (si_code != 0) {
7939 target_siginfo_t info;
7940 info.si_signo = SIGFPE;
7941 info.si_errno = 0;
7942 info.si_code = si_code;
7943 info._sifields._sigfault._addr
7944 = ((CPUArchState *)cpu_env)->pc;
7945 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7948 break;
7950 /* case SSI_NVPAIRS:
7951 -- Used with SSIN_UACPROC to enable unaligned accesses.
7952 case SSI_IEEE_STATE_AT_SIGNAL:
7953 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7954 -- Not implemented in linux kernel */
7957 break;
7958 #endif
7959 #ifdef TARGET_NR_osf_sigprocmask
7960 /* Alpha specific. */
7961 case TARGET_NR_osf_sigprocmask:
7963 abi_ulong mask;
7964 int how;
7965 sigset_t set, oldset;
7967 switch(arg1) {
7968 case TARGET_SIG_BLOCK:
7969 how = SIG_BLOCK;
7970 break;
7971 case TARGET_SIG_UNBLOCK:
7972 how = SIG_UNBLOCK;
7973 break;
7974 case TARGET_SIG_SETMASK:
7975 how = SIG_SETMASK;
7976 break;
7977 default:
7978 ret = -TARGET_EINVAL;
7979 goto fail;
7981 mask = arg2;
7982 target_to_host_old_sigset(&set, &mask);
7983 sigprocmask(how, &set, &oldset);
7984 host_to_target_old_sigset(&mask, &oldset);
7985 ret = mask;
7987 break;
7988 #endif
7990 #ifdef TARGET_NR_getgid32
7991 case TARGET_NR_getgid32:
7992 ret = get_errno(getgid());
7993 break;
7994 #endif
7995 #ifdef TARGET_NR_geteuid32
7996 case TARGET_NR_geteuid32:
7997 ret = get_errno(geteuid());
7998 break;
7999 #endif
8000 #ifdef TARGET_NR_getegid32
8001 case TARGET_NR_getegid32:
8002 ret = get_errno(getegid());
8003 break;
8004 #endif
8005 #ifdef TARGET_NR_setreuid32
8006 case TARGET_NR_setreuid32:
8007 ret = get_errno(setreuid(arg1, arg2));
8008 break;
8009 #endif
8010 #ifdef TARGET_NR_setregid32
8011 case TARGET_NR_setregid32:
8012 ret = get_errno(setregid(arg1, arg2));
8013 break;
8014 #endif
8015 #ifdef TARGET_NR_getgroups32
8016 case TARGET_NR_getgroups32:
8018 int gidsetsize = arg1;
8019 uint32_t *target_grouplist;
8020 gid_t *grouplist;
8021 int i;
8023 grouplist = alloca(gidsetsize * sizeof(gid_t));
8024 ret = get_errno(getgroups(gidsetsize, grouplist));
8025 if (gidsetsize == 0)
8026 break;
8027 if (!is_error(ret)) {
8028 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8029 if (!target_grouplist) {
8030 ret = -TARGET_EFAULT;
8031 goto fail;
8033 for(i = 0;i < ret; i++)
8034 target_grouplist[i] = tswap32(grouplist[i]);
8035 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8038 break;
8039 #endif
8040 #ifdef TARGET_NR_setgroups32
8041 case TARGET_NR_setgroups32:
8043 int gidsetsize = arg1;
8044 uint32_t *target_grouplist;
8045 gid_t *grouplist;
8046 int i;
8048 grouplist = alloca(gidsetsize * sizeof(gid_t));
8049 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8050 if (!target_grouplist) {
8051 ret = -TARGET_EFAULT;
8052 goto fail;
8054 for(i = 0;i < gidsetsize; i++)
8055 grouplist[i] = tswap32(target_grouplist[i]);
8056 unlock_user(target_grouplist, arg2, 0);
8057 ret = get_errno(setgroups(gidsetsize, grouplist));
8059 break;
8060 #endif
8061 #ifdef TARGET_NR_fchown32
8062 case TARGET_NR_fchown32:
8063 ret = get_errno(fchown(arg1, arg2, arg3));
8064 break;
8065 #endif
8066 #ifdef TARGET_NR_setresuid32
8067 case TARGET_NR_setresuid32:
8068 ret = get_errno(setresuid(arg1, arg2, arg3));
8069 break;
8070 #endif
8071 #ifdef TARGET_NR_getresuid32
8072 case TARGET_NR_getresuid32:
8074 uid_t ruid, euid, suid;
8075 ret = get_errno(getresuid(&ruid, &euid, &suid));
8076 if (!is_error(ret)) {
8077 if (put_user_u32(ruid, arg1)
8078 || put_user_u32(euid, arg2)
8079 || put_user_u32(suid, arg3))
8080 goto efault;
8083 break;
8084 #endif
8085 #ifdef TARGET_NR_setresgid32
8086 case TARGET_NR_setresgid32:
8087 ret = get_errno(setresgid(arg1, arg2, arg3));
8088 break;
8089 #endif
8090 #ifdef TARGET_NR_getresgid32
8091 case TARGET_NR_getresgid32:
8093 gid_t rgid, egid, sgid;
8094 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8095 if (!is_error(ret)) {
8096 if (put_user_u32(rgid, arg1)
8097 || put_user_u32(egid, arg2)
8098 || put_user_u32(sgid, arg3))
8099 goto efault;
8102 break;
8103 #endif
8104 #ifdef TARGET_NR_chown32
8105 case TARGET_NR_chown32:
8106 if (!(p = lock_user_string(arg1)))
8107 goto efault;
8108 ret = get_errno(chown(p, arg2, arg3));
8109 unlock_user(p, arg1, 0);
8110 break;
8111 #endif
8112 #ifdef TARGET_NR_setuid32
8113 case TARGET_NR_setuid32:
8114 ret = get_errno(setuid(arg1));
8115 break;
8116 #endif
8117 #ifdef TARGET_NR_setgid32
8118 case TARGET_NR_setgid32:
8119 ret = get_errno(setgid(arg1));
8120 break;
8121 #endif
8122 #ifdef TARGET_NR_setfsuid32
8123 case TARGET_NR_setfsuid32:
8124 ret = get_errno(setfsuid(arg1));
8125 break;
8126 #endif
8127 #ifdef TARGET_NR_setfsgid32
8128 case TARGET_NR_setfsgid32:
8129 ret = get_errno(setfsgid(arg1));
8130 break;
8131 #endif
8133 case TARGET_NR_pivot_root:
8134 goto unimplemented;
8135 #ifdef TARGET_NR_mincore
8136 case TARGET_NR_mincore:
8137 {
8138 void *a;
8139 ret = -TARGET_EFAULT;
8140 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8141 goto efault;
8142 if (!(p = lock_user_string(arg3)))
8143 goto mincore_fail;
8144 ret = get_errno(mincore(a, arg2, p));
8145 unlock_user(p, arg3, ret);
8146 mincore_fail:
8147 unlock_user(a, arg1, 0);
8148 }
8149 break;
8150 #endif
8151 #ifdef TARGET_NR_arm_fadvise64_64
8152 case TARGET_NR_arm_fadvise64_64:
8153 {
8154 /*
8155 * arm_fadvise64_64 looks like fadvise64_64 but
8156 * with different argument order
8157 */
8158 abi_long temp;
8159 temp = arg3;
8160 arg3 = arg4;
8161 arg4 = temp;
8162 }
8163 #endif
8164 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8165 #ifdef TARGET_NR_fadvise64_64
8166 case TARGET_NR_fadvise64_64:
8167 #endif
8168 #ifdef TARGET_NR_fadvise64
8169 case TARGET_NR_fadvise64:
8170 #endif
8171 #ifdef TARGET_S390X
8172 switch (arg4) {
8173 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8174 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8175 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8176 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8177 default: break;
8178 }
8179 #endif
8180 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8181 break;
8182 #endif
8183 #ifdef TARGET_NR_madvise
8184 case TARGET_NR_madvise:
8185 /* A straight passthrough may not be safe because qemu sometimes
8186 turns private file-backed mappings into anonymous mappings.
8187 This will break MADV_DONTNEED.
8188 This is a hint, so ignoring and returning success is ok. */
8189 ret = get_errno(0);
8190 break;
8191 #endif
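/* On 32-bit ABIs fcntl64 carries a 64-bit struct flock64. The guest
 * layout (including the padded ARM EABI variant) is converted to the
 * host struct flock64 and back for GETLK64/SETLK64/SETLKW64; all other
 * commands fall through to the generic do_fcntl() path. */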
8192 #if TARGET_ABI_BITS == 32
8193 case TARGET_NR_fcntl64:
8194 {
8195 int cmd;
8196 struct flock64 fl;
8197 struct target_flock64 *target_fl;
8198 #ifdef TARGET_ARM
8199 struct target_eabi_flock64 *target_efl;
8200 #endif
8202 cmd = target_to_host_fcntl_cmd(arg2);
8203 if (cmd == -TARGET_EINVAL) {
8204 ret = cmd;
8205 break;
8206 }
8208 switch(arg2) {
8209 case TARGET_F_GETLK64:
8210 #ifdef TARGET_ARM
8211 if (((CPUARMState *)cpu_env)->eabi) {
8212 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8213 goto efault;
8214 fl.l_type = tswap16(target_efl->l_type);
8215 fl.l_whence = tswap16(target_efl->l_whence);
8216 fl.l_start = tswap64(target_efl->l_start);
8217 fl.l_len = tswap64(target_efl->l_len);
8218 fl.l_pid = tswap32(target_efl->l_pid);
8219 unlock_user_struct(target_efl, arg3, 0);
8220 } else
8221 #endif
8222 {
8223 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8224 goto efault;
8225 fl.l_type = tswap16(target_fl->l_type);
8226 fl.l_whence = tswap16(target_fl->l_whence);
8227 fl.l_start = tswap64(target_fl->l_start);
8228 fl.l_len = tswap64(target_fl->l_len);
8229 fl.l_pid = tswap32(target_fl->l_pid);
8230 unlock_user_struct(target_fl, arg3, 0);
8231 }
8232 ret = get_errno(fcntl(arg1, cmd, &fl));
8233 if (ret == 0) {
8234 #ifdef TARGET_ARM
8235 if (((CPUARMState *)cpu_env)->eabi) {
8236 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8237 goto efault;
8238 target_efl->l_type = tswap16(fl.l_type);
8239 target_efl->l_whence = tswap16(fl.l_whence);
8240 target_efl->l_start = tswap64(fl.l_start);
8241 target_efl->l_len = tswap64(fl.l_len);
8242 target_efl->l_pid = tswap32(fl.l_pid);
8243 unlock_user_struct(target_efl, arg3, 1);
8244 } else
8245 #endif
8246 {
8247 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8248 goto efault;
8249 target_fl->l_type = tswap16(fl.l_type);
8250 target_fl->l_whence = tswap16(fl.l_whence);
8251 target_fl->l_start = tswap64(fl.l_start);
8252 target_fl->l_len = tswap64(fl.l_len);
8253 target_fl->l_pid = tswap32(fl.l_pid);
8254 unlock_user_struct(target_fl, arg3, 1);
8255 }
8256 }
8257 break;
8259 case TARGET_F_SETLK64:
8260 case TARGET_F_SETLKW64:
8261 #ifdef TARGET_ARM
8262 if (((CPUARMState *)cpu_env)->eabi) {
8263 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8264 goto efault;
8265 fl.l_type = tswap16(target_efl->l_type);
8266 fl.l_whence = tswap16(target_efl->l_whence);
8267 fl.l_start = tswap64(target_efl->l_start);
8268 fl.l_len = tswap64(target_efl->l_len);
8269 fl.l_pid = tswap32(target_efl->l_pid);
8270 unlock_user_struct(target_efl, arg3, 0);
8271 } else
8272 #endif
8273 {
8274 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8275 goto efault;
8276 fl.l_type = tswap16(target_fl->l_type);
8277 fl.l_whence = tswap16(target_fl->l_whence);
8278 fl.l_start = tswap64(target_fl->l_start);
8279 fl.l_len = tswap64(target_fl->l_len);
8280 fl.l_pid = tswap32(target_fl->l_pid);
8281 unlock_user_struct(target_fl, arg3, 0);
8282 }
8283 ret = get_errno(fcntl(arg1, cmd, &fl));
8284 break;
8285 default:
8286 ret = do_fcntl(arg1, arg2, arg3);
8287 break;
8288 }
8289 break;
8290 }
8291 #endif
8292 #ifdef TARGET_NR_cacheflush
8293 case TARGET_NR_cacheflush:
8294 /* self-modifying code is handled automatically, so nothing needed */
8295 ret = 0;
8296 break;
8297 #endif
8298 #ifdef TARGET_NR_security
8299 case TARGET_NR_security:
8300 goto unimplemented;
8301 #endif
8302 #ifdef TARGET_NR_getpagesize
8303 case TARGET_NR_getpagesize:
8304 ret = TARGET_PAGE_SIZE;
8305 break;
8306 #endif
8307 case TARGET_NR_gettid:
8308 ret = get_errno(gettid());
8309 break;
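/* readahead: on 32-bit ABIs the 64-bit offset is split across two
 * registers (shifted up by one argument slot when the ABI aligns
 * register pairs), so it is reassembled before the host call. */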
8310 #ifdef TARGET_NR_readahead
8311 case TARGET_NR_readahead:
8312 #if TARGET_ABI_BITS == 32
8313 if (regpairs_aligned(cpu_env)) {
8314 arg2 = arg3;
8315 arg3 = arg4;
8316 arg4 = arg5;
8318 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8319 #else
8320 ret = get_errno(readahead(arg1, arg2, arg3));
8321 #endif
8322 break;
8323 #endif
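/* Extended attribute syscalls: attribute names are locked as guest
 * strings and value/list buffers as plain guest memory; a zero buffer
 * pointer (size query) is passed through to the host unchanged. */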
8324 #ifdef CONFIG_ATTR
8325 #ifdef TARGET_NR_setxattr
8326 case TARGET_NR_listxattr:
8327 case TARGET_NR_llistxattr:
8329 void *p, *b = 0;
8330 if (arg2) {
8331 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8332 if (!b) {
8333 ret = -TARGET_EFAULT;
8334 break;
8337 p = lock_user_string(arg1);
8338 if (p) {
8339 if (num == TARGET_NR_listxattr) {
8340 ret = get_errno(listxattr(p, b, arg3));
8341 } else {
8342 ret = get_errno(llistxattr(p, b, arg3));
8344 } else {
8345 ret = -TARGET_EFAULT;
8347 unlock_user(p, arg1, 0);
8348 unlock_user(b, arg2, arg3);
8349 break;
8351 case TARGET_NR_flistxattr:
8353 void *b = 0;
8354 if (arg2) {
8355 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8356 if (!b) {
8357 ret = -TARGET_EFAULT;
8358 break;
8361 ret = get_errno(flistxattr(arg1, b, arg3));
8362 unlock_user(b, arg2, arg3);
8363 break;
8365 case TARGET_NR_setxattr:
8366 case TARGET_NR_lsetxattr:
8368 void *p, *n, *v = 0;
8369 if (arg3) {
8370 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8371 if (!v) {
8372 ret = -TARGET_EFAULT;
8373 break;
8376 p = lock_user_string(arg1);
8377 n = lock_user_string(arg2);
8378 if (p && n) {
8379 if (num == TARGET_NR_setxattr) {
8380 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8381 } else {
8382 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8384 } else {
8385 ret = -TARGET_EFAULT;
8387 unlock_user(p, arg1, 0);
8388 unlock_user(n, arg2, 0);
8389 unlock_user(v, arg3, 0);
8391 break;
8392 case TARGET_NR_fsetxattr:
8394 void *n, *v = 0;
8395 if (arg3) {
8396 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8397 if (!v) {
8398 ret = -TARGET_EFAULT;
8399 break;
8402 n = lock_user_string(arg2);
8403 if (n) {
8404 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8405 } else {
8406 ret = -TARGET_EFAULT;
8408 unlock_user(n, arg2, 0);
8409 unlock_user(v, arg3, 0);
8411 break;
8412 case TARGET_NR_getxattr:
8413 case TARGET_NR_lgetxattr:
8415 void *p, *n, *v = 0;
8416 if (arg3) {
8417 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8418 if (!v) {
8419 ret = -TARGET_EFAULT;
8420 break;
8423 p = lock_user_string(arg1);
8424 n = lock_user_string(arg2);
8425 if (p && n) {
8426 if (num == TARGET_NR_getxattr) {
8427 ret = get_errno(getxattr(p, n, v, arg4));
8428 } else {
8429 ret = get_errno(lgetxattr(p, n, v, arg4));
8431 } else {
8432 ret = -TARGET_EFAULT;
8434 unlock_user(p, arg1, 0);
8435 unlock_user(n, arg2, 0);
8436 unlock_user(v, arg3, arg4);
8438 break;
8439 case TARGET_NR_fgetxattr:
8441 void *n, *v = 0;
8442 if (arg3) {
8443 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8444 if (!v) {
8445 ret = -TARGET_EFAULT;
8446 break;
8449 n = lock_user_string(arg2);
8450 if (n) {
8451 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8452 } else {
8453 ret = -TARGET_EFAULT;
8455 unlock_user(n, arg2, 0);
8456 unlock_user(v, arg3, arg4);
8458 break;
8459 case TARGET_NR_removexattr:
8460 case TARGET_NR_lremovexattr:
8462 void *p, *n;
8463 p = lock_user_string(arg1);
8464 n = lock_user_string(arg2);
8465 if (p && n) {
8466 if (num == TARGET_NR_removexattr) {
8467 ret = get_errno(removexattr(p, n));
8468 } else {
8469 ret = get_errno(lremovexattr(p, n));
8471 } else {
8472 ret = -TARGET_EFAULT;
8474 unlock_user(p, arg1, 0);
8475 unlock_user(n, arg2, 0);
8477 break;
8478 case TARGET_NR_fremovexattr:
8480 void *n;
8481 n = lock_user_string(arg2);
8482 if (n) {
8483 ret = get_errno(fremovexattr(arg1, n));
8484 } else {
8485 ret = -TARGET_EFAULT;
8487 unlock_user(n, arg2, 0);
8489 break;
8490 #endif
8491 #endif /* CONFIG_ATTR */
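/* set/get_thread_area: TLS is handled per architecture - MIPS and CRIS
 * store the pointer in the CPU state, m68k keeps it in the TaskState,
 * and 32-bit x86 presumably goes through the GDT emulation helpers
 * do_set_thread_area()/do_get_thread_area(). */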
8492 #ifdef TARGET_NR_set_thread_area
8493 case TARGET_NR_set_thread_area:
8494 #if defined(TARGET_MIPS)
8495 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8496 ret = 0;
8497 break;
8498 #elif defined(TARGET_CRIS)
8499 if (arg1 & 0xff)
8500 ret = -TARGET_EINVAL;
8501 else {
8502 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8503 ret = 0;
8505 break;
8506 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8507 ret = do_set_thread_area(cpu_env, arg1);
8508 break;
8509 #elif defined(TARGET_M68K)
8511 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8512 ts->tp_value = arg1;
8513 ret = 0;
8514 break;
8516 #else
8517 goto unimplemented_nowarn;
8518 #endif
8519 #endif
8520 #ifdef TARGET_NR_get_thread_area
8521 case TARGET_NR_get_thread_area:
8522 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8523 ret = do_get_thread_area(cpu_env, arg1);
8524 break;
8525 #elif defined(TARGET_M68K)
8527 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8528 ret = ts->tp_value;
8529 break;
8531 #else
8532 goto unimplemented_nowarn;
8533 #endif
8534 #endif
8535 #ifdef TARGET_NR_getdomainname
8536 case TARGET_NR_getdomainname:
8537 goto unimplemented_nowarn;
8538 #endif
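/* POSIX clock calls: struct timespec values are converted between guest
 * and host layouts around the host library call. */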
8540 #ifdef TARGET_NR_clock_gettime
8541 case TARGET_NR_clock_gettime:
8543 struct timespec ts;
8544 ret = get_errno(clock_gettime(arg1, &ts));
8545 if (!is_error(ret)) {
8546 host_to_target_timespec(arg2, &ts);
8548 break;
8550 #endif
8551 #ifdef TARGET_NR_clock_getres
8552 case TARGET_NR_clock_getres:
8554 struct timespec ts;
8555 ret = get_errno(clock_getres(arg1, &ts));
8556 if (!is_error(ret)) {
8557 host_to_target_timespec(arg2, &ts);
8559 break;
8561 #endif
8562 #ifdef TARGET_NR_clock_nanosleep
8563 case TARGET_NR_clock_nanosleep:
8565 struct timespec ts;
8566 target_to_host_timespec(&ts, arg3);
8567 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8568 if (arg4)
8569 host_to_target_timespec(arg4, &ts);
8570 break;
8572 #endif
8574 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8575 case TARGET_NR_set_tid_address:
8576 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8577 break;
8578 #endif
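/* tkill/tgkill forward to the raw syscall wrappers after translating the
 * guest signal number to the host numbering. */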
8580 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8581 case TARGET_NR_tkill:
8582 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8583 break;
8584 #endif
8586 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8587 case TARGET_NR_tgkill:
8588 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8589 target_to_host_signal(arg3)));
8590 break;
8591 #endif
8593 #ifdef TARGET_NR_set_robust_list
8594 case TARGET_NR_set_robust_list:
8595 case TARGET_NR_get_robust_list:
8596 /* The ABI for supporting robust futexes has userspace pass
8597 * the kernel a pointer to a linked list which is updated by
8598 * userspace after the syscall; the list is walked by the kernel
8599 * when the thread exits. Since the linked list in QEMU guest
8600 * memory isn't a valid linked list for the host and we have
8601 * no way to reliably intercept the thread-death event, we can't
8602 * support these. Silently return ENOSYS so that guest userspace
8603 * falls back to a non-robust futex implementation (which should
8604 * be OK except in the corner case of the guest crashing while
8605 * holding a mutex that is shared with another process via
8606 * shared memory).
8607 */
8608 goto unimplemented_nowarn;
8609 #endif
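/* utimensat: a NULL times pointer (arg3) means "set both timestamps to
 * now"; otherwise the two guest timespecs are converted first. arg2 may
 * name a path interpreted relative to the directory fd in arg1. */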
8611 #if defined(TARGET_NR_utimensat)
8612 case TARGET_NR_utimensat:
8614 struct timespec *tsp, ts[2];
8615 if (!arg3) {
8616 tsp = NULL;
8617 } else {
8618 target_to_host_timespec(ts, arg3);
8619 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8620 tsp = ts;
8622 if (!arg2)
8623 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8624 else {
8625 if (!(p = lock_user_string(arg2))) {
8626 ret = -TARGET_EFAULT;
8627 goto fail;
8629 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8630 unlock_user(p, arg2, 0);
8633 break;
8634 #endif
8635 case TARGET_NR_futex:
8636 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8637 break;
8638 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8639 case TARGET_NR_inotify_init:
8640 ret = get_errno(sys_inotify_init());
8641 break;
8642 #endif
8643 #ifdef CONFIG_INOTIFY1
8644 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8645 case TARGET_NR_inotify_init1:
8646 ret = get_errno(sys_inotify_init1(arg1));
8647 break;
8648 #endif
8649 #endif
8650 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8651 case TARGET_NR_inotify_add_watch:
8652 p = lock_user_string(arg2);
8653 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8654 unlock_user(p, arg2, 0);
8655 break;
8656 #endif
8657 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8658 case TARGET_NR_inotify_rm_watch:
8659 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8660 break;
8661 #endif
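/* POSIX message queues: mq_attr structures and the optional timespec
 * timeouts are converted between guest and host; without a timeout the
 * plain mq_send/mq_receive variants are used. (Note the receive buffer
 * is locked with VERIFY_READ even though mq_receive writes into it,
 * which looks questionable.) */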
8663 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8664 case TARGET_NR_mq_open:
8666 struct mq_attr posix_mq_attr;
8668 p = lock_user_string(arg1 - 1);
8669 if (arg4 != 0)
8670 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8671 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
8672 unlock_user (p, arg1, 0);
8674 break;
8676 case TARGET_NR_mq_unlink:
8677 p = lock_user_string(arg1 - 1);
8678 ret = get_errno(mq_unlink(p));
8679 unlock_user (p, arg1, 0);
8680 break;
8682 case TARGET_NR_mq_timedsend:
8684 struct timespec ts;
8686 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8687 if (arg5 != 0) {
8688 target_to_host_timespec(&ts, arg5);
8689 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8690 host_to_target_timespec(arg5, &ts);
8692 else
8693 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8694 unlock_user (p, arg2, arg3);
8696 break;
8698 case TARGET_NR_mq_timedreceive:
8700 struct timespec ts;
8701 unsigned int prio;
8703 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8704 if (arg5 != 0) {
8705 target_to_host_timespec(&ts, arg5);
8706 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8707 host_to_target_timespec(arg5, &ts);
8709 else
8710 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8711 unlock_user (p, arg2, arg3);
8712 if (arg4 != 0)
8713 put_user_u32(prio, arg4);
8715 break;
8717 /* Not implemented for now... */
8718 /* case TARGET_NR_mq_notify: */
8719 /* break; */
8721 case TARGET_NR_mq_getsetattr:
8723 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8724 ret = 0;
8725 if (arg3 != 0) {
8726 ret = mq_getattr(arg1, &posix_mq_attr_out);
8727 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8729 if (arg2 != 0) {
8730 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8731 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8735 break;
8736 #endif
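/* splice family: tee passes its arguments straight through; splice
 * first fetches the optional 64-bit offsets from guest memory; vmsplice
 * converts the guest iovec with lock_iovec(). */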
8738 #ifdef CONFIG_SPLICE
8739 #ifdef TARGET_NR_tee
8740 case TARGET_NR_tee:
8742 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8744 break;
8745 #endif
8746 #ifdef TARGET_NR_splice
8747 case TARGET_NR_splice:
8748 {
8749 loff_t loff_in, loff_out;
8750 loff_t *ploff_in = NULL, *ploff_out = NULL;
8751 if(arg2) {
8752 get_user_u64(loff_in, arg2);
8753 ploff_in = &loff_in;
8754 }
8755 if(arg4) {
8756 get_user_u64(loff_out, arg4);
8757 ploff_out = &loff_out;
8758 }
8759 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8760 }
8761 break;
8762 #endif
8763 #ifdef TARGET_NR_vmsplice
8764 case TARGET_NR_vmsplice:
8766 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8767 if (vec != NULL) {
8768 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8769 unlock_iovec(vec, arg2, arg3, 0);
8770 } else {
8771 ret = -host_to_target_errno(errno);
8774 break;
8775 #endif
8776 #endif /* CONFIG_SPLICE */
8777 #ifdef CONFIG_EVENTFD
8778 #if defined(TARGET_NR_eventfd)
8779 case TARGET_NR_eventfd:
8780 ret = get_errno(eventfd(arg1, 0));
8781 break;
8782 #endif
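/* eventfd2: the guest's O_NONBLOCK/O_CLOEXEC flag values may differ
 * from the host's, so they are translated explicitly. */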
8783 #if defined(TARGET_NR_eventfd2)
8784 case TARGET_NR_eventfd2:
8785 {
8786 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
8787 if (arg2 & TARGET_O_NONBLOCK) {
8788 host_flags |= O_NONBLOCK;
8789 }
8790 if (arg2 & TARGET_O_CLOEXEC) {
8791 host_flags |= O_CLOEXEC;
8792 }
8793 ret = get_errno(eventfd(arg1, host_flags));
8794 break;
8795 }
8796 #endif
8797 #endif /* CONFIG_EVENTFD */
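/* fallocate/sync_file_range: on 32-bit ABIs the 64-bit offset and
 * length arguments arrive as register pairs and are reassembled with
 * target_offset64(); MIPS and the sync_file_range2 variant order the
 * arguments differently. */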
8798 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8799 case TARGET_NR_fallocate:
8800 #if TARGET_ABI_BITS == 32
8801 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8802 target_offset64(arg5, arg6)));
8803 #else
8804 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8805 #endif
8806 break;
8807 #endif
8808 #if defined(CONFIG_SYNC_FILE_RANGE)
8809 #if defined(TARGET_NR_sync_file_range)
8810 case TARGET_NR_sync_file_range:
8811 #if TARGET_ABI_BITS == 32
8812 #if defined(TARGET_MIPS)
8813 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8814 target_offset64(arg5, arg6), arg7));
8815 #else
8816 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8817 target_offset64(arg4, arg5), arg6));
8818 #endif /* !TARGET_MIPS */
8819 #else
8820 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8821 #endif
8822 break;
8823 #endif
8824 #if defined(TARGET_NR_sync_file_range2)
8825 case TARGET_NR_sync_file_range2:
8826 /* This is like sync_file_range but the arguments are reordered */
8827 #if TARGET_ABI_BITS == 32
8828 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8829 target_offset64(arg5, arg6), arg2));
8830 #else
8831 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8832 #endif
8833 break;
8834 #endif
8835 #endif
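/* epoll: epoll_event structures are converted between guest and host;
 * the epoll_data_t payload is treated as an opaque 64-bit value, and
 * epoll_pwait additionally converts the guest signal mask. */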
8836 #if defined(CONFIG_EPOLL)
8837 #if defined(TARGET_NR_epoll_create)
8838 case TARGET_NR_epoll_create:
8839 ret = get_errno(epoll_create(arg1));
8840 break;
8841 #endif
8842 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8843 case TARGET_NR_epoll_create1:
8844 ret = get_errno(epoll_create1(arg1));
8845 break;
8846 #endif
8847 #if defined(TARGET_NR_epoll_ctl)
8848 case TARGET_NR_epoll_ctl:
8850 struct epoll_event ep;
8851 struct epoll_event *epp = 0;
8852 if (arg4) {
8853 struct target_epoll_event *target_ep;
8854 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8855 goto efault;
8857 ep.events = tswap32(target_ep->events);
8858 /* The epoll_data_t union is just opaque data to the kernel,
8859 * so we transfer all 64 bits across and need not worry what
8860 * actual data type it is.
8861 */
8862 ep.data.u64 = tswap64(target_ep->data.u64);
8863 unlock_user_struct(target_ep, arg4, 0);
8864 epp = &ep;
8866 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8867 break;
8869 #endif
8871 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8872 #define IMPLEMENT_EPOLL_PWAIT
8873 #endif
8874 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8875 #if defined(TARGET_NR_epoll_wait)
8876 case TARGET_NR_epoll_wait:
8877 #endif
8878 #if defined(IMPLEMENT_EPOLL_PWAIT)
8879 case TARGET_NR_epoll_pwait:
8880 #endif
8882 struct target_epoll_event *target_ep;
8883 struct epoll_event *ep;
8884 int epfd = arg1;
8885 int maxevents = arg3;
8886 int timeout = arg4;
8888 target_ep = lock_user(VERIFY_WRITE, arg2,
8889 maxevents * sizeof(struct target_epoll_event), 1);
8890 if (!target_ep) {
8891 goto efault;
8894 ep = alloca(maxevents * sizeof(struct epoll_event));
8896 switch (num) {
8897 #if defined(IMPLEMENT_EPOLL_PWAIT)
8898 case TARGET_NR_epoll_pwait:
8900 target_sigset_t *target_set;
8901 sigset_t _set, *set = &_set;
8903 if (arg5) {
8904 target_set = lock_user(VERIFY_READ, arg5,
8905 sizeof(target_sigset_t), 1);
8906 if (!target_set) {
8907 unlock_user(target_ep, arg2, 0);
8908 goto efault;
8910 target_to_host_sigset(set, target_set);
8911 unlock_user(target_set, arg5, 0);
8912 } else {
8913 set = NULL;
8916 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8917 break;
8919 #endif
8920 #if defined(TARGET_NR_epoll_wait)
8921 case TARGET_NR_epoll_wait:
8922 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8923 break;
8924 #endif
8925 default:
8926 ret = -TARGET_ENOSYS;
8928 if (!is_error(ret)) {
8929 int i;
8930 for (i = 0; i < ret; i++) {
8931 target_ep[i].events = tswap32(ep[i].events);
8932 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8935 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8936 break;
8938 #endif
8939 #endif
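/* prlimit64 uses a fixed 64-bit rlimit layout on both sides, so only
 * the byte order of rlim_cur/rlim_max needs adjusting. */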
8940 #ifdef TARGET_NR_prlimit64
8941 case TARGET_NR_prlimit64:
8943 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8944 struct target_rlimit64 *target_rnew, *target_rold;
8945 struct host_rlimit64 rnew, rold, *rnewp = 0;
8946 if (arg3) {
8947 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8948 goto efault;
8950 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8951 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8952 unlock_user_struct(target_rnew, arg3, 0);
8953 rnewp = &rnew;
8956 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8957 if (!is_error(ret) && arg4) {
8958 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8959 goto efault;
8961 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8962 target_rold->rlim_max = tswap64(rold.rlim_max);
8963 unlock_user_struct(target_rold, arg4, 1);
8965 break;
8967 #endif
8968 #ifdef TARGET_NR_gethostname
8969 case TARGET_NR_gethostname:
8970 {
8971 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8972 if (name) {
8973 ret = get_errno(gethostname(name, arg2));
8974 unlock_user(name, arg1, arg2);
8975 } else {
8976 ret = -TARGET_EFAULT;
8977 }
8978 break;
8979 }
8980 #endif
8981 default:
8982 unimplemented:
8983 gemu_log("qemu: Unsupported syscall: %d\n", num);
8984 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8985 unimplemented_nowarn:
8986 #endif
8987 ret = -TARGET_ENOSYS;
8988 break;
8989 }
8990 fail:
8991 #ifdef DEBUG
8992 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8993 #endif
8994 if(do_strace)
8995 print_syscall_ret(num, ret);
8996 return ret;
8997 efault:
8998 ret = -TARGET_EFAULT;
8999 goto fail;