[qemu/ar7.git] / linux-user / syscall.c
blob a50229d0d72fc68966515fcf2bc308b833a3c032
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <linux/capability.h>
47 #include <signal.h>
48 #include <sched.h>
49 #ifdef __ia64__
50 int __clone2(int (*fn)(void *), void *child_stack_base,
51 size_t stack_size, int flags, void *arg, ...);
52 #endif
53 #include <sys/socket.h>
54 #include <sys/un.h>
55 #include <sys/uio.h>
56 #include <sys/poll.h>
57 #include <sys/times.h>
58 #include <sys/shm.h>
59 #include <sys/sem.h>
60 #include <sys/statfs.h>
61 #include <utime.h>
62 #include <sys/sysinfo.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/cdrom.h>
95 #include <linux/hdreg.h>
96 #include <linux/soundcard.h>
97 #include <linux/kd.h>
98 #include <linux/mtio.h>
99 #include <linux/fs.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include "linux_loop.h"
111 #include "uname.h"
113 #include "qemu.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
118 //#define DEBUG
120 //#include <linux/msdos_fs.h>
121 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
122 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
125 #undef _syscall0
126 #undef _syscall1
127 #undef _syscall2
128 #undef _syscall3
129 #undef _syscall4
130 #undef _syscall5
131 #undef _syscall6
133 #define _syscall0(type,name) \
134 static type name (void) \
135 { \
136 return syscall(__NR_##name); \
137 }
139 #define _syscall1(type,name,type1,arg1) \
140 static type name (type1 arg1) \
141 { \
142 return syscall(__NR_##name, arg1); \
143 }
145 #define _syscall2(type,name,type1,arg1,type2,arg2) \
146 static type name (type1 arg1,type2 arg2) \
147 { \
148 return syscall(__NR_##name, arg1, arg2); \
149 }
151 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
152 static type name (type1 arg1,type2 arg2,type3 arg3) \
153 { \
154 return syscall(__NR_##name, arg1, arg2, arg3); \
155 }
157 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
158 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
159 { \
160 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
161 }
163 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
164 type5,arg5) \
165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
166 { \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
168 }
171 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
172 type5,arg5,type6,arg6) \
173 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
174 type6 arg6) \
175 { \
176 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
177 }
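/*
 * Illustrative expansion (assuming the host defines __NR_gettid): the
 * invocation _syscall0(int, gettid) further down generates a thin
 * wrapper around the raw syscall(2) interface, roughly:
 *
 *     static int gettid (void)
 *     {
 *         return syscall(__NR_gettid);
 *     }
 *
 * The wrapper returns the host result unchanged, so callers still see
 * -1 with errno set on failure, exactly like a libc syscall stub.
 */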
180 #define __NR_sys_uname __NR_uname
181 #define __NR_sys_getcwd1 __NR_getcwd
182 #define __NR_sys_getdents __NR_getdents
183 #define __NR_sys_getdents64 __NR_getdents64
184 #define __NR_sys_getpriority __NR_getpriority
185 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
186 #define __NR_sys_syslog __NR_syslog
187 #define __NR_sys_tgkill __NR_tgkill
188 #define __NR_sys_tkill __NR_tkill
189 #define __NR_sys_futex __NR_futex
190 #define __NR_sys_inotify_init __NR_inotify_init
191 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
192 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
194 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
195 defined(__s390x__)
196 #define __NR__llseek __NR_lseek
197 #endif
199 /* Newer kernel ports have llseek() instead of _llseek() */
200 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
201 #define TARGET_NR__llseek TARGET_NR_llseek
202 #endif
204 #ifdef __NR_gettid
205 _syscall0(int, gettid)
206 #else
207 /* This is a replacement for the host gettid() and must return a host
208 errno. */
209 static int gettid(void) {
210 return -ENOSYS;
212 #endif
213 #ifdef __NR_getdents
214 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
215 #endif
216 #if !defined(__NR_getdents) || \
217 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
218 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
219 #endif
220 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
221 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
222 loff_t *, res, uint, wh);
223 #endif
224 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
225 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
226 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
227 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
228 #endif
229 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
230 _syscall2(int,sys_tkill,int,tid,int,sig)
231 #endif
232 #ifdef __NR_exit_group
233 _syscall1(int,exit_group,int,error_code)
234 #endif
235 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
236 _syscall1(int,set_tid_address,int *,tidptr)
237 #endif
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
240 const struct timespec *,timeout,int *,uaddr2,int,val3)
241 #endif
242 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
243 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
244 unsigned long *, user_mask_ptr);
245 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
246 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
247 unsigned long *, user_mask_ptr);
248 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
249 void *, arg);
250 _syscall2(int, capget, struct __user_cap_header_struct *, header,
251 struct __user_cap_data_struct *, data);
252 _syscall2(int, capset, struct __user_cap_header_struct *, header,
253 struct __user_cap_data_struct *, data);
255 static bitmask_transtbl fcntl_flags_tbl[] = {
256 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
257 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
258 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
259 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
260 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
261 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
262 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
263 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
264 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
265 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
266 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
267 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
268 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
269 #if defined(O_DIRECT)
270 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
271 #endif
272 #if defined(O_NOATIME)
273 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
274 #endif
275 #if defined(O_CLOEXEC)
276 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
277 #endif
278 #if defined(O_PATH)
279 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
280 #endif
281 /* Don't terminate the list prematurely on 64-bit host+guest. */
282 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
283 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
284 #endif
285 { 0, 0, 0, 0 }
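/*
 * Sketch of how a translation table like fcntl_flags_tbl is consumed by
 * target_to_host_bitmask() (used further down, e.g. for accept4 flags).
 * Struct, field and function names below are illustrative, not the exact
 * declarations used elsewhere in QEMU: each entry pairs a target
 * mask/bits pattern with the corresponding host mask/bits pattern, and
 * the lookup walks the table until the terminating all-zero entry.
 *
 *     struct flag_map { unsigned t_mask, t_bits, h_mask, h_bits; };
 *
 *     static unsigned map_target_flags(unsigned t_flags,
 *                                      const struct flag_map *tbl)
 *     {
 *         unsigned h_flags = 0;
 *         for (; tbl->t_mask || tbl->h_mask; tbl++) {
 *             if ((t_flags & tbl->t_mask) == tbl->t_bits) {
 *                 h_flags |= tbl->h_bits;
 *             }
 *         }
 *         return h_flags;
 *     }
 *
 * e.g. a target O_WRONLY|O_CREAT value comes out as the host's
 * O_WRONLY|O_CREAT even when the numeric bit values differ.
 */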
288 static int sys_getcwd1(char *buf, size_t size)
290 if (getcwd(buf, size) == NULL) {
291 /* getcwd() sets errno */
292 return (-1);
294 return strlen(buf)+1;
297 #ifdef TARGET_NR_openat
298 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
301 * open(2) has extra parameter 'mode' when called with
302 * flag O_CREAT.
304 if ((flags & O_CREAT) != 0) {
305 return (openat(dirfd, pathname, flags, mode));
307 return (openat(dirfd, pathname, flags));
309 #endif
311 #ifdef TARGET_NR_utimensat
312 #ifdef CONFIG_UTIMENSAT
313 static int sys_utimensat(int dirfd, const char *pathname,
314 const struct timespec times[2], int flags)
316 if (pathname == NULL)
317 return futimens(dirfd, times);
318 else
319 return utimensat(dirfd, pathname, times, flags);
321 #elif defined(__NR_utimensat)
322 #define __NR_sys_utimensat __NR_utimensat
323 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
324 const struct timespec *,tsp,int,flags)
325 #else
326 static int sys_utimensat(int dirfd, const char *pathname,
327 const struct timespec times[2], int flags)
329 errno = ENOSYS;
330 return -1;
332 #endif
333 #endif /* TARGET_NR_utimensat */
335 #ifdef CONFIG_INOTIFY
336 #include <sys/inotify.h>
338 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
339 static int sys_inotify_init(void)
341 return (inotify_init());
343 #endif
344 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
345 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
347 return (inotify_add_watch(fd, pathname, mask));
349 #endif
350 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
351 static int sys_inotify_rm_watch(int fd, int32_t wd)
353 return (inotify_rm_watch(fd, wd));
355 #endif
356 #ifdef CONFIG_INOTIFY1
357 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
358 static int sys_inotify_init1(int flags)
360 return (inotify_init1(flags));
362 #endif
363 #endif
364 #else
365 /* Userspace can usually survive runtime without inotify */
366 #undef TARGET_NR_inotify_init
367 #undef TARGET_NR_inotify_init1
368 #undef TARGET_NR_inotify_add_watch
369 #undef TARGET_NR_inotify_rm_watch
370 #endif /* CONFIG_INOTIFY */
372 #if defined(TARGET_NR_ppoll)
373 #ifndef __NR_ppoll
374 # define __NR_ppoll -1
375 #endif
376 #define __NR_sys_ppoll __NR_ppoll
377 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
378 struct timespec *, timeout, const sigset_t *, sigmask,
379 size_t, sigsetsize)
380 #endif
382 #if defined(TARGET_NR_pselect6)
383 #ifndef __NR_pselect6
384 # define __NR_pselect6 -1
385 #endif
386 #define __NR_sys_pselect6 __NR_pselect6
387 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
388 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
389 #endif
391 #if defined(TARGET_NR_prlimit64)
392 #ifndef __NR_prlimit64
393 # define __NR_prlimit64 -1
394 #endif
395 #define __NR_sys_prlimit64 __NR_prlimit64
396 /* The glibc rlimit structure may not match the one used by the underlying syscall */
397 struct host_rlimit64 {
398 uint64_t rlim_cur;
399 uint64_t rlim_max;
401 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
402 const struct host_rlimit64 *, new_limit,
403 struct host_rlimit64 *, old_limit)
404 #endif
407 #if defined(TARGET_NR_timer_create)
408 /* Maximum of 32 active POSIX timers allowed at any one time. */
409 static timer_t g_posix_timers[32] = { 0, } ;
411 static inline int next_free_host_timer(void)
413 int k ;
414 /* FIXME: Does finding the next free slot require a lock? */
415 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
416 if (g_posix_timers[k] == 0) {
417 g_posix_timers[k] = (timer_t) 1;
418 return k;
421 return -1;
423 #endif
425 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
426 #ifdef TARGET_ARM
427 static inline int regpairs_aligned(void *cpu_env) {
428 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
430 #elif defined(TARGET_MIPS)
431 static inline int regpairs_aligned(void *cpu_env) { return 1; }
432 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
433 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
434 * of registers which translates to the same as ARM/MIPS, because we start with
435 * r3 as arg1 */
436 static inline int regpairs_aligned(void *cpu_env) { return 1; }
437 #else
438 static inline int regpairs_aligned(void *cpu_env) { return 0; }
439 #endif
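/*
 * When regpairs_aligned() reports 1, a 64-bit syscall argument occupies
 * an aligned even/odd register pair and the preceding odd register is
 * skipped as padding. A minimal sketch of how the two 32-bit halves are
 * recombined on a 32-bit guest (helper name illustrative; the merge
 * order depends on target endianness):
 *
 *     static uint64_t merge_regpair(uint32_t word0, uint32_t word1)
 *     {
 *     #ifdef TARGET_WORDS_BIGENDIAN
 *         return ((uint64_t)word0 << 32) | word1;
 *     #else
 *         return ((uint64_t)word1 << 32) | word0;
 *     #endif
 *     }
 */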
441 #define ERRNO_TABLE_SIZE 1200
443 /* target_to_host_errno_table[] is initialized from
444 * host_to_target_errno_table[] in syscall_init(). */
445 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
449 * This list is the union of errno values overridden in asm-<arch>/errno.h
450 * minus the errnos that are not actually generic to all archs.
452 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
453 [EIDRM] = TARGET_EIDRM,
454 [ECHRNG] = TARGET_ECHRNG,
455 [EL2NSYNC] = TARGET_EL2NSYNC,
456 [EL3HLT] = TARGET_EL3HLT,
457 [EL3RST] = TARGET_EL3RST,
458 [ELNRNG] = TARGET_ELNRNG,
459 [EUNATCH] = TARGET_EUNATCH,
460 [ENOCSI] = TARGET_ENOCSI,
461 [EL2HLT] = TARGET_EL2HLT,
462 [EDEADLK] = TARGET_EDEADLK,
463 [ENOLCK] = TARGET_ENOLCK,
464 [EBADE] = TARGET_EBADE,
465 [EBADR] = TARGET_EBADR,
466 [EXFULL] = TARGET_EXFULL,
467 [ENOANO] = TARGET_ENOANO,
468 [EBADRQC] = TARGET_EBADRQC,
469 [EBADSLT] = TARGET_EBADSLT,
470 [EBFONT] = TARGET_EBFONT,
471 [ENOSTR] = TARGET_ENOSTR,
472 [ENODATA] = TARGET_ENODATA,
473 [ETIME] = TARGET_ETIME,
474 [ENOSR] = TARGET_ENOSR,
475 [ENONET] = TARGET_ENONET,
476 [ENOPKG] = TARGET_ENOPKG,
477 [EREMOTE] = TARGET_EREMOTE,
478 [ENOLINK] = TARGET_ENOLINK,
479 [EADV] = TARGET_EADV,
480 [ESRMNT] = TARGET_ESRMNT,
481 [ECOMM] = TARGET_ECOMM,
482 [EPROTO] = TARGET_EPROTO,
483 [EDOTDOT] = TARGET_EDOTDOT,
484 [EMULTIHOP] = TARGET_EMULTIHOP,
485 [EBADMSG] = TARGET_EBADMSG,
486 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
487 [EOVERFLOW] = TARGET_EOVERFLOW,
488 [ENOTUNIQ] = TARGET_ENOTUNIQ,
489 [EBADFD] = TARGET_EBADFD,
490 [EREMCHG] = TARGET_EREMCHG,
491 [ELIBACC] = TARGET_ELIBACC,
492 [ELIBBAD] = TARGET_ELIBBAD,
493 [ELIBSCN] = TARGET_ELIBSCN,
494 [ELIBMAX] = TARGET_ELIBMAX,
495 [ELIBEXEC] = TARGET_ELIBEXEC,
496 [EILSEQ] = TARGET_EILSEQ,
497 [ENOSYS] = TARGET_ENOSYS,
498 [ELOOP] = TARGET_ELOOP,
499 [ERESTART] = TARGET_ERESTART,
500 [ESTRPIPE] = TARGET_ESTRPIPE,
501 [ENOTEMPTY] = TARGET_ENOTEMPTY,
502 [EUSERS] = TARGET_EUSERS,
503 [ENOTSOCK] = TARGET_ENOTSOCK,
504 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
505 [EMSGSIZE] = TARGET_EMSGSIZE,
506 [EPROTOTYPE] = TARGET_EPROTOTYPE,
507 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
508 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
509 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
510 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
511 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
512 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
513 [EADDRINUSE] = TARGET_EADDRINUSE,
514 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
515 [ENETDOWN] = TARGET_ENETDOWN,
516 [ENETUNREACH] = TARGET_ENETUNREACH,
517 [ENETRESET] = TARGET_ENETRESET,
518 [ECONNABORTED] = TARGET_ECONNABORTED,
519 [ECONNRESET] = TARGET_ECONNRESET,
520 [ENOBUFS] = TARGET_ENOBUFS,
521 [EISCONN] = TARGET_EISCONN,
522 [ENOTCONN] = TARGET_ENOTCONN,
523 [EUCLEAN] = TARGET_EUCLEAN,
524 [ENOTNAM] = TARGET_ENOTNAM,
525 [ENAVAIL] = TARGET_ENAVAIL,
526 [EISNAM] = TARGET_EISNAM,
527 [EREMOTEIO] = TARGET_EREMOTEIO,
528 [ESHUTDOWN] = TARGET_ESHUTDOWN,
529 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
530 [ETIMEDOUT] = TARGET_ETIMEDOUT,
531 [ECONNREFUSED] = TARGET_ECONNREFUSED,
532 [EHOSTDOWN] = TARGET_EHOSTDOWN,
533 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
534 [EALREADY] = TARGET_EALREADY,
535 [EINPROGRESS] = TARGET_EINPROGRESS,
536 [ESTALE] = TARGET_ESTALE,
537 [ECANCELED] = TARGET_ECANCELED,
538 [ENOMEDIUM] = TARGET_ENOMEDIUM,
539 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
540 #ifdef ENOKEY
541 [ENOKEY] = TARGET_ENOKEY,
542 #endif
543 #ifdef EKEYEXPIRED
544 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
545 #endif
546 #ifdef EKEYREVOKED
547 [EKEYREVOKED] = TARGET_EKEYREVOKED,
548 #endif
549 #ifdef EKEYREJECTED
550 [EKEYREJECTED] = TARGET_EKEYREJECTED,
551 #endif
552 #ifdef EOWNERDEAD
553 [EOWNERDEAD] = TARGET_EOWNERDEAD,
554 #endif
555 #ifdef ENOTRECOVERABLE
556 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
557 #endif
560 static inline int host_to_target_errno(int err)
562 if(host_to_target_errno_table[err])
563 return host_to_target_errno_table[err];
564 return err;
567 static inline int target_to_host_errno(int err)
569 if (target_to_host_errno_table[err])
570 return target_to_host_errno_table[err];
571 return err;
574 static inline abi_long get_errno(abi_long ret)
576 if (ret == -1)
577 return -host_to_target_errno(errno);
578 else
579 return ret;
582 static inline int is_error(abi_long ret)
584 return (abi_ulong)ret >= (abi_ulong)(-4096);
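/*
 * Typical usage pattern for the two helpers above, as used throughout
 * this file: run the host call, fold errno into the return value, then
 * test the result with is_error() before touching guest memory (the
 * close(fd) call here is just a placeholder host syscall):
 *
 *     abi_long ret = get_errno(close(fd));
 *     if (is_error(ret)) {
 *         return ret;            // already a negative target errno
 *     }
 *     // success path: ret holds the non-negative host result
 */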
587 char *target_strerror(int err)
589 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
590 return NULL;
592 return strerror(target_to_host_errno(err));
595 static inline int host_to_target_sock_type(int host_type)
597 int target_type;
599 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
600 case SOCK_DGRAM:
601 target_type = TARGET_SOCK_DGRAM;
602 break;
603 case SOCK_STREAM:
604 target_type = TARGET_SOCK_STREAM;
605 break;
606 default:
607 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
608 break;
611 #if defined(SOCK_CLOEXEC)
612 if (host_type & SOCK_CLOEXEC) {
613 target_type |= TARGET_SOCK_CLOEXEC;
615 #endif
617 #if defined(SOCK_NONBLOCK)
618 if (host_type & SOCK_NONBLOCK) {
619 target_type |= TARGET_SOCK_NONBLOCK;
621 #endif
623 return target_type;
626 static abi_ulong target_brk;
627 static abi_ulong target_original_brk;
628 static abi_ulong brk_page;
630 void target_set_brk(abi_ulong new_brk)
632 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
633 brk_page = HOST_PAGE_ALIGN(target_brk);
636 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
637 #define DEBUGF_BRK(message, args...)
639 /* do_brk() must return target values and target errnos. */
640 abi_long do_brk(abi_ulong new_brk)
642 abi_long mapped_addr;
643 int new_alloc_size;
645 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
647 if (!new_brk) {
648 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
649 return target_brk;
651 if (new_brk < target_original_brk) {
652 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
653 target_brk);
654 return target_brk;
657 /* If the new brk is less than the highest page reserved to the
658 * target heap allocation, set it and we're almost done... */
659 if (new_brk <= brk_page) {
660 /* Heap contents are initialized to zero, as for anonymous
661 * mapped pages. */
662 if (new_brk > target_brk) {
663 memset(g2h(target_brk), 0, new_brk - target_brk);
665 target_brk = new_brk;
666 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
667 return target_brk;
670 /* We need to allocate more memory after the brk... Note that
671 * we don't use MAP_FIXED because that will map over the top of
672 * any existing mapping (like the one with the host libc or qemu
673 * itself); instead we treat "mapped but at wrong address" as
674 * a failure and unmap again.
676 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
677 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
678 PROT_READ|PROT_WRITE,
679 MAP_ANON|MAP_PRIVATE, 0, 0));
681 if (mapped_addr == brk_page) {
682 /* Heap contents are initialized to zero, as for anonymous
683 * mapped pages. Technically the new pages are already
684 * initialized to zero since they *are* anonymous mapped
685 * pages, however we have to take care with the contents that
686 * come from the remaining part of the previous page: it may
687 * contain garbage data from a previous heap usage (grown
688 * then shrunk). */
689 memset(g2h(target_brk), 0, brk_page - target_brk);
691 target_brk = new_brk;
692 brk_page = HOST_PAGE_ALIGN(target_brk);
693 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
694 target_brk);
695 return target_brk;
696 } else if (mapped_addr != -1) {
697 /* Mapped but at wrong address, meaning there wasn't actually
698 * enough space for this brk.
700 target_munmap(mapped_addr, new_alloc_size);
701 mapped_addr = -1;
702 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
704 else {
705 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
708 #if defined(TARGET_ALPHA)
709 /* We (partially) emulate OSF/1 on Alpha, which requires we
710 return a proper errno, not an unchanged brk value. */
711 return -TARGET_ENOMEM;
712 #endif
713 /* For everything else, return the previous break. */
714 return target_brk;
717 static inline abi_long copy_from_user_fdset(fd_set *fds,
718 abi_ulong target_fds_addr,
719 int n)
721 int i, nw, j, k;
722 abi_ulong b, *target_fds;
724 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
725 if (!(target_fds = lock_user(VERIFY_READ,
726 target_fds_addr,
727 sizeof(abi_ulong) * nw,
728 1)))
729 return -TARGET_EFAULT;
731 FD_ZERO(fds);
732 k = 0;
733 for (i = 0; i < nw; i++) {
734 /* grab the abi_ulong */
735 __get_user(b, &target_fds[i]);
736 for (j = 0; j < TARGET_ABI_BITS; j++) {
737 /* check the bit inside the abi_ulong */
738 if ((b >> j) & 1)
739 FD_SET(k, fds);
740 k++;
744 unlock_user(target_fds, target_fds_addr, 0);
746 return 0;
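/*
 * Worked example for the unpacking loop above: with TARGET_ABI_BITS ==
 * 32 and n == 40, nw is 2, so two abi_ulong words are read from guest
 * memory. Guest fd 35 lives in word 1, bit 3 (35 = 1*32 + 3); if that
 * bit is set, FD_SET(35, fds) is applied to the host set. The packing
 * loop in copy_to_user_fdset() below mirrors this layout bit for bit.
 */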
749 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
750 abi_ulong target_fds_addr,
751 int n)
753 if (target_fds_addr) {
754 if (copy_from_user_fdset(fds, target_fds_addr, n))
755 return -TARGET_EFAULT;
756 *fds_ptr = fds;
757 } else {
758 *fds_ptr = NULL;
760 return 0;
763 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
764 const fd_set *fds,
765 int n)
767 int i, nw, j, k;
768 abi_long v;
769 abi_ulong *target_fds;
771 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
772 if (!(target_fds = lock_user(VERIFY_WRITE,
773 target_fds_addr,
774 sizeof(abi_ulong) * nw,
775 0)))
776 return -TARGET_EFAULT;
778 k = 0;
779 for (i = 0; i < nw; i++) {
780 v = 0;
781 for (j = 0; j < TARGET_ABI_BITS; j++) {
782 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
783 k++;
785 __put_user(v, &target_fds[i]);
788 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
790 return 0;
793 #if defined(__alpha__)
794 #define HOST_HZ 1024
795 #else
796 #define HOST_HZ 100
797 #endif
799 static inline abi_long host_to_target_clock_t(long ticks)
801 #if HOST_HZ == TARGET_HZ
802 return ticks;
803 #else
804 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
805 #endif
808 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
809 const struct rusage *rusage)
811 struct target_rusage *target_rusage;
813 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
814 return -TARGET_EFAULT;
815 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
816 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
817 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
818 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
819 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
820 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
821 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
822 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
823 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
824 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
825 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
826 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
827 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
828 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
829 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
830 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
831 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
832 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
833 unlock_user_struct(target_rusage, target_addr, 1);
835 return 0;
838 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
840 abi_ulong target_rlim_swap;
841 rlim_t result;
843 target_rlim_swap = tswapal(target_rlim);
844 if (target_rlim_swap == TARGET_RLIM_INFINITY)
845 return RLIM_INFINITY;
847 result = target_rlim_swap;
848 if (target_rlim_swap != (rlim_t)result)
849 return RLIM_INFINITY;
851 return result;
854 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
856 abi_ulong target_rlim_swap;
857 abi_ulong result;
859 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
860 target_rlim_swap = TARGET_RLIM_INFINITY;
861 else
862 target_rlim_swap = rlim;
863 result = tswapal(target_rlim_swap);
865 return result;
868 static inline int target_to_host_resource(int code)
870 switch (code) {
871 case TARGET_RLIMIT_AS:
872 return RLIMIT_AS;
873 case TARGET_RLIMIT_CORE:
874 return RLIMIT_CORE;
875 case TARGET_RLIMIT_CPU:
876 return RLIMIT_CPU;
877 case TARGET_RLIMIT_DATA:
878 return RLIMIT_DATA;
879 case TARGET_RLIMIT_FSIZE:
880 return RLIMIT_FSIZE;
881 case TARGET_RLIMIT_LOCKS:
882 return RLIMIT_LOCKS;
883 case TARGET_RLIMIT_MEMLOCK:
884 return RLIMIT_MEMLOCK;
885 case TARGET_RLIMIT_MSGQUEUE:
886 return RLIMIT_MSGQUEUE;
887 case TARGET_RLIMIT_NICE:
888 return RLIMIT_NICE;
889 case TARGET_RLIMIT_NOFILE:
890 return RLIMIT_NOFILE;
891 case TARGET_RLIMIT_NPROC:
892 return RLIMIT_NPROC;
893 case TARGET_RLIMIT_RSS:
894 return RLIMIT_RSS;
895 case TARGET_RLIMIT_RTPRIO:
896 return RLIMIT_RTPRIO;
897 case TARGET_RLIMIT_SIGPENDING:
898 return RLIMIT_SIGPENDING;
899 case TARGET_RLIMIT_STACK:
900 return RLIMIT_STACK;
901 default:
902 return code;
906 static inline abi_long copy_from_user_timeval(struct timeval *tv,
907 abi_ulong target_tv_addr)
909 struct target_timeval *target_tv;
911 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
912 return -TARGET_EFAULT;
914 __get_user(tv->tv_sec, &target_tv->tv_sec);
915 __get_user(tv->tv_usec, &target_tv->tv_usec);
917 unlock_user_struct(target_tv, target_tv_addr, 0);
919 return 0;
922 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
923 const struct timeval *tv)
925 struct target_timeval *target_tv;
927 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
928 return -TARGET_EFAULT;
930 __put_user(tv->tv_sec, &target_tv->tv_sec);
931 __put_user(tv->tv_usec, &target_tv->tv_usec);
933 unlock_user_struct(target_tv, target_tv_addr, 1);
935 return 0;
938 static inline abi_long copy_from_user_timezone(struct timezone *tz,
939 abi_ulong target_tz_addr)
941 struct target_timezone *target_tz;
943 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
944 return -TARGET_EFAULT;
947 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
948 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
950 unlock_user_struct(target_tz, target_tz_addr, 0);
952 return 0;
955 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
956 #include <mqueue.h>
958 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
959 abi_ulong target_mq_attr_addr)
961 struct target_mq_attr *target_mq_attr;
963 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
964 target_mq_attr_addr, 1))
965 return -TARGET_EFAULT;
967 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
968 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
969 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
970 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
972 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
974 return 0;
977 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
978 const struct mq_attr *attr)
980 struct target_mq_attr *target_mq_attr;
982 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
983 target_mq_attr_addr, 0))
984 return -TARGET_EFAULT;
986 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
987 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
988 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
989 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
991 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
993 return 0;
995 #endif
997 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
998 /* do_select() must return target values and target errnos. */
999 static abi_long do_select(int n,
1000 abi_ulong rfd_addr, abi_ulong wfd_addr,
1001 abi_ulong efd_addr, abi_ulong target_tv_addr)
1003 fd_set rfds, wfds, efds;
1004 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1005 struct timeval tv, *tv_ptr;
1006 abi_long ret;
1008 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1009 if (ret) {
1010 return ret;
1012 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1013 if (ret) {
1014 return ret;
1016 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1017 if (ret) {
1018 return ret;
1021 if (target_tv_addr) {
1022 if (copy_from_user_timeval(&tv, target_tv_addr))
1023 return -TARGET_EFAULT;
1024 tv_ptr = &tv;
1025 } else {
1026 tv_ptr = NULL;
1029 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1031 if (!is_error(ret)) {
1032 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1033 return -TARGET_EFAULT;
1034 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1035 return -TARGET_EFAULT;
1036 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1037 return -TARGET_EFAULT;
1039 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1040 return -TARGET_EFAULT;
1043 return ret;
1045 #endif
1047 static abi_long do_pipe2(int host_pipe[], int flags)
1049 #ifdef CONFIG_PIPE2
1050 return pipe2(host_pipe, flags);
1051 #else
1052 return -ENOSYS;
1053 #endif
1056 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1057 int flags, int is_pipe2)
1059 int host_pipe[2];
1060 abi_long ret;
1061 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1063 if (is_error(ret))
1064 return get_errno(ret);
1066 /* Several targets have special calling conventions for the original
1067 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1068 if (!is_pipe2) {
1069 #if defined(TARGET_ALPHA)
1070 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1071 return host_pipe[0];
1072 #elif defined(TARGET_MIPS)
1073 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1074 return host_pipe[0];
1075 #elif defined(TARGET_SH4)
1076 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1077 return host_pipe[0];
1078 #elif defined(TARGET_SPARC)
1079 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1080 return host_pipe[0];
1081 #endif
1084 if (put_user_s32(host_pipe[0], pipedes)
1085 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1086 return -TARGET_EFAULT;
1087 return get_errno(ret);
1090 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1091 abi_ulong target_addr,
1092 socklen_t len)
1094 struct target_ip_mreqn *target_smreqn;
1096 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1097 if (!target_smreqn)
1098 return -TARGET_EFAULT;
1099 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1100 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1101 if (len == sizeof(struct target_ip_mreqn))
1102 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1103 unlock_user(target_smreqn, target_addr, 0);
1105 return 0;
1108 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1109 abi_ulong target_addr,
1110 socklen_t len)
1112 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1113 sa_family_t sa_family;
1114 struct target_sockaddr *target_saddr;
1116 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1117 if (!target_saddr)
1118 return -TARGET_EFAULT;
1120 sa_family = tswap16(target_saddr->sa_family);
1122 /* Oops. The caller might send an incomplete sun_path; sun_path
1123 * must be terminated by \0 (see the manual page), but
1124 * unfortunately it is quite common to specify sockaddr_un
1125 * length as "strlen(x->sun_path)" while it should be
1126 * "strlen(...) + 1". We'll fix that here if needed.
1127 * Linux kernel has a similar feature.
1130 if (sa_family == AF_UNIX) {
1131 if (len < unix_maxlen && len > 0) {
1132 char *cp = (char*)target_saddr;
1134 if ( cp[len-1] && !cp[len] )
1135 len++;
1137 if (len > unix_maxlen)
1138 len = unix_maxlen;
1141 memcpy(addr, target_saddr, len);
1142 addr->sa_family = sa_family;
1143 if (sa_family == AF_PACKET) {
1144 struct target_sockaddr_ll *lladdr;
1146 lladdr = (struct target_sockaddr_ll *)addr;
1147 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1148 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1150 unlock_user(target_saddr, target_addr, 0);
1152 return 0;
1155 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1156 struct sockaddr *addr,
1157 socklen_t len)
1159 struct target_sockaddr *target_saddr;
1161 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1162 if (!target_saddr)
1163 return -TARGET_EFAULT;
1164 memcpy(target_saddr, addr, len);
1165 target_saddr->sa_family = tswap16(addr->sa_family);
1166 unlock_user(target_saddr, target_addr, len);
1168 return 0;
1171 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1172 struct target_msghdr *target_msgh)
1174 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1175 abi_long msg_controllen;
1176 abi_ulong target_cmsg_addr;
1177 struct target_cmsghdr *target_cmsg;
1178 socklen_t space = 0;
1180 msg_controllen = tswapal(target_msgh->msg_controllen);
1181 if (msg_controllen < sizeof (struct target_cmsghdr))
1182 goto the_end;
1183 target_cmsg_addr = tswapal(target_msgh->msg_control);
1184 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1185 if (!target_cmsg)
1186 return -TARGET_EFAULT;
1188 while (cmsg && target_cmsg) {
1189 void *data = CMSG_DATA(cmsg);
1190 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1192 int len = tswapal(target_cmsg->cmsg_len)
1193 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1195 space += CMSG_SPACE(len);
1196 if (space > msgh->msg_controllen) {
1197 space -= CMSG_SPACE(len);
1198 gemu_log("Host cmsg overflow\n");
1199 break;
1202 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1203 cmsg->cmsg_level = SOL_SOCKET;
1204 } else {
1205 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1207 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1208 cmsg->cmsg_len = CMSG_LEN(len);
1210 if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1211 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1212 memcpy(data, target_data, len);
1213 } else {
1214 int *fd = (int *)data;
1215 int *target_fd = (int *)target_data;
1216 int i, numfds = len / sizeof(int);
1218 for (i = 0; i < numfds; i++)
1219 fd[i] = tswap32(target_fd[i]);
1222 cmsg = CMSG_NXTHDR(msgh, cmsg);
1223 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1225 unlock_user(target_cmsg, target_cmsg_addr, 0);
1226 the_end:
1227 msgh->msg_controllen = space;
1228 return 0;
1231 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1232 struct msghdr *msgh)
1234 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1235 abi_long msg_controllen;
1236 abi_ulong target_cmsg_addr;
1237 struct target_cmsghdr *target_cmsg;
1238 socklen_t space = 0;
1240 msg_controllen = tswapal(target_msgh->msg_controllen);
1241 if (msg_controllen < sizeof (struct target_cmsghdr))
1242 goto the_end;
1243 target_cmsg_addr = tswapal(target_msgh->msg_control);
1244 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1245 if (!target_cmsg)
1246 return -TARGET_EFAULT;
1248 while (cmsg && target_cmsg) {
1249 void *data = CMSG_DATA(cmsg);
1250 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1252 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1254 space += TARGET_CMSG_SPACE(len);
1255 if (space > msg_controllen) {
1256 space -= TARGET_CMSG_SPACE(len);
1257 gemu_log("Target cmsg overflow\n");
1258 break;
1261 if (cmsg->cmsg_level == SOL_SOCKET) {
1262 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1263 } else {
1264 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1266 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1267 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1269 switch (cmsg->cmsg_level) {
1270 case SOL_SOCKET:
1271 switch (cmsg->cmsg_type) {
1272 case SCM_RIGHTS:
1274 int *fd = (int *)data;
1275 int *target_fd = (int *)target_data;
1276 int i, numfds = len / sizeof(int);
1278 for (i = 0; i < numfds; i++)
1279 target_fd[i] = tswap32(fd[i]);
1280 break;
1282 case SO_TIMESTAMP:
1284 struct timeval *tv = (struct timeval *)data;
1285 struct target_timeval *target_tv =
1286 (struct target_timeval *)target_data;
1288 if (len != sizeof(struct timeval))
1289 goto unimplemented;
1291 /* copy struct timeval to target */
1292 target_tv->tv_sec = tswapal(tv->tv_sec);
1293 target_tv->tv_usec = tswapal(tv->tv_usec);
1294 break;
1296 case SCM_CREDENTIALS:
1298 struct ucred *cred = (struct ucred *)data;
1299 struct target_ucred *target_cred =
1300 (struct target_ucred *)target_data;
1302 __put_user(cred->pid, &target_cred->pid);
1303 __put_user(cred->uid, &target_cred->uid);
1304 __put_user(cred->gid, &target_cred->gid);
1305 break;
1307 default:
1308 goto unimplemented;
1310 break;
1312 default:
1313 unimplemented:
1314 gemu_log("Unsupported ancillary data: %d/%d\n",
1315 cmsg->cmsg_level, cmsg->cmsg_type);
1316 memcpy(target_data, data, len);
1319 cmsg = CMSG_NXTHDR(msgh, cmsg);
1320 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1322 unlock_user(target_cmsg, target_cmsg_addr, space);
1323 the_end:
1324 target_msgh->msg_controllen = tswapal(space);
1325 return 0;
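/*
 * Worked example for the conversion above: passing two file descriptors
 * via SCM_RIGHTS means len == 2 * sizeof(int). On the host side the
 * header is sized with CMSG_LEN(len) and accounted with CMSG_SPACE(len);
 * on the guest side the TARGET_CMSG_LEN() / TARGET_CMSG_SPACE()
 * counterparts are used, since the guest's alignment of struct cmsghdr
 * may differ from the host's. Each fd value itself is byte-swapped
 * individually with tswap32().
 */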
1328 /* do_setsockopt() Must return target values and target errnos. */
1329 static abi_long do_setsockopt(int sockfd, int level, int optname,
1330 abi_ulong optval_addr, socklen_t optlen)
1332 abi_long ret;
1333 int val;
1334 struct ip_mreqn *ip_mreq;
1335 struct ip_mreq_source *ip_mreq_source;
1337 switch(level) {
1338 case SOL_TCP:
1339 /* TCP options all take an 'int' value. */
1340 if (optlen < sizeof(uint32_t))
1341 return -TARGET_EINVAL;
1343 if (get_user_u32(val, optval_addr))
1344 return -TARGET_EFAULT;
1345 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1346 break;
1347 case SOL_IP:
1348 switch(optname) {
1349 case IP_TOS:
1350 case IP_TTL:
1351 case IP_HDRINCL:
1352 case IP_ROUTER_ALERT:
1353 case IP_RECVOPTS:
1354 case IP_RETOPTS:
1355 case IP_PKTINFO:
1356 case IP_MTU_DISCOVER:
1357 case IP_RECVERR:
1358 case IP_RECVTOS:
1359 #ifdef IP_FREEBIND
1360 case IP_FREEBIND:
1361 #endif
1362 case IP_MULTICAST_TTL:
1363 case IP_MULTICAST_LOOP:
1364 val = 0;
1365 if (optlen >= sizeof(uint32_t)) {
1366 if (get_user_u32(val, optval_addr))
1367 return -TARGET_EFAULT;
1368 } else if (optlen >= 1) {
1369 if (get_user_u8(val, optval_addr))
1370 return -TARGET_EFAULT;
1372 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1373 break;
1374 case IP_ADD_MEMBERSHIP:
1375 case IP_DROP_MEMBERSHIP:
1376 if (optlen < sizeof (struct target_ip_mreq) ||
1377 optlen > sizeof (struct target_ip_mreqn))
1378 return -TARGET_EINVAL;
1380 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1381 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1382 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1383 break;
1385 case IP_BLOCK_SOURCE:
1386 case IP_UNBLOCK_SOURCE:
1387 case IP_ADD_SOURCE_MEMBERSHIP:
1388 case IP_DROP_SOURCE_MEMBERSHIP:
1389 if (optlen != sizeof (struct target_ip_mreq_source))
1390 return -TARGET_EINVAL;
1392 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1393 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1394 unlock_user (ip_mreq_source, optval_addr, 0);
1395 break;
1397 default:
1398 goto unimplemented;
1400 break;
1401 case SOL_IPV6:
1402 switch (optname) {
1403 case IPV6_MTU_DISCOVER:
1404 case IPV6_MTU:
1405 case IPV6_V6ONLY:
1406 case IPV6_RECVPKTINFO:
1407 val = 0;
1408 if (optlen < sizeof(uint32_t)) {
1409 return -TARGET_EINVAL;
1411 if (get_user_u32(val, optval_addr)) {
1412 return -TARGET_EFAULT;
1414 ret = get_errno(setsockopt(sockfd, level, optname,
1415 &val, sizeof(val)));
1416 break;
1417 default:
1418 goto unimplemented;
1420 break;
1421 case SOL_RAW:
1422 switch (optname) {
1423 case ICMP_FILTER:
1424 /* struct icmp_filter takes a u32 value */
1425 if (optlen < sizeof(uint32_t)) {
1426 return -TARGET_EINVAL;
1429 if (get_user_u32(val, optval_addr)) {
1430 return -TARGET_EFAULT;
1432 ret = get_errno(setsockopt(sockfd, level, optname,
1433 &val, sizeof(val)));
1434 break;
1436 default:
1437 goto unimplemented;
1439 break;
1440 case TARGET_SOL_SOCKET:
1441 switch (optname) {
1442 case TARGET_SO_RCVTIMEO:
1444 struct timeval tv;
1446 optname = SO_RCVTIMEO;
1448 set_timeout:
1449 if (optlen != sizeof(struct target_timeval)) {
1450 return -TARGET_EINVAL;
1453 if (copy_from_user_timeval(&tv, optval_addr)) {
1454 return -TARGET_EFAULT;
1457 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1458 &tv, sizeof(tv)));
1459 return ret;
1461 case TARGET_SO_SNDTIMEO:
1462 optname = SO_SNDTIMEO;
1463 goto set_timeout;
1464 case TARGET_SO_ATTACH_FILTER:
1466 struct target_sock_fprog *tfprog;
1467 struct target_sock_filter *tfilter;
1468 struct sock_fprog fprog;
1469 struct sock_filter *filter;
1470 int i;
1472 if (optlen != sizeof(*tfprog)) {
1473 return -TARGET_EINVAL;
1475 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1476 return -TARGET_EFAULT;
1478 if (!lock_user_struct(VERIFY_READ, tfilter,
1479 tswapal(tfprog->filter), 0)) {
1480 unlock_user_struct(tfprog, optval_addr, 1);
1481 return -TARGET_EFAULT;
1484 fprog.len = tswap16(tfprog->len);
1485 filter = malloc(fprog.len * sizeof(*filter));
1486 if (filter == NULL) {
1487 unlock_user_struct(tfilter, tfprog->filter, 1);
1488 unlock_user_struct(tfprog, optval_addr, 1);
1489 return -TARGET_ENOMEM;
1491 for (i = 0; i < fprog.len; i++) {
1492 filter[i].code = tswap16(tfilter[i].code);
1493 filter[i].jt = tfilter[i].jt;
1494 filter[i].jf = tfilter[i].jf;
1495 filter[i].k = tswap32(tfilter[i].k);
1497 fprog.filter = filter;
1499 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1500 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1501 free(filter);
1503 unlock_user_struct(tfilter, tfprog->filter, 1);
1504 unlock_user_struct(tfprog, optval_addr, 1);
1505 return ret;
1507 case TARGET_SO_BINDTODEVICE:
1509 char *dev_ifname, *addr_ifname;
1511 if (optlen > IFNAMSIZ - 1) {
1512 optlen = IFNAMSIZ - 1;
1514 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1515 if (!dev_ifname) {
1516 return -TARGET_EFAULT;
1518 optname = SO_BINDTODEVICE;
1519 addr_ifname = alloca(IFNAMSIZ);
1520 memcpy(addr_ifname, dev_ifname, optlen);
1521 addr_ifname[optlen] = 0;
1522 ret = get_errno(setsockopt(sockfd, level, optname, addr_ifname, optlen));
1523 unlock_user (dev_ifname, optval_addr, 0);
1524 return ret;
1526 /* Options with 'int' argument. */
1527 case TARGET_SO_DEBUG:
1528 optname = SO_DEBUG;
1529 break;
1530 case TARGET_SO_REUSEADDR:
1531 optname = SO_REUSEADDR;
1532 break;
1533 case TARGET_SO_TYPE:
1534 optname = SO_TYPE;
1535 break;
1536 case TARGET_SO_ERROR:
1537 optname = SO_ERROR;
1538 break;
1539 case TARGET_SO_DONTROUTE:
1540 optname = SO_DONTROUTE;
1541 break;
1542 case TARGET_SO_BROADCAST:
1543 optname = SO_BROADCAST;
1544 break;
1545 case TARGET_SO_SNDBUF:
1546 optname = SO_SNDBUF;
1547 break;
1548 case TARGET_SO_SNDBUFFORCE:
1549 optname = SO_SNDBUFFORCE;
1550 break;
1551 case TARGET_SO_RCVBUF:
1552 optname = SO_RCVBUF;
1553 break;
1554 case TARGET_SO_RCVBUFFORCE:
1555 optname = SO_RCVBUFFORCE;
1556 break;
1557 case TARGET_SO_KEEPALIVE:
1558 optname = SO_KEEPALIVE;
1559 break;
1560 case TARGET_SO_OOBINLINE:
1561 optname = SO_OOBINLINE;
1562 break;
1563 case TARGET_SO_NO_CHECK:
1564 optname = SO_NO_CHECK;
1565 break;
1566 case TARGET_SO_PRIORITY:
1567 optname = SO_PRIORITY;
1568 break;
1569 #ifdef SO_BSDCOMPAT
1570 case TARGET_SO_BSDCOMPAT:
1571 optname = SO_BSDCOMPAT;
1572 break;
1573 #endif
1574 case TARGET_SO_PASSCRED:
1575 optname = SO_PASSCRED;
1576 break;
1577 case TARGET_SO_PASSSEC:
1578 optname = SO_PASSSEC;
1579 break;
1580 case TARGET_SO_TIMESTAMP:
1581 optname = SO_TIMESTAMP;
1582 break;
1583 case TARGET_SO_RCVLOWAT:
1584 optname = SO_RCVLOWAT;
1585 break;
1586 break;
1587 default:
1588 goto unimplemented;
1590 if (optlen < sizeof(uint32_t))
1591 return -TARGET_EINVAL;
1593 if (get_user_u32(val, optval_addr))
1594 return -TARGET_EFAULT;
1595 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1596 break;
1597 default:
1598 unimplemented:
1599 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1600 ret = -TARGET_ENOPROTOOPT;
1602 return ret;
1605 /* do_getsockopt() Must return target values and target errnos. */
1606 static abi_long do_getsockopt(int sockfd, int level, int optname,
1607 abi_ulong optval_addr, abi_ulong optlen)
1609 abi_long ret;
1610 int len, val;
1611 socklen_t lv;
1613 switch(level) {
1614 case TARGET_SOL_SOCKET:
1615 level = SOL_SOCKET;
1616 switch (optname) {
1617 /* These don't just return a single integer */
1618 case TARGET_SO_LINGER:
1619 case TARGET_SO_RCVTIMEO:
1620 case TARGET_SO_SNDTIMEO:
1621 case TARGET_SO_PEERNAME:
1622 goto unimplemented;
1623 case TARGET_SO_PEERCRED: {
1624 struct ucred cr;
1625 socklen_t crlen;
1626 struct target_ucred *tcr;
1628 if (get_user_u32(len, optlen)) {
1629 return -TARGET_EFAULT;
1631 if (len < 0) {
1632 return -TARGET_EINVAL;
1635 crlen = sizeof(cr);
1636 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1637 &cr, &crlen));
1638 if (ret < 0) {
1639 return ret;
1641 if (len > crlen) {
1642 len = crlen;
1644 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1645 return -TARGET_EFAULT;
1647 __put_user(cr.pid, &tcr->pid);
1648 __put_user(cr.uid, &tcr->uid);
1649 __put_user(cr.gid, &tcr->gid);
1650 unlock_user_struct(tcr, optval_addr, 1);
1651 if (put_user_u32(len, optlen)) {
1652 return -TARGET_EFAULT;
1654 break;
1656 /* Options with 'int' argument. */
1657 case TARGET_SO_DEBUG:
1658 optname = SO_DEBUG;
1659 goto int_case;
1660 case TARGET_SO_REUSEADDR:
1661 optname = SO_REUSEADDR;
1662 goto int_case;
1663 case TARGET_SO_TYPE:
1664 optname = SO_TYPE;
1665 goto int_case;
1666 case TARGET_SO_ERROR:
1667 optname = SO_ERROR;
1668 goto int_case;
1669 case TARGET_SO_DONTROUTE:
1670 optname = SO_DONTROUTE;
1671 goto int_case;
1672 case TARGET_SO_BROADCAST:
1673 optname = SO_BROADCAST;
1674 goto int_case;
1675 case TARGET_SO_SNDBUF:
1676 optname = SO_SNDBUF;
1677 goto int_case;
1678 case TARGET_SO_RCVBUF:
1679 optname = SO_RCVBUF;
1680 goto int_case;
1681 case TARGET_SO_KEEPALIVE:
1682 optname = SO_KEEPALIVE;
1683 goto int_case;
1684 case TARGET_SO_OOBINLINE:
1685 optname = SO_OOBINLINE;
1686 goto int_case;
1687 case TARGET_SO_NO_CHECK:
1688 optname = SO_NO_CHECK;
1689 goto int_case;
1690 case TARGET_SO_PRIORITY:
1691 optname = SO_PRIORITY;
1692 goto int_case;
1693 #ifdef SO_BSDCOMPAT
1694 case TARGET_SO_BSDCOMPAT:
1695 optname = SO_BSDCOMPAT;
1696 goto int_case;
1697 #endif
1698 case TARGET_SO_PASSCRED:
1699 optname = SO_PASSCRED;
1700 goto int_case;
1701 case TARGET_SO_TIMESTAMP:
1702 optname = SO_TIMESTAMP;
1703 goto int_case;
1704 case TARGET_SO_RCVLOWAT:
1705 optname = SO_RCVLOWAT;
1706 goto int_case;
1707 case TARGET_SO_ACCEPTCONN:
1708 optname = SO_ACCEPTCONN;
1709 goto int_case;
1710 default:
1711 goto int_case;
1713 break;
1714 case SOL_TCP:
1715 /* TCP options all take an 'int' value. */
1716 int_case:
1717 if (get_user_u32(len, optlen))
1718 return -TARGET_EFAULT;
1719 if (len < 0)
1720 return -TARGET_EINVAL;
1721 lv = sizeof(lv);
1722 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1723 if (ret < 0)
1724 return ret;
1725 if (optname == SO_TYPE) {
1726 val = host_to_target_sock_type(val);
1728 if (len > lv)
1729 len = lv;
1730 if (len == 4) {
1731 if (put_user_u32(val, optval_addr))
1732 return -TARGET_EFAULT;
1733 } else {
1734 if (put_user_u8(val, optval_addr))
1735 return -TARGET_EFAULT;
1737 if (put_user_u32(len, optlen))
1738 return -TARGET_EFAULT;
1739 break;
1740 case SOL_IP:
1741 switch(optname) {
1742 case IP_TOS:
1743 case IP_TTL:
1744 case IP_HDRINCL:
1745 case IP_ROUTER_ALERT:
1746 case IP_RECVOPTS:
1747 case IP_RETOPTS:
1748 case IP_PKTINFO:
1749 case IP_MTU_DISCOVER:
1750 case IP_RECVERR:
1751 case IP_RECVTOS:
1752 #ifdef IP_FREEBIND
1753 case IP_FREEBIND:
1754 #endif
1755 case IP_MULTICAST_TTL:
1756 case IP_MULTICAST_LOOP:
1757 if (get_user_u32(len, optlen))
1758 return -TARGET_EFAULT;
1759 if (len < 0)
1760 return -TARGET_EINVAL;
1761 lv = sizeof(lv);
1762 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1763 if (ret < 0)
1764 return ret;
1765 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1766 len = 1;
1767 if (put_user_u32(len, optlen)
1768 || put_user_u8(val, optval_addr))
1769 return -TARGET_EFAULT;
1770 } else {
1771 if (len > sizeof(int))
1772 len = sizeof(int);
1773 if (put_user_u32(len, optlen)
1774 || put_user_u32(val, optval_addr))
1775 return -TARGET_EFAULT;
1777 break;
1778 default:
1779 ret = -TARGET_ENOPROTOOPT;
1780 break;
1782 break;
1783 default:
1784 unimplemented:
1785 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1786 level, optname);
1787 ret = -TARGET_EOPNOTSUPP;
1788 break;
1790 return ret;
1793 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1794 int count, int copy)
1796 struct target_iovec *target_vec;
1797 struct iovec *vec;
1798 abi_ulong total_len, max_len;
1799 int i;
1800 int err = 0;
1802 if (count == 0) {
1803 errno = 0;
1804 return NULL;
1806 if (count < 0 || count > IOV_MAX) {
1807 errno = EINVAL;
1808 return NULL;
1811 vec = calloc(count, sizeof(struct iovec));
1812 if (vec == NULL) {
1813 errno = ENOMEM;
1814 return NULL;
1817 target_vec = lock_user(VERIFY_READ, target_addr,
1818 count * sizeof(struct target_iovec), 1);
1819 if (target_vec == NULL) {
1820 err = EFAULT;
1821 goto fail2;
1824 /* ??? If host page size > target page size, this will result in a
1825 value larger than what we can actually support. */
1826 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1827 total_len = 0;
1829 for (i = 0; i < count; i++) {
1830 abi_ulong base = tswapal(target_vec[i].iov_base);
1831 abi_long len = tswapal(target_vec[i].iov_len);
1833 if (len < 0) {
1834 err = EINVAL;
1835 goto fail;
1836 } else if (len == 0) {
1837 /* Zero length pointer is ignored. */
1838 vec[i].iov_base = 0;
1839 } else {
1840 vec[i].iov_base = lock_user(type, base, len, copy);
1841 if (!vec[i].iov_base) {
1842 err = EFAULT;
1843 goto fail;
1845 if (len > max_len - total_len) {
1846 len = max_len - total_len;
1849 vec[i].iov_len = len;
1850 total_len += len;
1853 unlock_user(target_vec, target_addr, 0);
1854 return vec;
1856 fail:
1857 unlock_user(target_vec, target_addr, 0);
1858 fail2:
1859 free(vec);
1860 errno = err;
1861 return NULL;
1864 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1865 int count, int copy)
1867 struct target_iovec *target_vec;
1868 int i;
1870 target_vec = lock_user(VERIFY_READ, target_addr,
1871 count * sizeof(struct target_iovec), 1);
1872 if (target_vec) {
1873 for (i = 0; i < count; i++) {
1874 abi_ulong base = tswapal(target_vec[i].iov_base);
1875 abi_long len = tswapal(target_vec[i].iov_len);
1876 if (len < 0) {
1877 break;
1879 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1881 unlock_user(target_vec, target_addr, 0);
1884 free(vec);
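/*
 * The two helpers above are used as a bracket pair around the host
 * vectored I/O call; a minimal sketch of the writev(2) emulation
 * pattern (variable names illustrative):
 *
 *     struct iovec *vec = lock_iovec(VERIFY_READ, target_iov_addr,
 *                                    count, 1);
 *     if (vec == NULL) {
 *         return -host_to_target_errno(errno);
 *     }
 *     ret = get_errno(writev(fd, vec, count));
 *     unlock_iovec(vec, target_iov_addr, count, 0);
 */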
1887 static inline int target_to_host_sock_type(int *type)
1889 int host_type = 0;
1890 int target_type = *type;
1892 switch (target_type & TARGET_SOCK_TYPE_MASK) {
1893 case TARGET_SOCK_DGRAM:
1894 host_type = SOCK_DGRAM;
1895 break;
1896 case TARGET_SOCK_STREAM:
1897 host_type = SOCK_STREAM;
1898 break;
1899 default:
1900 host_type = target_type & TARGET_SOCK_TYPE_MASK;
1901 break;
1903 if (target_type & TARGET_SOCK_CLOEXEC) {
1904 #if defined(SOCK_CLOEXEC)
1905 host_type |= SOCK_CLOEXEC;
1906 #else
1907 return -TARGET_EINVAL;
1908 #endif
1910 if (target_type & TARGET_SOCK_NONBLOCK) {
1911 #if defined(SOCK_NONBLOCK)
1912 host_type |= SOCK_NONBLOCK;
1913 #elif !defined(O_NONBLOCK)
1914 return -TARGET_EINVAL;
1915 #endif
1917 *type = host_type;
1918 return 0;
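/*
 * Example of the mapping above: a guest socket() call requesting
 * TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC becomes SOCK_STREAM |
 * SOCK_CLOEXEC on a host that defines SOCK_CLOEXEC; on a host without
 * it the call fails with -TARGET_EINVAL rather than silently dropping
 * the close-on-exec semantics. TARGET_SOCK_NONBLOCK gets one extra
 * fallback: if the host lacks SOCK_NONBLOCK but has O_NONBLOCK,
 * sock_flags_fixup() below applies it with fcntl() after the socket
 * has been created.
 */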
1921 /* Try to emulate socket type flags after socket creation. */
1922 static int sock_flags_fixup(int fd, int target_type)
1924 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
1925 if (target_type & TARGET_SOCK_NONBLOCK) {
1926 int flags = fcntl(fd, F_GETFL);
1927 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
1928 close(fd);
1929 return -TARGET_EINVAL;
1932 #endif
1933 return fd;
1936 /* do_socket() Must return target values and target errnos. */
1937 static abi_long do_socket(int domain, int type, int protocol)
1939 int target_type = type;
1940 int ret;
1942 ret = target_to_host_sock_type(&type);
1943 if (ret) {
1944 return ret;
1947 if (domain == PF_NETLINK)
1948 return -TARGET_EAFNOSUPPORT;
1949 ret = get_errno(socket(domain, type, protocol));
1950 if (ret >= 0) {
1951 ret = sock_flags_fixup(ret, target_type);
1953 return ret;
1956 /* do_bind() Must return target values and target errnos. */
1957 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1958 socklen_t addrlen)
1960 void *addr;
1961 abi_long ret;
1963 if ((int)addrlen < 0) {
1964 return -TARGET_EINVAL;
1967 addr = alloca(addrlen+1);
1969 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1970 if (ret)
1971 return ret;
1973 return get_errno(bind(sockfd, addr, addrlen));
1976 /* do_connect() Must return target values and target errnos. */
1977 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1978 socklen_t addrlen)
1980 void *addr;
1981 abi_long ret;
1983 if ((int)addrlen < 0) {
1984 return -TARGET_EINVAL;
1987 addr = alloca(addrlen+1);
1989 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1990 if (ret)
1991 return ret;
1993 return get_errno(connect(sockfd, addr, addrlen));
1996 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
1997 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
1998 int flags, int send)
2000 abi_long ret, len;
2001 struct msghdr msg;
2002 int count;
2003 struct iovec *vec;
2004 abi_ulong target_vec;
2006 if (msgp->msg_name) {
2007 msg.msg_namelen = tswap32(msgp->msg_namelen);
2008 msg.msg_name = alloca(msg.msg_namelen+1);
2009 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
2010 msg.msg_namelen);
2011 if (ret) {
2012 goto out2;
2014 } else {
2015 msg.msg_name = NULL;
2016 msg.msg_namelen = 0;
2018 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2019 msg.msg_control = alloca(msg.msg_controllen);
2020 msg.msg_flags = tswap32(msgp->msg_flags);
2022 count = tswapal(msgp->msg_iovlen);
2023 target_vec = tswapal(msgp->msg_iov);
2024 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2025 target_vec, count, send);
2026 if (vec == NULL) {
2027 ret = -host_to_target_errno(errno);
2028 goto out2;
2030 msg.msg_iovlen = count;
2031 msg.msg_iov = vec;
2033 if (send) {
2034 ret = target_to_host_cmsg(&msg, msgp);
2035 if (ret == 0)
2036 ret = get_errno(sendmsg(fd, &msg, flags));
2037 } else {
2038 ret = get_errno(recvmsg(fd, &msg, flags));
2039 if (!is_error(ret)) {
2040 len = ret;
2041 ret = host_to_target_cmsg(msgp, &msg);
2042 if (!is_error(ret)) {
2043 msgp->msg_namelen = tswap32(msg.msg_namelen);
2044 if (msg.msg_name != NULL) {
2045 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2046 msg.msg_name, msg.msg_namelen);
2047 if (ret) {
2048 goto out;
2052 ret = len;
2057 out:
2058 unlock_iovec(vec, target_vec, count, !send);
2059 out2:
2060 return ret;
2063 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2064 int flags, int send)
2066 abi_long ret;
2067 struct target_msghdr *msgp;
2069 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2070 msgp,
2071 target_msg,
2072 send ? 1 : 0)) {
2073 return -TARGET_EFAULT;
2075 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2076 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2077 return ret;
2080 #ifdef TARGET_NR_sendmmsg
2081 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2082 * so it might not have this *mmsg-specific flag either.
2083 */
2084 #ifndef MSG_WAITFORONE
2085 #define MSG_WAITFORONE 0x10000
2086 #endif
2088 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2089 unsigned int vlen, unsigned int flags,
2090 int send)
2092 struct target_mmsghdr *mmsgp;
2093 abi_long ret = 0;
2094 int i;
2096 if (vlen > UIO_MAXIOV) {
2097 vlen = UIO_MAXIOV;
2100 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2101 if (!mmsgp) {
2102 return -TARGET_EFAULT;
2105 for (i = 0; i < vlen; i++) {
2106 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2107 if (is_error(ret)) {
2108 break;
2110 mmsgp[i].msg_len = tswap32(ret);
2111 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2112 if (flags & MSG_WAITFORONE) {
2113 flags |= MSG_DONTWAIT;
2117 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2119 /* Return number of datagrams sent if we sent any at all;
2120 * otherwise return the error.
2121 */
2122 if (i) {
2123 return i;
2125 return ret;
2127 #endif
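/* Note (illustrative, mirrors the loop above): like the kernel's
 * sendmmsg()/recvmmsg(), partial success wins. If a hypothetical guest passes
 * vlen == 3 and the second datagram fails, the call still returns 1; the
 * error code itself is only returned when no datagram was transferred at all.
 */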
2129 /* If we don't have a system accept4() then just call accept.
2130 * The callsites to do_accept4() will ensure that they don't
2131 * pass a non-zero flags argument in this config.
2132 */
2133 #ifndef CONFIG_ACCEPT4
2134 static inline int accept4(int sockfd, struct sockaddr *addr,
2135 socklen_t *addrlen, int flags)
2137 assert(flags == 0);
2138 return accept(sockfd, addr, addrlen);
2140 #endif
2142 /* do_accept4() Must return target values and target errnos. */
2143 static abi_long do_accept4(int fd, abi_ulong target_addr,
2144 abi_ulong target_addrlen_addr, int flags)
2146 socklen_t addrlen;
2147 void *addr;
2148 abi_long ret;
2149 int host_flags;
2151 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2153 if (target_addr == 0) {
2154 return get_errno(accept4(fd, NULL, NULL, host_flags));
2157 /* linux returns EINVAL if addrlen pointer is invalid */
2158 if (get_user_u32(addrlen, target_addrlen_addr))
2159 return -TARGET_EINVAL;
2161 if ((int)addrlen < 0) {
2162 return -TARGET_EINVAL;
2165 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2166 return -TARGET_EINVAL;
2168 addr = alloca(addrlen);
2170 ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
2171 if (!is_error(ret)) {
2172 host_to_target_sockaddr(target_addr, addr, addrlen);
2173 if (put_user_u32(addrlen, target_addrlen_addr))
2174 ret = -TARGET_EFAULT;
2176 return ret;
2179 /* do_getpeername() Must return target values and target errnos. */
2180 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2181 abi_ulong target_addrlen_addr)
2183 socklen_t addrlen;
2184 void *addr;
2185 abi_long ret;
2187 if (get_user_u32(addrlen, target_addrlen_addr))
2188 return -TARGET_EFAULT;
2190 if ((int)addrlen < 0) {
2191 return -TARGET_EINVAL;
2194 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2195 return -TARGET_EFAULT;
2197 addr = alloca(addrlen);
2199 ret = get_errno(getpeername(fd, addr, &addrlen));
2200 if (!is_error(ret)) {
2201 host_to_target_sockaddr(target_addr, addr, addrlen);
2202 if (put_user_u32(addrlen, target_addrlen_addr))
2203 ret = -TARGET_EFAULT;
2205 return ret;
2208 /* do_getsockname() Must return target values and target errnos. */
2209 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2210 abi_ulong target_addrlen_addr)
2212 socklen_t addrlen;
2213 void *addr;
2214 abi_long ret;
2216 if (get_user_u32(addrlen, target_addrlen_addr))
2217 return -TARGET_EFAULT;
2219 if ((int)addrlen < 0) {
2220 return -TARGET_EINVAL;
2223 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2224 return -TARGET_EFAULT;
2226 addr = alloca(addrlen);
2228 ret = get_errno(getsockname(fd, addr, &addrlen));
2229 if (!is_error(ret)) {
2230 host_to_target_sockaddr(target_addr, addr, addrlen);
2231 if (put_user_u32(addrlen, target_addrlen_addr))
2232 ret = -TARGET_EFAULT;
2234 return ret;
2237 /* do_socketpair() Must return target values and target errnos. */
2238 static abi_long do_socketpair(int domain, int type, int protocol,
2239 abi_ulong target_tab_addr)
2241 int tab[2];
2242 abi_long ret;
2244 target_to_host_sock_type(&type);
2246 ret = get_errno(socketpair(domain, type, protocol, tab));
2247 if (!is_error(ret)) {
2248 if (put_user_s32(tab[0], target_tab_addr)
2249 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2250 ret = -TARGET_EFAULT;
2252 return ret;
2255 /* do_sendto() Must return target values and target errnos. */
2256 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2257 abi_ulong target_addr, socklen_t addrlen)
2259 void *addr;
2260 void *host_msg;
2261 abi_long ret;
2263 if ((int)addrlen < 0) {
2264 return -TARGET_EINVAL;
2267 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2268 if (!host_msg)
2269 return -TARGET_EFAULT;
2270 if (target_addr) {
2271 addr = alloca(addrlen+1);
2272 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2273 if (ret) {
2274 unlock_user(host_msg, msg, 0);
2275 return ret;
2277 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2278 } else {
2279 ret = get_errno(send(fd, host_msg, len, flags));
2281 unlock_user(host_msg, msg, 0);
2282 return ret;
2285 /* do_recvfrom() Must return target values and target errnos. */
2286 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2287 abi_ulong target_addr,
2288 abi_ulong target_addrlen)
2290 socklen_t addrlen;
2291 void *addr;
2292 void *host_msg;
2293 abi_long ret;
2295 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2296 if (!host_msg)
2297 return -TARGET_EFAULT;
2298 if (target_addr) {
2299 if (get_user_u32(addrlen, target_addrlen)) {
2300 ret = -TARGET_EFAULT;
2301 goto fail;
2303 if ((int)addrlen < 0) {
2304 ret = -TARGET_EINVAL;
2305 goto fail;
2307 addr = alloca(addrlen);
2308 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2309 } else {
2310 addr = NULL; /* To keep compiler quiet. */
2311 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2313 if (!is_error(ret)) {
2314 if (target_addr) {
2315 host_to_target_sockaddr(target_addr, addr, addrlen);
2316 if (put_user_u32(addrlen, target_addrlen)) {
2317 ret = -TARGET_EFAULT;
2318 goto fail;
2321 unlock_user(host_msg, msg, len);
2322 } else {
2323 fail:
2324 unlock_user(host_msg, msg, 0);
2326 return ret;
2329 #ifdef TARGET_NR_socketcall
2330 /* do_socketcall() Must return target values and target errnos. */
2331 static abi_long do_socketcall(int num, abi_ulong vptr)
2333 static const unsigned ac[] = { /* number of arguments per call */
2334 [SOCKOP_socket] = 3, /* domain, type, protocol */
2335 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
2336 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
2337 [SOCKOP_listen] = 2, /* sockfd, backlog */
2338 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
2339 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
2340 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
2341 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
2342 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
2343 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
2344 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
2345 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2346 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2347 [SOCKOP_shutdown] = 2, /* sockfd, how */
2348 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
2349 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
2350 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2351 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2353 abi_long a[6]; /* max 6 args */
2355 /* first, collect the arguments in a[] according to ac[] */
2356 if (num >= 0 && num < ARRAY_SIZE(ac)) {
2357 unsigned i;
2358 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
2359 for (i = 0; i < ac[num]; ++i) {
2360 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
2361 return -TARGET_EFAULT;
2366 /* now when we have the args, actually handle the call */
2367 switch (num) {
2368 case SOCKOP_socket: /* domain, type, protocol */
2369 return do_socket(a[0], a[1], a[2]);
2370 case SOCKOP_bind: /* sockfd, addr, addrlen */
2371 return do_bind(a[0], a[1], a[2]);
2372 case SOCKOP_connect: /* sockfd, addr, addrlen */
2373 return do_connect(a[0], a[1], a[2]);
2374 case SOCKOP_listen: /* sockfd, backlog */
2375 return get_errno(listen(a[0], a[1]));
2376 case SOCKOP_accept: /* sockfd, addr, addrlen */
2377 return do_accept4(a[0], a[1], a[2], 0);
2378 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
2379 return do_accept4(a[0], a[1], a[2], a[3]);
2380 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
2381 return do_getsockname(a[0], a[1], a[2]);
2382 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
2383 return do_getpeername(a[0], a[1], a[2]);
2384 case SOCKOP_socketpair: /* domain, type, protocol, tab */
2385 return do_socketpair(a[0], a[1], a[2], a[3]);
2386 case SOCKOP_send: /* sockfd, msg, len, flags */
2387 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
2388 case SOCKOP_recv: /* sockfd, msg, len, flags */
2389 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
2390 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
2391 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
2392 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
2393 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
2394 case SOCKOP_shutdown: /* sockfd, how */
2395 return get_errno(shutdown(a[0], a[1]));
2396 case SOCKOP_sendmsg: /* sockfd, msg, flags */
2397 return do_sendrecvmsg(a[0], a[1], a[2], 1);
2398 case SOCKOP_recvmsg: /* sockfd, msg, flags */
2399 return do_sendrecvmsg(a[0], a[1], a[2], 0);
2400 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
2401 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
2402 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
2403 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
2404 default:
2405 gemu_log("Unsupported socketcall: %d\n", num);
2406 return -TARGET_ENOSYS;
2409 #endif
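/* Example (illustrative sketch, hypothetical guest call): on targets that
 * multiplex sockets through socketcall(2), a guest connect() reaches this
 * function roughly as
 *     do_socketcall(SOCKOP_connect, vptr);
 * where vptr points at three abi_long slots holding sockfd, addr and addrlen;
 * ac[SOCKOP_connect] == 3 tells the loop above how many slots to fetch with
 * get_user_ual() before dispatching to do_connect().
 */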
2411 #define N_SHM_REGIONS 32
2413 static struct shm_region {
2414 abi_ulong start;
2415 abi_ulong size;
2416 } shm_regions[N_SHM_REGIONS];
2418 struct target_semid_ds
2420 struct target_ipc_perm sem_perm;
2421 abi_ulong sem_otime;
2422 abi_ulong __unused1;
2423 abi_ulong sem_ctime;
2424 abi_ulong __unused2;
2425 abi_ulong sem_nsems;
2426 abi_ulong __unused3;
2427 abi_ulong __unused4;
2430 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2431 abi_ulong target_addr)
2433 struct target_ipc_perm *target_ip;
2434 struct target_semid_ds *target_sd;
2436 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2437 return -TARGET_EFAULT;
2438 target_ip = &(target_sd->sem_perm);
2439 host_ip->__key = tswap32(target_ip->__key);
2440 host_ip->uid = tswap32(target_ip->uid);
2441 host_ip->gid = tswap32(target_ip->gid);
2442 host_ip->cuid = tswap32(target_ip->cuid);
2443 host_ip->cgid = tswap32(target_ip->cgid);
2444 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2445 host_ip->mode = tswap32(target_ip->mode);
2446 #else
2447 host_ip->mode = tswap16(target_ip->mode);
2448 #endif
2449 #if defined(TARGET_PPC)
2450 host_ip->__seq = tswap32(target_ip->__seq);
2451 #else
2452 host_ip->__seq = tswap16(target_ip->__seq);
2453 #endif
2454 unlock_user_struct(target_sd, target_addr, 0);
2455 return 0;
2458 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2459 struct ipc_perm *host_ip)
2461 struct target_ipc_perm *target_ip;
2462 struct target_semid_ds *target_sd;
2464 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2465 return -TARGET_EFAULT;
2466 target_ip = &(target_sd->sem_perm);
2467 target_ip->__key = tswap32(host_ip->__key);
2468 target_ip->uid = tswap32(host_ip->uid);
2469 target_ip->gid = tswap32(host_ip->gid);
2470 target_ip->cuid = tswap32(host_ip->cuid);
2471 target_ip->cgid = tswap32(host_ip->cgid);
2472 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2473 target_ip->mode = tswap32(host_ip->mode);
2474 #else
2475 target_ip->mode = tswap16(host_ip->mode);
2476 #endif
2477 #if defined(TARGET_PPC)
2478 target_ip->__seq = tswap32(host_ip->__seq);
2479 #else
2480 target_ip->__seq = tswap16(host_ip->__seq);
2481 #endif
2482 unlock_user_struct(target_sd, target_addr, 1);
2483 return 0;
2486 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2487 abi_ulong target_addr)
2489 struct target_semid_ds *target_sd;
2491 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2492 return -TARGET_EFAULT;
2493 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2494 return -TARGET_EFAULT;
2495 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2496 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2497 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2498 unlock_user_struct(target_sd, target_addr, 0);
2499 return 0;
2502 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2503 struct semid_ds *host_sd)
2505 struct target_semid_ds *target_sd;
2507 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2508 return -TARGET_EFAULT;
2509 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2510 return -TARGET_EFAULT;
2511 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2512 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2513 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2514 unlock_user_struct(target_sd, target_addr, 1);
2515 return 0;
2518 struct target_seminfo {
2519 int semmap;
2520 int semmni;
2521 int semmns;
2522 int semmnu;
2523 int semmsl;
2524 int semopm;
2525 int semume;
2526 int semusz;
2527 int semvmx;
2528 int semaem;
2531 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2532 struct seminfo *host_seminfo)
2534 struct target_seminfo *target_seminfo;
2535 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2536 return -TARGET_EFAULT;
2537 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2538 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2539 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2540 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2541 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2542 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2543 __put_user(host_seminfo->semume, &target_seminfo->semume);
2544 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2545 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2546 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2547 unlock_user_struct(target_seminfo, target_addr, 1);
2548 return 0;
2551 union semun {
2552 int val;
2553 struct semid_ds *buf;
2554 unsigned short *array;
2555 struct seminfo *__buf;
2558 union target_semun {
2559 int val;
2560 abi_ulong buf;
2561 abi_ulong array;
2562 abi_ulong __buf;
2565 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2566 abi_ulong target_addr)
2568 int nsems;
2569 unsigned short *array;
2570 union semun semun;
2571 struct semid_ds semid_ds;
2572 int i, ret;
2574 semun.buf = &semid_ds;
2576 ret = semctl(semid, 0, IPC_STAT, semun);
2577 if (ret == -1)
2578 return get_errno(ret);
2580 nsems = semid_ds.sem_nsems;
2582 *host_array = malloc(nsems*sizeof(unsigned short));
2583 if (!*host_array) {
2584 return -TARGET_ENOMEM;
2586 array = lock_user(VERIFY_READ, target_addr,
2587 nsems*sizeof(unsigned short), 1);
2588 if (!array) {
2589 free(*host_array);
2590 return -TARGET_EFAULT;
2593 for(i=0; i<nsems; i++) {
2594 __get_user((*host_array)[i], &array[i]);
2596 unlock_user(array, target_addr, 0);
2598 return 0;
2601 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2602 unsigned short **host_array)
2604 int nsems;
2605 unsigned short *array;
2606 union semun semun;
2607 struct semid_ds semid_ds;
2608 int i, ret;
2610 semun.buf = &semid_ds;
2612 ret = semctl(semid, 0, IPC_STAT, semun);
2613 if (ret == -1)
2614 return get_errno(ret);
2616 nsems = semid_ds.sem_nsems;
2618 array = lock_user(VERIFY_WRITE, target_addr,
2619 nsems*sizeof(unsigned short), 0);
2620 if (!array)
2621 return -TARGET_EFAULT;
2623 for(i=0; i<nsems; i++) {
2624 __put_user((*host_array)[i], &array[i]);
2626 free(*host_array);
2627 unlock_user(array, target_addr, 1);
2629 return 0;
2632 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2633 union target_semun target_su)
2635 union semun arg;
2636 struct semid_ds dsarg;
2637 unsigned short *array = NULL;
2638 struct seminfo seminfo;
2639 abi_long ret = -TARGET_EINVAL;
2640 abi_long err;
2641 cmd &= 0xff;
2643 switch( cmd ) {
2644 case GETVAL:
2645 case SETVAL:
2646 arg.val = tswap32(target_su.val);
2647 ret = get_errno(semctl(semid, semnum, cmd, arg));
2648 target_su.val = tswap32(arg.val);
2649 break;
2650 case GETALL:
2651 case SETALL:
2652 err = target_to_host_semarray(semid, &array, target_su.array);
2653 if (err)
2654 return err;
2655 arg.array = array;
2656 ret = get_errno(semctl(semid, semnum, cmd, arg));
2657 err = host_to_target_semarray(semid, target_su.array, &array);
2658 if (err)
2659 return err;
2660 break;
2661 case IPC_STAT:
2662 case IPC_SET:
2663 case SEM_STAT:
2664 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2665 if (err)
2666 return err;
2667 arg.buf = &dsarg;
2668 ret = get_errno(semctl(semid, semnum, cmd, arg));
2669 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2670 if (err)
2671 return err;
2672 break;
2673 case IPC_INFO:
2674 case SEM_INFO:
2675 arg.__buf = &seminfo;
2676 ret = get_errno(semctl(semid, semnum, cmd, arg));
2677 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2678 if (err)
2679 return err;
2680 break;
2681 case IPC_RMID:
2682 case GETPID:
2683 case GETNCNT:
2684 case GETZCNT:
2685 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2686 break;
2689 return ret;
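/* Example (illustrative sketch, hypothetical guest call): for
 *     semctl(semid, 0, GETALL, arg)
 * the last argument arrives as a target_semun whose .array member is a guest
 * address. target_to_host_semarray() sizes a host buffer from IPC_STAT's
 * sem_nsems, the host semctl() fills it, and host_to_target_semarray() copies
 * the values back to the guest with __put_user() and frees the buffer.
 */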
2692 struct target_sembuf {
2693 unsigned short sem_num;
2694 short sem_op;
2695 short sem_flg;
2698 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2699 abi_ulong target_addr,
2700 unsigned nsops)
2702 struct target_sembuf *target_sembuf;
2703 int i;
2705 target_sembuf = lock_user(VERIFY_READ, target_addr,
2706 nsops*sizeof(struct target_sembuf), 1);
2707 if (!target_sembuf)
2708 return -TARGET_EFAULT;
2710 for(i=0; i<nsops; i++) {
2711 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2712 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2713 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2716 unlock_user(target_sembuf, target_addr, 0);
2718 return 0;
2721 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2723 struct sembuf sops[nsops];
2725 if (target_to_host_sembuf(sops, ptr, nsops))
2726 return -TARGET_EFAULT;
2728 return get_errno(semop(semid, sops, nsops));
2731 struct target_msqid_ds
2733 struct target_ipc_perm msg_perm;
2734 abi_ulong msg_stime;
2735 #if TARGET_ABI_BITS == 32
2736 abi_ulong __unused1;
2737 #endif
2738 abi_ulong msg_rtime;
2739 #if TARGET_ABI_BITS == 32
2740 abi_ulong __unused2;
2741 #endif
2742 abi_ulong msg_ctime;
2743 #if TARGET_ABI_BITS == 32
2744 abi_ulong __unused3;
2745 #endif
2746 abi_ulong __msg_cbytes;
2747 abi_ulong msg_qnum;
2748 abi_ulong msg_qbytes;
2749 abi_ulong msg_lspid;
2750 abi_ulong msg_lrpid;
2751 abi_ulong __unused4;
2752 abi_ulong __unused5;
2755 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2756 abi_ulong target_addr)
2758 struct target_msqid_ds *target_md;
2760 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2761 return -TARGET_EFAULT;
2762 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2763 return -TARGET_EFAULT;
2764 host_md->msg_stime = tswapal(target_md->msg_stime);
2765 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2766 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2767 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2768 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2769 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2770 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2771 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2772 unlock_user_struct(target_md, target_addr, 0);
2773 return 0;
2776 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2777 struct msqid_ds *host_md)
2779 struct target_msqid_ds *target_md;
2781 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2782 return -TARGET_EFAULT;
2783 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2784 return -TARGET_EFAULT;
2785 target_md->msg_stime = tswapal(host_md->msg_stime);
2786 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2787 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2788 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2789 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2790 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2791 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2792 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2793 unlock_user_struct(target_md, target_addr, 1);
2794 return 0;
2797 struct target_msginfo {
2798 int msgpool;
2799 int msgmap;
2800 int msgmax;
2801 int msgmnb;
2802 int msgmni;
2803 int msgssz;
2804 int msgtql;
2805 unsigned short int msgseg;
2808 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2809 struct msginfo *host_msginfo)
2811 struct target_msginfo *target_msginfo;
2812 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2813 return -TARGET_EFAULT;
2814 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2815 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2816 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2817 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2818 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2819 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2820 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2821 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2822 unlock_user_struct(target_msginfo, target_addr, 1);
2823 return 0;
2826 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2828 struct msqid_ds dsarg;
2829 struct msginfo msginfo;
2830 abi_long ret = -TARGET_EINVAL;
2832 cmd &= 0xff;
2834 switch (cmd) {
2835 case IPC_STAT:
2836 case IPC_SET:
2837 case MSG_STAT:
2838 if (target_to_host_msqid_ds(&dsarg,ptr))
2839 return -TARGET_EFAULT;
2840 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2841 if (host_to_target_msqid_ds(ptr,&dsarg))
2842 return -TARGET_EFAULT;
2843 break;
2844 case IPC_RMID:
2845 ret = get_errno(msgctl(msgid, cmd, NULL));
2846 break;
2847 case IPC_INFO:
2848 case MSG_INFO:
2849 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2850 if (host_to_target_msginfo(ptr, &msginfo))
2851 return -TARGET_EFAULT;
2852 break;
2855 return ret;
2858 struct target_msgbuf {
2859 abi_long mtype;
2860 char mtext[1];
2863 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2864 unsigned int msgsz, int msgflg)
2866 struct target_msgbuf *target_mb;
2867 struct msgbuf *host_mb;
2868 abi_long ret = 0;
2870 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2871 return -TARGET_EFAULT;
2872 host_mb = malloc(msgsz+sizeof(long));
2873 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2874 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2875 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2876 free(host_mb);
2877 unlock_user_struct(target_mb, msgp, 0);
2879 return ret;
2882 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2883 unsigned int msgsz, abi_long msgtyp,
2884 int msgflg)
2886 struct target_msgbuf *target_mb;
2887 char *target_mtext;
2888 struct msgbuf *host_mb;
2889 abi_long ret = 0;
2891 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2892 return -TARGET_EFAULT;
2894 host_mb = g_malloc(msgsz+sizeof(long));
2895 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2897 if (ret > 0) {
2898 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2899 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2900 if (!target_mtext) {
2901 ret = -TARGET_EFAULT;
2902 goto end;
2904 memcpy(target_mb->mtext, host_mb->mtext, ret);
2905 unlock_user(target_mtext, target_mtext_addr, ret);
2908 target_mb->mtype = tswapal(host_mb->mtype);
2910 end:
2911 if (target_mb)
2912 unlock_user_struct(target_mb, msgp, 1);
2913 g_free(host_mb);
2914 return ret;
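/* Note (illustrative, restates the layout assumed above): the guest buffer is
 *     struct target_msgbuf { abi_long mtype; char mtext[]; };
 * so the copy-back in do_msgrcv() locks guest memory starting at
 * msgp + sizeof(abi_ulong), i.e. right after the byte-swapped mtype field.
 */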
2917 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2918 abi_ulong target_addr)
2920 struct target_shmid_ds *target_sd;
2922 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2923 return -TARGET_EFAULT;
2924 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2925 return -TARGET_EFAULT;
2926 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2927 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2928 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2929 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2930 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2931 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2932 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2933 unlock_user_struct(target_sd, target_addr, 0);
2934 return 0;
2937 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2938 struct shmid_ds *host_sd)
2940 struct target_shmid_ds *target_sd;
2942 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2943 return -TARGET_EFAULT;
2944 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2945 return -TARGET_EFAULT;
2946 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2947 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2948 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2949 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2950 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2951 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2952 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2953 unlock_user_struct(target_sd, target_addr, 1);
2954 return 0;
2957 struct target_shminfo {
2958 abi_ulong shmmax;
2959 abi_ulong shmmin;
2960 abi_ulong shmmni;
2961 abi_ulong shmseg;
2962 abi_ulong shmall;
2965 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2966 struct shminfo *host_shminfo)
2968 struct target_shminfo *target_shminfo;
2969 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2970 return -TARGET_EFAULT;
2971 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2972 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2973 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2974 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2975 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2976 unlock_user_struct(target_shminfo, target_addr, 1);
2977 return 0;
2980 struct target_shm_info {
2981 int used_ids;
2982 abi_ulong shm_tot;
2983 abi_ulong shm_rss;
2984 abi_ulong shm_swp;
2985 abi_ulong swap_attempts;
2986 abi_ulong swap_successes;
2989 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2990 struct shm_info *host_shm_info)
2992 struct target_shm_info *target_shm_info;
2993 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2994 return -TARGET_EFAULT;
2995 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2996 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2997 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2998 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2999 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3000 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3001 unlock_user_struct(target_shm_info, target_addr, 1);
3002 return 0;
3005 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3007 struct shmid_ds dsarg;
3008 struct shminfo shminfo;
3009 struct shm_info shm_info;
3010 abi_long ret = -TARGET_EINVAL;
3012 cmd &= 0xff;
3014 switch(cmd) {
3015 case IPC_STAT:
3016 case IPC_SET:
3017 case SHM_STAT:
3018 if (target_to_host_shmid_ds(&dsarg, buf))
3019 return -TARGET_EFAULT;
3020 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3021 if (host_to_target_shmid_ds(buf, &dsarg))
3022 return -TARGET_EFAULT;
3023 break;
3024 case IPC_INFO:
3025 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3026 if (host_to_target_shminfo(buf, &shminfo))
3027 return -TARGET_EFAULT;
3028 break;
3029 case SHM_INFO:
3030 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3031 if (host_to_target_shm_info(buf, &shm_info))
3032 return -TARGET_EFAULT;
3033 break;
3034 case IPC_RMID:
3035 case SHM_LOCK:
3036 case SHM_UNLOCK:
3037 ret = get_errno(shmctl(shmid, cmd, NULL));
3038 break;
3041 return ret;
3044 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3046 abi_long raddr;
3047 void *host_raddr;
3048 struct shmid_ds shm_info;
3049 int i,ret;
3051 /* find out the length of the shared memory segment */
3052 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3053 if (is_error(ret)) {
3054 /* can't get length, bail out */
3055 return ret;
3058 mmap_lock();
3060 if (shmaddr)
3061 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3062 else {
3063 abi_ulong mmap_start;
3065 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3067 if (mmap_start == -1) {
3068 errno = ENOMEM;
3069 host_raddr = (void *)-1;
3070 } else
3071 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3074 if (host_raddr == (void *)-1) {
3075 mmap_unlock();
3076 return get_errno((long)host_raddr);
3078 raddr=h2g((unsigned long)host_raddr);
3080 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3081 PAGE_VALID | PAGE_READ |
3082 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3084 for (i = 0; i < N_SHM_REGIONS; i++) {
3085 if (shm_regions[i].start == 0) {
3086 shm_regions[i].start = raddr;
3087 shm_regions[i].size = shm_info.shm_segsz;
3088 break;
3092 mmap_unlock();
3093 return raddr;
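/* Example (illustrative sketch): g2h()/h2g() translate between guest virtual
 * addresses and host pointers, so a hypothetical do_shmat(shmid, 0, 0) picks
 * a free guest range with mmap_find_vma(), attaches the host segment at
 * g2h(mmap_start), returns h2g(host_raddr) to the guest, and records the
 * mapping in shm_regions[] so that do_shmdt() can clear the page flags later.
 */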
3097 static inline abi_long do_shmdt(abi_ulong shmaddr)
3099 int i;
3101 for (i = 0; i < N_SHM_REGIONS; ++i) {
3102 if (shm_regions[i].start == shmaddr) {
3103 shm_regions[i].start = 0;
3104 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3105 break;
3109 return get_errno(shmdt(g2h(shmaddr)));
3112 #ifdef TARGET_NR_ipc
3113 /* ??? This only works with linear mappings. */
3114 /* do_ipc() must return target values and target errnos. */
3115 static abi_long do_ipc(unsigned int call, int first,
3116 int second, int third,
3117 abi_long ptr, abi_long fifth)
3119 int version;
3120 abi_long ret = 0;
3122 version = call >> 16;
3123 call &= 0xffff;
3125 switch (call) {
3126 case IPCOP_semop:
3127 ret = do_semop(first, ptr, second);
3128 break;
3130 case IPCOP_semget:
3131 ret = get_errno(semget(first, second, third));
3132 break;
3134 case IPCOP_semctl:
3135 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3136 break;
3138 case IPCOP_msgget:
3139 ret = get_errno(msgget(first, second));
3140 break;
3142 case IPCOP_msgsnd:
3143 ret = do_msgsnd(first, ptr, second, third);
3144 break;
3146 case IPCOP_msgctl:
3147 ret = do_msgctl(first, second, ptr);
3148 break;
3150 case IPCOP_msgrcv:
3151 switch (version) {
3152 case 0:
3154 struct target_ipc_kludge {
3155 abi_long msgp;
3156 abi_long msgtyp;
3157 } *tmp;
3159 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3160 ret = -TARGET_EFAULT;
3161 break;
3164 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3166 unlock_user_struct(tmp, ptr, 0);
3167 break;
3169 default:
3170 ret = do_msgrcv(first, ptr, second, fifth, third);
3172 break;
3174 case IPCOP_shmat:
3175 switch (version) {
3176 default:
3178 abi_ulong raddr;
3179 raddr = do_shmat(first, ptr, second);
3180 if (is_error(raddr))
3181 return get_errno(raddr);
3182 if (put_user_ual(raddr, third))
3183 return -TARGET_EFAULT;
3184 break;
3186 case 1:
3187 ret = -TARGET_EINVAL;
3188 break;
3190 break;
3191 case IPCOP_shmdt:
3192 ret = do_shmdt(ptr);
3193 break;
3195 case IPCOP_shmget:
3196 /* IPC_* flag values are the same on all linux platforms */
3197 ret = get_errno(shmget(first, second, third));
3198 break;
3200 /* IPC_* and SHM_* command values are the same on all linux platforms */
3201 case IPCOP_shmctl:
3202 ret = do_shmctl(first, second, ptr);
3203 break;
3204 default:
3205 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3206 ret = -TARGET_ENOSYS;
3207 break;
3209 return ret;
3211 #endif
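/* Example (illustrative sketch, hypothetical guest call): architectures that
 * funnel SysV IPC through a single ipc(2) syscall deliver a guest
 *     shmat(shmid, NULL, 0)
 * to this multiplexer roughly as
 *     do_ipc(IPCOP_shmat, shmid, 0, third, 0, 0);
 * where "third" is the guest address that receives the attach address via
 * put_user_ual().
 */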
3213 /* kernel structure type definitions */
3215 #define STRUCT(name, ...) STRUCT_ ## name,
3216 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3217 enum {
3218 #include "syscall_types.h"
3220 #undef STRUCT
3221 #undef STRUCT_SPECIAL
3223 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3224 #define STRUCT_SPECIAL(name)
3225 #include "syscall_types.h"
3226 #undef STRUCT
3227 #undef STRUCT_SPECIAL
3229 typedef struct IOCTLEntry IOCTLEntry;
3231 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3232 int fd, abi_long cmd, abi_long arg);
3234 struct IOCTLEntry {
3235 unsigned int target_cmd;
3236 unsigned int host_cmd;
3237 const char *name;
3238 int access;
3239 do_ioctl_fn *do_ioctl;
3240 const argtype arg_type[5];
3243 #define IOC_R 0x0001
3244 #define IOC_W 0x0002
3245 #define IOC_RW (IOC_R | IOC_W)
3247 #define MAX_STRUCT_SIZE 4096
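/* Illustrative sketch (hypothetical expansion): the IOCTL()/IOCTL_SPECIAL()
 * macros further below turn each line of ioctls.h into an ioctl_entries[]
 * element, so a simple read-only ioctl might expand to something like
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 * and the generic path in do_ioctl() then uses the argtype list to thunk the
 * argument between target and host layout.
 */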
3249 #ifdef CONFIG_FIEMAP
3250 /* So fiemap access checks don't overflow on 32 bit systems.
3251 * This is very slightly smaller than the limit imposed by
3252 * the underlying kernel.
3253 */
3254 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3255 / sizeof(struct fiemap_extent))
3257 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3258 int fd, abi_long cmd, abi_long arg)
3260 /* The parameter for this ioctl is a struct fiemap followed
3261 * by an array of struct fiemap_extent whose size is set
3262 * in fiemap->fm_extent_count. The array is filled in by the
3263 * ioctl.
3264 */
3265 int target_size_in, target_size_out;
3266 struct fiemap *fm;
3267 const argtype *arg_type = ie->arg_type;
3268 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3269 void *argptr, *p;
3270 abi_long ret;
3271 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3272 uint32_t outbufsz;
3273 int free_fm = 0;
3275 assert(arg_type[0] == TYPE_PTR);
3276 assert(ie->access == IOC_RW);
3277 arg_type++;
3278 target_size_in = thunk_type_size(arg_type, 0);
3279 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3280 if (!argptr) {
3281 return -TARGET_EFAULT;
3283 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3284 unlock_user(argptr, arg, 0);
3285 fm = (struct fiemap *)buf_temp;
3286 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3287 return -TARGET_EINVAL;
3290 outbufsz = sizeof (*fm) +
3291 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3293 if (outbufsz > MAX_STRUCT_SIZE) {
3294 /* We can't fit all the extents into the fixed size buffer.
3295 * Allocate one that is large enough and use it instead.
3296 */
3297 fm = malloc(outbufsz);
3298 if (!fm) {
3299 return -TARGET_ENOMEM;
3301 memcpy(fm, buf_temp, sizeof(struct fiemap));
3302 free_fm = 1;
3304 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3305 if (!is_error(ret)) {
3306 target_size_out = target_size_in;
3307 /* An extent_count of 0 means we were only counting the extents
3308 * so there are no structs to copy
3309 */
3310 if (fm->fm_extent_count != 0) {
3311 target_size_out += fm->fm_mapped_extents * extent_size;
3313 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3314 if (!argptr) {
3315 ret = -TARGET_EFAULT;
3316 } else {
3317 /* Convert the struct fiemap */
3318 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3319 if (fm->fm_extent_count != 0) {
3320 p = argptr + target_size_in;
3321 /* ...and then all the struct fiemap_extents */
3322 for (i = 0; i < fm->fm_mapped_extents; i++) {
3323 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3324 THUNK_TARGET);
3325 p += extent_size;
3328 unlock_user(argptr, arg, target_size_out);
3331 if (free_fm) {
3332 free(fm);
3334 return ret;
3336 #endif
3338 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3339 int fd, abi_long cmd, abi_long arg)
3341 const argtype *arg_type = ie->arg_type;
3342 int target_size;
3343 void *argptr;
3344 int ret;
3345 struct ifconf *host_ifconf;
3346 uint32_t outbufsz;
3347 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3348 int target_ifreq_size;
3349 int nb_ifreq;
3350 int free_buf = 0;
3351 int i;
3352 int target_ifc_len;
3353 abi_long target_ifc_buf;
3354 int host_ifc_len;
3355 char *host_ifc_buf;
3357 assert(arg_type[0] == TYPE_PTR);
3358 assert(ie->access == IOC_RW);
3360 arg_type++;
3361 target_size = thunk_type_size(arg_type, 0);
3363 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3364 if (!argptr)
3365 return -TARGET_EFAULT;
3366 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3367 unlock_user(argptr, arg, 0);
3369 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3370 target_ifc_len = host_ifconf->ifc_len;
3371 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3373 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3374 nb_ifreq = target_ifc_len / target_ifreq_size;
3375 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3377 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3378 if (outbufsz > MAX_STRUCT_SIZE) {
3379 /* We can't fit all the ifreq entries into the fixed size buffer.
3380 * Allocate one that is large enough and use it instead.
3381 */
3382 host_ifconf = malloc(outbufsz);
3383 if (!host_ifconf) {
3384 return -TARGET_ENOMEM;
3386 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3387 free_buf = 1;
3389 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3391 host_ifconf->ifc_len = host_ifc_len;
3392 host_ifconf->ifc_buf = host_ifc_buf;
3394 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3395 if (!is_error(ret)) {
3396 /* convert host ifc_len to target ifc_len */
3398 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3399 target_ifc_len = nb_ifreq * target_ifreq_size;
3400 host_ifconf->ifc_len = target_ifc_len;
3402 /* restore target ifc_buf */
3404 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3406 /* copy struct ifconf to target user */
3408 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3409 if (!argptr)
3410 return -TARGET_EFAULT;
3411 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3412 unlock_user(argptr, arg, target_size);
3414 /* copy ifreq[] to target user */
3416 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3417 for (i = 0; i < nb_ifreq ; i++) {
3418 thunk_convert(argptr + i * target_ifreq_size,
3419 host_ifc_buf + i * sizeof(struct ifreq),
3420 ifreq_arg_type, THUNK_TARGET);
3422 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3425 if (free_buf) {
3426 free(host_ifconf);
3429 return ret;
3432 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3433 abi_long cmd, abi_long arg)
3435 void *argptr;
3436 struct dm_ioctl *host_dm;
3437 abi_long guest_data;
3438 uint32_t guest_data_size;
3439 int target_size;
3440 const argtype *arg_type = ie->arg_type;
3441 abi_long ret;
3442 void *big_buf = NULL;
3443 char *host_data;
3445 arg_type++;
3446 target_size = thunk_type_size(arg_type, 0);
3447 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3448 if (!argptr) {
3449 ret = -TARGET_EFAULT;
3450 goto out;
3452 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3453 unlock_user(argptr, arg, 0);
3455 /* buf_temp is too small, so fetch things into a bigger buffer */
3456 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3457 memcpy(big_buf, buf_temp, target_size);
3458 buf_temp = big_buf;
3459 host_dm = big_buf;
3461 guest_data = arg + host_dm->data_start;
3462 if ((guest_data - arg) < 0) {
3463 ret = -EINVAL;
3464 goto out;
3466 guest_data_size = host_dm->data_size - host_dm->data_start;
3467 host_data = (char*)host_dm + host_dm->data_start;
3469 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3470 switch (ie->host_cmd) {
3471 case DM_REMOVE_ALL:
3472 case DM_LIST_DEVICES:
3473 case DM_DEV_CREATE:
3474 case DM_DEV_REMOVE:
3475 case DM_DEV_SUSPEND:
3476 case DM_DEV_STATUS:
3477 case DM_DEV_WAIT:
3478 case DM_TABLE_STATUS:
3479 case DM_TABLE_CLEAR:
3480 case DM_TABLE_DEPS:
3481 case DM_LIST_VERSIONS:
3482 /* no input data */
3483 break;
3484 case DM_DEV_RENAME:
3485 case DM_DEV_SET_GEOMETRY:
3486 /* data contains only strings */
3487 memcpy(host_data, argptr, guest_data_size);
3488 break;
3489 case DM_TARGET_MSG:
3490 memcpy(host_data, argptr, guest_data_size);
3491 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3492 break;
3493 case DM_TABLE_LOAD:
3495 void *gspec = argptr;
3496 void *cur_data = host_data;
3497 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3498 int spec_size = thunk_type_size(arg_type, 0);
3499 int i;
3501 for (i = 0; i < host_dm->target_count; i++) {
3502 struct dm_target_spec *spec = cur_data;
3503 uint32_t next;
3504 int slen;
3506 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3507 slen = strlen((char*)gspec + spec_size) + 1;
3508 next = spec->next;
3509 spec->next = sizeof(*spec) + slen;
3510 strcpy((char*)&spec[1], gspec + spec_size);
3511 gspec += next;
3512 cur_data += spec->next;
3514 break;
3516 default:
3517 ret = -TARGET_EINVAL;
3518 goto out;
3520 unlock_user(argptr, guest_data, 0);
3522 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3523 if (!is_error(ret)) {
3524 guest_data = arg + host_dm->data_start;
3525 guest_data_size = host_dm->data_size - host_dm->data_start;
3526 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3527 switch (ie->host_cmd) {
3528 case DM_REMOVE_ALL:
3529 case DM_DEV_CREATE:
3530 case DM_DEV_REMOVE:
3531 case DM_DEV_RENAME:
3532 case DM_DEV_SUSPEND:
3533 case DM_DEV_STATUS:
3534 case DM_TABLE_LOAD:
3535 case DM_TABLE_CLEAR:
3536 case DM_TARGET_MSG:
3537 case DM_DEV_SET_GEOMETRY:
3538 /* no return data */
3539 break;
3540 case DM_LIST_DEVICES:
3542 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3543 uint32_t remaining_data = guest_data_size;
3544 void *cur_data = argptr;
3545 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3546 int nl_size = 12; /* can't use thunk_size due to alignment */
3548 while (1) {
3549 uint32_t next = nl->next;
3550 if (next) {
3551 nl->next = nl_size + (strlen(nl->name) + 1);
3553 if (remaining_data < nl->next) {
3554 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3555 break;
3557 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3558 strcpy(cur_data + nl_size, nl->name);
3559 cur_data += nl->next;
3560 remaining_data -= nl->next;
3561 if (!next) {
3562 break;
3564 nl = (void*)nl + next;
3566 break;
3568 case DM_DEV_WAIT:
3569 case DM_TABLE_STATUS:
3571 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3572 void *cur_data = argptr;
3573 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3574 int spec_size = thunk_type_size(arg_type, 0);
3575 int i;
3577 for (i = 0; i < host_dm->target_count; i++) {
3578 uint32_t next = spec->next;
3579 int slen = strlen((char*)&spec[1]) + 1;
3580 spec->next = (cur_data - argptr) + spec_size + slen;
3581 if (guest_data_size < spec->next) {
3582 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3583 break;
3585 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3586 strcpy(cur_data + spec_size, (char*)&spec[1]);
3587 cur_data = argptr + spec->next;
3588 spec = (void*)host_dm + host_dm->data_start + next;
3590 break;
3592 case DM_TABLE_DEPS:
3594 void *hdata = (void*)host_dm + host_dm->data_start;
3595 int count = *(uint32_t*)hdata;
3596 uint64_t *hdev = hdata + 8;
3597 uint64_t *gdev = argptr + 8;
3598 int i;
3600 *(uint32_t*)argptr = tswap32(count);
3601 for (i = 0; i < count; i++) {
3602 *gdev = tswap64(*hdev);
3603 gdev++;
3604 hdev++;
3606 break;
3608 case DM_LIST_VERSIONS:
3610 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3611 uint32_t remaining_data = guest_data_size;
3612 void *cur_data = argptr;
3613 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3614 int vers_size = thunk_type_size(arg_type, 0);
3616 while (1) {
3617 uint32_t next = vers->next;
3618 if (next) {
3619 vers->next = vers_size + (strlen(vers->name) + 1);
3621 if (remaining_data < vers->next) {
3622 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3623 break;
3625 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3626 strcpy(cur_data + vers_size, vers->name);
3627 cur_data += vers->next;
3628 remaining_data -= vers->next;
3629 if (!next) {
3630 break;
3632 vers = (void*)vers + next;
3634 break;
3636 default:
3637 ret = -TARGET_EINVAL;
3638 goto out;
3640 unlock_user(argptr, guest_data, guest_data_size);
3642 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3643 if (!argptr) {
3644 ret = -TARGET_EFAULT;
3645 goto out;
3647 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3648 unlock_user(argptr, arg, target_size);
3650 out:
3651 g_free(big_buf);
3652 return ret;
3655 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3656 int fd, abi_long cmd, abi_long arg)
3658 const argtype *arg_type = ie->arg_type;
3659 const StructEntry *se;
3660 const argtype *field_types;
3661 const int *dst_offsets, *src_offsets;
3662 int target_size;
3663 void *argptr;
3664 abi_ulong *target_rt_dev_ptr;
3665 unsigned long *host_rt_dev_ptr;
3666 abi_long ret;
3667 int i;
3669 assert(ie->access == IOC_W);
3670 assert(*arg_type == TYPE_PTR);
3671 arg_type++;
3672 assert(*arg_type == TYPE_STRUCT);
3673 target_size = thunk_type_size(arg_type, 0);
3674 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3675 if (!argptr) {
3676 return -TARGET_EFAULT;
3678 arg_type++;
3679 assert(*arg_type == (int)STRUCT_rtentry);
3680 se = struct_entries + *arg_type++;
3681 assert(se->convert[0] == NULL);
3682 /* convert struct here to be able to catch rt_dev string */
3683 field_types = se->field_types;
3684 dst_offsets = se->field_offsets[THUNK_HOST];
3685 src_offsets = se->field_offsets[THUNK_TARGET];
3686 for (i = 0; i < se->nb_fields; i++) {
3687 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3688 assert(*field_types == TYPE_PTRVOID);
3689 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3690 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3691 if (*target_rt_dev_ptr != 0) {
3692 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3693 tswapal(*target_rt_dev_ptr));
3694 if (!*host_rt_dev_ptr) {
3695 unlock_user(argptr, arg, 0);
3696 return -TARGET_EFAULT;
3698 } else {
3699 *host_rt_dev_ptr = 0;
3701 field_types++;
3702 continue;
3704 field_types = thunk_convert(buf_temp + dst_offsets[i],
3705 argptr + src_offsets[i],
3706 field_types, THUNK_HOST);
3708 unlock_user(argptr, arg, 0);
3710 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3711 if (*host_rt_dev_ptr != 0) {
3712 unlock_user((void *)*host_rt_dev_ptr,
3713 *target_rt_dev_ptr, 0);
3715 return ret;
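/* Note (illustrative): struct rtentry is converted field by field here
 * because rt_dev is a pointer to a device-name string in guest memory; it is
 * brought in with lock_user_string(), handed to the host ioctl as a host
 * pointer, and released again after the call returns.
 */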
3718 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
3719 int fd, abi_long cmd, abi_long arg)
3721 int sig = target_to_host_signal(arg);
3722 return get_errno(ioctl(fd, ie->host_cmd, sig));
3725 static IOCTLEntry ioctl_entries[] = {
3726 #define IOCTL(cmd, access, ...) \
3727 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3728 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3729 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3730 #include "ioctls.h"
3731 { 0, 0, },
3734 /* ??? Implement proper locking for ioctls. */
3735 /* do_ioctl() Must return target values and target errnos. */
3736 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3738 const IOCTLEntry *ie;
3739 const argtype *arg_type;
3740 abi_long ret;
3741 uint8_t buf_temp[MAX_STRUCT_SIZE];
3742 int target_size;
3743 void *argptr;
3745 ie = ioctl_entries;
3746 for(;;) {
3747 if (ie->target_cmd == 0) {
3748 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3749 return -TARGET_ENOSYS;
3751 if (ie->target_cmd == cmd)
3752 break;
3753 ie++;
3755 arg_type = ie->arg_type;
3756 #if defined(DEBUG)
3757 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3758 #endif
3759 if (ie->do_ioctl) {
3760 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3763 switch(arg_type[0]) {
3764 case TYPE_NULL:
3765 /* no argument */
3766 ret = get_errno(ioctl(fd, ie->host_cmd));
3767 break;
3768 case TYPE_PTRVOID:
3769 case TYPE_INT:
3770 /* int argument */
3771 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3772 break;
3773 case TYPE_PTR:
3774 arg_type++;
3775 target_size = thunk_type_size(arg_type, 0);
3776 switch(ie->access) {
3777 case IOC_R:
3778 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3779 if (!is_error(ret)) {
3780 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3781 if (!argptr)
3782 return -TARGET_EFAULT;
3783 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3784 unlock_user(argptr, arg, target_size);
3786 break;
3787 case IOC_W:
3788 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3789 if (!argptr)
3790 return -TARGET_EFAULT;
3791 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3792 unlock_user(argptr, arg, 0);
3793 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3794 break;
3795 default:
3796 case IOC_RW:
3797 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3798 if (!argptr)
3799 return -TARGET_EFAULT;
3800 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3801 unlock_user(argptr, arg, 0);
3802 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3803 if (!is_error(ret)) {
3804 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3805 if (!argptr)
3806 return -TARGET_EFAULT;
3807 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3808 unlock_user(argptr, arg, target_size);
3810 break;
3812 break;
3813 default:
3814 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3815 (long)cmd, arg_type[0]);
3816 ret = -TARGET_ENOSYS;
3817 break;
3819 return ret;
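/* Note (illustrative summary of the switch above): for the generic TYPE_PTR
 * case, IOC_W copies the guest struct into buf_temp and issues the host
 * ioctl, IOC_R issues the ioctl first and converts the result back out, and
 * IOC_RW does both, converting the (possibly updated) struct back to the
 * guest with thunk_convert(..., THUNK_TARGET).
 */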
3822 static const bitmask_transtbl iflag_tbl[] = {
3823 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3824 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3825 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3826 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3827 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3828 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3829 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3830 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3831 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3832 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3833 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3834 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3835 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3836 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3837 { 0, 0, 0, 0 }
3840 static const bitmask_transtbl oflag_tbl[] = {
3841 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3842 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3843 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3844 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3845 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3846 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3847 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3848 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3849 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3850 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3851 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3852 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3853 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3854 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3855 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3856 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3857 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3858 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3859 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3860 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3861 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3862 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3863 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3864 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3865 { 0, 0, 0, 0 }
3868 static const bitmask_transtbl cflag_tbl[] = {
3869 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3870 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3871 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3872 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3873 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3874 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3875 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3876 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3877 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3878 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3879 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3880 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3881 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3882 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3883 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3884 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3885 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3886 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3887 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3888 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3889 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3890 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3891 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3892 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3893 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3894 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3895 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3896 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3897 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3898 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3899 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3900 { 0, 0, 0, 0 }
3903 static const bitmask_transtbl lflag_tbl[] = {
3904 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3905 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3906 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3907 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3908 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3909 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3910 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3911 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3912 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3913 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3914 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3915 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3916 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3917 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3918 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3919 { 0, 0, 0, 0 }
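/* Note (illustrative): each bitmask_transtbl row pairs a target mask and
 * value with the corresponding host mask and value, terminated by an all-zero
 * row. For example, a hypothetical target c_cflag of TARGET_B9600 | TARGET_CS8
 * matches the { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 } and
 * { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 } rows of cflag_tbl and comes out as
 * B9600 | CS8 on the host.
 */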
3922 static void target_to_host_termios (void *dst, const void *src)
3924 struct host_termios *host = dst;
3925 const struct target_termios *target = src;
3927 host->c_iflag =
3928 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3929 host->c_oflag =
3930 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3931 host->c_cflag =
3932 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3933 host->c_lflag =
3934 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3935 host->c_line = target->c_line;
3937 memset(host->c_cc, 0, sizeof(host->c_cc));
3938 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3939 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3940 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3941 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3942 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3943 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3944 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3945 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3946 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3947 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3948 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3949 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3950 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3951 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3952 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3953 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3954 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3957 static void host_to_target_termios (void *dst, const void *src)
3959 struct target_termios *target = dst;
3960 const struct host_termios *host = src;
3962 target->c_iflag =
3963 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3964 target->c_oflag =
3965 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3966 target->c_cflag =
3967 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3968 target->c_lflag =
3969 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3970 target->c_line = host->c_line;
3972 memset(target->c_cc, 0, sizeof(target->c_cc));
3973 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3974 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3975 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3976 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3977 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3978 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3979 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3980 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3981 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3982 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3983 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3984 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3985 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3986 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3987 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3988 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3989 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
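/* Structure descriptor for struct termios, wiring the two converters
   above into the thunk layer (used when converting termios ioctl
   arguments such as the TCGETS/TCSETS family). */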
3992 static const StructEntry struct_termios_def = {
3993 .convert = { host_to_target_termios, target_to_host_termios },
3994 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3995 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
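/* Translation table for mmap() flags between target and host values. */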
3998 static bitmask_transtbl mmap_flags_tbl[] = {
3999 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4000 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4001 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4002 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4003 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4004 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4005 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4006 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4007 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4008 MAP_NORESERVE },
4009 { 0, 0, 0, 0 }
4012 #if defined(TARGET_I386)
4014 /* NOTE: there is really only one LDT shared by all the threads */

4015 static uint8_t *ldt_table;
4017 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4019 int size;
4020 void *p;
4022 if (!ldt_table)
4023 return 0;
4024 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4025 if (size > bytecount)
4026 size = bytecount;
4027 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4028 if (!p)
4029 return -TARGET_EFAULT;
4030 /* ??? Should this be byteswapped? */
4031 memcpy(p, ldt_table, size);
4032 unlock_user(p, ptr, size);
4033 return size;
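/* write_ldt() decodes a target_modify_ldt_ldt_s descriptor, allocates the
   guest LDT on first use and installs the encoded 8-byte entry, following
   the same encoding rules as the Linux kernel's modify_ldt(). */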
4036 /* XXX: add locking support */
4037 static abi_long write_ldt(CPUX86State *env,
4038 abi_ulong ptr, unsigned long bytecount, int oldmode)
4040 struct target_modify_ldt_ldt_s ldt_info;
4041 struct target_modify_ldt_ldt_s *target_ldt_info;
4042 int seg_32bit, contents, read_exec_only, limit_in_pages;
4043 int seg_not_present, useable, lm;
4044 uint32_t *lp, entry_1, entry_2;
4046 if (bytecount != sizeof(ldt_info))
4047 return -TARGET_EINVAL;
4048 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4049 return -TARGET_EFAULT;
4050 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4051 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4052 ldt_info.limit = tswap32(target_ldt_info->limit);
4053 ldt_info.flags = tswap32(target_ldt_info->flags);
4054 unlock_user_struct(target_ldt_info, ptr, 0);
4056 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4057 return -TARGET_EINVAL;
4058 seg_32bit = ldt_info.flags & 1;
4059 contents = (ldt_info.flags >> 1) & 3;
4060 read_exec_only = (ldt_info.flags >> 3) & 1;
4061 limit_in_pages = (ldt_info.flags >> 4) & 1;
4062 seg_not_present = (ldt_info.flags >> 5) & 1;
4063 useable = (ldt_info.flags >> 6) & 1;
4064 #ifdef TARGET_ABI32
4065 lm = 0;
4066 #else
4067 lm = (ldt_info.flags >> 7) & 1;
4068 #endif
4069 if (contents == 3) {
4070 if (oldmode)
4071 return -TARGET_EINVAL;
4072 if (seg_not_present == 0)
4073 return -TARGET_EINVAL;
4075 /* allocate the LDT */
4076 if (!ldt_table) {
4077 env->ldt.base = target_mmap(0,
4078 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4079 PROT_READ|PROT_WRITE,
4080 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4081 if (env->ldt.base == -1)
4082 return -TARGET_ENOMEM;
4083 memset(g2h(env->ldt.base), 0,
4084 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4085 env->ldt.limit = 0xffff;
4086 ldt_table = g2h(env->ldt.base);
4089 /* NOTE: same code as Linux kernel */
4090 /* Allow LDTs to be cleared by the user. */
4091 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4092 if (oldmode ||
4093 (contents == 0 &&
4094 read_exec_only == 1 &&
4095 seg_32bit == 0 &&
4096 limit_in_pages == 0 &&
4097 seg_not_present == 1 &&
4098 useable == 0 )) {
4099 entry_1 = 0;
4100 entry_2 = 0;
4101 goto install;
4105 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4106 (ldt_info.limit & 0x0ffff);
4107 entry_2 = (ldt_info.base_addr & 0xff000000) |
4108 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4109 (ldt_info.limit & 0xf0000) |
4110 ((read_exec_only ^ 1) << 9) |
4111 (contents << 10) |
4112 ((seg_not_present ^ 1) << 15) |
4113 (seg_32bit << 22) |
4114 (limit_in_pages << 23) |
4115 (lm << 21) |
4116 0x7000;
4117 if (!oldmode)
4118 entry_2 |= (useable << 20);
4120 /* Install the new entry ... */
4121 install:
4122 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4123 lp[0] = tswap32(entry_1);
4124 lp[1] = tswap32(entry_2);
4125 return 0;
4128 /* specific and weird i386 syscalls */
4129 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4130 unsigned long bytecount)
4132 abi_long ret;
4134 switch (func) {
4135 case 0:
4136 ret = read_ldt(ptr, bytecount);
4137 break;
4138 case 1:
4139 ret = write_ldt(env, ptr, bytecount, 1);
4140 break;
4141 case 0x11:
4142 ret = write_ldt(env, ptr, bytecount, 0);
4143 break;
4144 default:
4145 ret = -TARGET_ENOSYS;
4146 break;
4148 return ret;
4151 #if defined(TARGET_I386) && defined(TARGET_ABI32)
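/* set_thread_area emulation: install a TLS descriptor into the guest GDT.
   If the caller passes entry_number == -1, a free slot in the TLS range is
   chosen and written back to the guest structure. */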
4152 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4154 uint64_t *gdt_table = g2h(env->gdt.base);
4155 struct target_modify_ldt_ldt_s ldt_info;
4156 struct target_modify_ldt_ldt_s *target_ldt_info;
4157 int seg_32bit, contents, read_exec_only, limit_in_pages;
4158 int seg_not_present, useable, lm;
4159 uint32_t *lp, entry_1, entry_2;
4160 int i;
4162 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4163 if (!target_ldt_info)
4164 return -TARGET_EFAULT;
4165 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4166 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4167 ldt_info.limit = tswap32(target_ldt_info->limit);
4168 ldt_info.flags = tswap32(target_ldt_info->flags);
4169 if (ldt_info.entry_number == -1) {
4170 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4171 if (gdt_table[i] == 0) {
4172 ldt_info.entry_number = i;
4173 target_ldt_info->entry_number = tswap32(i);
4174 break;
4178 unlock_user_struct(target_ldt_info, ptr, 1);
4180 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4181 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4182 return -TARGET_EINVAL;
4183 seg_32bit = ldt_info.flags & 1;
4184 contents = (ldt_info.flags >> 1) & 3;
4185 read_exec_only = (ldt_info.flags >> 3) & 1;
4186 limit_in_pages = (ldt_info.flags >> 4) & 1;
4187 seg_not_present = (ldt_info.flags >> 5) & 1;
4188 useable = (ldt_info.flags >> 6) & 1;
4189 #ifdef TARGET_ABI32
4190 lm = 0;
4191 #else
4192 lm = (ldt_info.flags >> 7) & 1;
4193 #endif
4195 if (contents == 3) {
4196 if (seg_not_present == 0)
4197 return -TARGET_EINVAL;
4200 /* NOTE: same code as Linux kernel */
4201 /* Allow LDTs to be cleared by the user. */
4202 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4203 if ((contents == 0 &&
4204 read_exec_only == 1 &&
4205 seg_32bit == 0 &&
4206 limit_in_pages == 0 &&
4207 seg_not_present == 1 &&
4208 useable == 0 )) {
4209 entry_1 = 0;
4210 entry_2 = 0;
4211 goto install;
4215 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4216 (ldt_info.limit & 0x0ffff);
4217 entry_2 = (ldt_info.base_addr & 0xff000000) |
4218 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4219 (ldt_info.limit & 0xf0000) |
4220 ((read_exec_only ^ 1) << 9) |
4221 (contents << 10) |
4222 ((seg_not_present ^ 1) << 15) |
4223 (seg_32bit << 22) |
4224 (limit_in_pages << 23) |
4225 (useable << 20) |
4226 (lm << 21) |
4227 0x7000;
4229 /* Install the new entry ... */
4230 install:
4231 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4232 lp[0] = tswap32(entry_1);
4233 lp[1] = tswap32(entry_2);
4234 return 0;
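/* get_thread_area emulation: decode a TLS descriptor from the guest GDT
   back into the target's user_desc layout. */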
4237 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4239 struct target_modify_ldt_ldt_s *target_ldt_info;
4240 uint64_t *gdt_table = g2h(env->gdt.base);
4241 uint32_t base_addr, limit, flags;
4242 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4243 int seg_not_present, useable, lm;
4244 uint32_t *lp, entry_1, entry_2;
4246 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4247 if (!target_ldt_info)
4248 return -TARGET_EFAULT;
4249 idx = tswap32(target_ldt_info->entry_number);
4250 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4251 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4252 unlock_user_struct(target_ldt_info, ptr, 1);
4253 return -TARGET_EINVAL;
4255 lp = (uint32_t *)(gdt_table + idx);
4256 entry_1 = tswap32(lp[0]);
4257 entry_2 = tswap32(lp[1]);
4259 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4260 contents = (entry_2 >> 10) & 3;
4261 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4262 seg_32bit = (entry_2 >> 22) & 1;
4263 limit_in_pages = (entry_2 >> 23) & 1;
4264 useable = (entry_2 >> 20) & 1;
4265 #ifdef TARGET_ABI32
4266 lm = 0;
4267 #else
4268 lm = (entry_2 >> 21) & 1;
4269 #endif
4270 flags = (seg_32bit << 0) | (contents << 1) |
4271 (read_exec_only << 3) | (limit_in_pages << 4) |
4272 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4273 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4274 base_addr = (entry_1 >> 16) |
4275 (entry_2 & 0xff000000) |
4276 ((entry_2 & 0xff) << 16);
4277 target_ldt_info->base_addr = tswapal(base_addr);
4278 target_ldt_info->limit = tswap32(limit);
4279 target_ldt_info->flags = tswap32(flags);
4280 unlock_user_struct(target_ldt_info, ptr, 1);
4281 return 0;
4283 #endif /* TARGET_I386 && TARGET_ABI32 */
4285 #ifndef TARGET_ABI32
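/* arch_prctl emulation for 64-bit x86 guests: set or read the FS/GS
   segment base. */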
4286 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4288 abi_long ret = 0;
4289 abi_ulong val;
4290 int idx;
4292 switch(code) {
4293 case TARGET_ARCH_SET_GS:
4294 case TARGET_ARCH_SET_FS:
4295 if (code == TARGET_ARCH_SET_GS)
4296 idx = R_GS;
4297 else
4298 idx = R_FS;
4299 cpu_x86_load_seg(env, idx, 0);
4300 env->segs[idx].base = addr;
4301 break;
4302 case TARGET_ARCH_GET_GS:
4303 case TARGET_ARCH_GET_FS:
4304 if (code == TARGET_ARCH_GET_GS)
4305 idx = R_GS;
4306 else
4307 idx = R_FS;
4308 val = env->segs[idx].base;
4309 if (put_user(val, addr, abi_ulong))
4310 ret = -TARGET_EFAULT;
4311 break;
4312 default:
4313 ret = -TARGET_EINVAL;
4314 break;
4316 return ret;
4318 #endif
4320 #endif /* defined(TARGET_I386) */
4322 #define NEW_STACK_SIZE 0x40000
4325 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4326 typedef struct {
4327 CPUArchState *env;
4328 pthread_mutex_t mutex;
4329 pthread_cond_t cond;
4330 pthread_t thread;
4331 uint32_t tid;
4332 abi_ulong child_tidptr;
4333 abi_ulong parent_tidptr;
4334 sigset_t sigmask;
4335 } new_thread_info;
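/* Thread entry point used by do_fork() for CLONE_VM clones: record the new
   thread's tid, publish it through the child/parent tid pointers if
   requested, restore the signal mask, wake the parent and enter the CPU
   loop. */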
4337 static void *clone_func(void *arg)
4339 new_thread_info *info = arg;
4340 CPUArchState *env;
4341 CPUState *cpu;
4342 TaskState *ts;
4344 env = info->env;
4345 cpu = ENV_GET_CPU(env);
4346 thread_cpu = cpu;
4347 ts = (TaskState *)cpu->opaque;
4348 info->tid = gettid();
4349 cpu->host_tid = info->tid;
4350 task_settid(ts);
4351 if (info->child_tidptr)
4352 put_user_u32(info->tid, info->child_tidptr);
4353 if (info->parent_tidptr)
4354 put_user_u32(info->tid, info->parent_tidptr);
4355 /* Enable signals. */
4356 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4357 /* Signal to the parent that we're ready. */
4358 pthread_mutex_lock(&info->mutex);
4359 pthread_cond_broadcast(&info->cond);
4360 pthread_mutex_unlock(&info->mutex);
4361 /* Wait until the parent has finished initializing the TLS state. */
4362 pthread_mutex_lock(&clone_lock);
4363 pthread_mutex_unlock(&clone_lock);
4364 cpu_loop(env);
4365 /* never exits */
4366 return NULL;
4369 /* do_fork() must return host values and target errnos (unlike most
4370 do_*() functions). */
4371 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4372 abi_ulong parent_tidptr, target_ulong newtls,
4373 abi_ulong child_tidptr)
4375 CPUState *cpu = ENV_GET_CPU(env);
4376 int ret;
4377 TaskState *ts;
4378 CPUState *new_cpu;
4379 CPUArchState *new_env;
4380 unsigned int nptl_flags;
4381 sigset_t sigmask;
4383 /* Emulate vfork() with fork() */
4384 if (flags & CLONE_VFORK)
4385 flags &= ~(CLONE_VFORK | CLONE_VM);
4387 if (flags & CLONE_VM) {
4388 TaskState *parent_ts = (TaskState *)cpu->opaque;
4389 new_thread_info info;
4390 pthread_attr_t attr;
4392 ts = g_malloc0(sizeof(TaskState));
4393 init_task_state(ts);
4394 /* we create a new CPU instance. */
4395 new_env = cpu_copy(env);
4396 /* Init regs that differ from the parent. */
4397 cpu_clone_regs(new_env, newsp);
4398 new_cpu = ENV_GET_CPU(new_env);
4399 new_cpu->opaque = ts;
4400 ts->bprm = parent_ts->bprm;
4401 ts->info = parent_ts->info;
4402 nptl_flags = flags;
4403 flags &= ~CLONE_NPTL_FLAGS2;
4405 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4406 ts->child_tidptr = child_tidptr;
4409 if (nptl_flags & CLONE_SETTLS)
4410 cpu_set_tls (new_env, newtls);
4412 /* Grab a mutex so that thread setup appears atomic. */
4413 pthread_mutex_lock(&clone_lock);
4415 memset(&info, 0, sizeof(info));
4416 pthread_mutex_init(&info.mutex, NULL);
4417 pthread_mutex_lock(&info.mutex);
4418 pthread_cond_init(&info.cond, NULL);
4419 info.env = new_env;
4420 if (nptl_flags & CLONE_CHILD_SETTID)
4421 info.child_tidptr = child_tidptr;
4422 if (nptl_flags & CLONE_PARENT_SETTID)
4423 info.parent_tidptr = parent_tidptr;
4425 ret = pthread_attr_init(&attr);
4426 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4427 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4428 /* It is not safe to deliver signals until the child has finished
4429 initializing, so temporarily block all signals. */
4430 sigfillset(&sigmask);
4431 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4433 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4434 /* TODO: Free new CPU state if thread creation failed. */
4436 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4437 pthread_attr_destroy(&attr);
4438 if (ret == 0) {
4439 /* Wait for the child to initialize. */
4440 pthread_cond_wait(&info.cond, &info.mutex);
4441 ret = info.tid;
4442 if (flags & CLONE_PARENT_SETTID)
4443 put_user_u32(ret, parent_tidptr);
4444 } else {
4445 ret = -1;
4447 pthread_mutex_unlock(&info.mutex);
4448 pthread_cond_destroy(&info.cond);
4449 pthread_mutex_destroy(&info.mutex);
4450 pthread_mutex_unlock(&clone_lock);
4451 } else {
4452 /* if CLONE_VM is not set, we consider it a plain fork */
4453 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4454 return -EINVAL;
4455 fork_start();
4456 ret = fork();
4457 if (ret == 0) {
4458 /* Child Process. */
4459 cpu_clone_regs(env, newsp);
4460 fork_end(1);
4461 /* There is a race condition here. The parent process could
4462 theoretically read the TID in the child process before the child
4463 tid is set. This would require using either ptrace
4464 (not implemented) or having *_tidptr point at a shared memory
4465 mapping. We can't repeat the spinlock hack used above because
4466 the child process gets its own copy of the lock. */
4467 if (flags & CLONE_CHILD_SETTID)
4468 put_user_u32(gettid(), child_tidptr);
4469 if (flags & CLONE_PARENT_SETTID)
4470 put_user_u32(gettid(), parent_tidptr);
4471 ts = (TaskState *)cpu->opaque;
4472 if (flags & CLONE_SETTLS)
4473 cpu_set_tls (env, newtls);
4474 if (flags & CLONE_CHILD_CLEARTID)
4475 ts->child_tidptr = child_tidptr;
4476 } else {
4477 fork_end(0);
4480 return ret;
4483 /* warning: doesn't handle Linux-specific flags... */
4484 static int target_to_host_fcntl_cmd(int cmd)
4486 switch(cmd) {
4487 case TARGET_F_DUPFD:
4488 case TARGET_F_GETFD:
4489 case TARGET_F_SETFD:
4490 case TARGET_F_GETFL:
4491 case TARGET_F_SETFL:
4492 return cmd;
4493 case TARGET_F_GETLK:
4494 return F_GETLK;
4495 case TARGET_F_SETLK:
4496 return F_SETLK;
4497 case TARGET_F_SETLKW:
4498 return F_SETLKW;
4499 case TARGET_F_GETOWN:
4500 return F_GETOWN;
4501 case TARGET_F_SETOWN:
4502 return F_SETOWN;
4503 case TARGET_F_GETSIG:
4504 return F_GETSIG;
4505 case TARGET_F_SETSIG:
4506 return F_SETSIG;
4507 #if TARGET_ABI_BITS == 32
4508 case TARGET_F_GETLK64:
4509 return F_GETLK64;
4510 case TARGET_F_SETLK64:
4511 return F_SETLK64;
4512 case TARGET_F_SETLKW64:
4513 return F_SETLKW64;
4514 #endif
4515 case TARGET_F_SETLEASE:
4516 return F_SETLEASE;
4517 case TARGET_F_GETLEASE:
4518 return F_GETLEASE;
4519 #ifdef F_DUPFD_CLOEXEC
4520 case TARGET_F_DUPFD_CLOEXEC:
4521 return F_DUPFD_CLOEXEC;
4522 #endif
4523 case TARGET_F_NOTIFY:
4524 return F_NOTIFY;
4525 #ifdef F_GETOWN_EX
4526 case TARGET_F_GETOWN_EX:
4527 return F_GETOWN_EX;
4528 #endif
4529 #ifdef F_SETOWN_EX
4530 case TARGET_F_SETOWN_EX:
4531 return F_SETOWN_EX;
4532 #endif
4533 default:
4534 return -TARGET_EINVAL;
4536 return -TARGET_EINVAL;
4539 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4540 static const bitmask_transtbl flock_tbl[] = {
4541 TRANSTBL_CONVERT(F_RDLCK),
4542 TRANSTBL_CONVERT(F_WRLCK),
4543 TRANSTBL_CONVERT(F_UNLCK),
4544 TRANSTBL_CONVERT(F_EXLCK),
4545 TRANSTBL_CONVERT(F_SHLCK),
4546 { 0, 0, 0, 0 }
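/* fcntl() emulation: converts struct flock/flock64 and f_owner_ex between
   guest and host layouts and translates the file status flags for
   F_GETFL/F_SETFL. */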
4549 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4551 struct flock fl;
4552 struct target_flock *target_fl;
4553 struct flock64 fl64;
4554 struct target_flock64 *target_fl64;
4555 #ifdef F_GETOWN_EX
4556 struct f_owner_ex fox;
4557 struct target_f_owner_ex *target_fox;
4558 #endif
4559 abi_long ret;
4560 int host_cmd = target_to_host_fcntl_cmd(cmd);
4562 if (host_cmd == -TARGET_EINVAL)
4563 return host_cmd;
4565 switch(cmd) {
4566 case TARGET_F_GETLK:
4567 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4568 return -TARGET_EFAULT;
4569 fl.l_type =
4570 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4571 fl.l_whence = tswap16(target_fl->l_whence);
4572 fl.l_start = tswapal(target_fl->l_start);
4573 fl.l_len = tswapal(target_fl->l_len);
4574 fl.l_pid = tswap32(target_fl->l_pid);
4575 unlock_user_struct(target_fl, arg, 0);
4576 ret = get_errno(fcntl(fd, host_cmd, &fl));
4577 if (ret == 0) {
4578 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4579 return -TARGET_EFAULT;
4580 target_fl->l_type =
4581 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4582 target_fl->l_whence = tswap16(fl.l_whence);
4583 target_fl->l_start = tswapal(fl.l_start);
4584 target_fl->l_len = tswapal(fl.l_len);
4585 target_fl->l_pid = tswap32(fl.l_pid);
4586 unlock_user_struct(target_fl, arg, 1);
4588 break;
4590 case TARGET_F_SETLK:
4591 case TARGET_F_SETLKW:
4592 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4593 return -TARGET_EFAULT;
4594 fl.l_type =
4595 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4596 fl.l_whence = tswap16(target_fl->l_whence);
4597 fl.l_start = tswapal(target_fl->l_start);
4598 fl.l_len = tswapal(target_fl->l_len);
4599 fl.l_pid = tswap32(target_fl->l_pid);
4600 unlock_user_struct(target_fl, arg, 0);
4601 ret = get_errno(fcntl(fd, host_cmd, &fl));
4602 break;
4604 case TARGET_F_GETLK64:
4605 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4606 return -TARGET_EFAULT;
4607 fl64.l_type =
4608 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4609 fl64.l_whence = tswap16(target_fl64->l_whence);
4610 fl64.l_start = tswap64(target_fl64->l_start);
4611 fl64.l_len = tswap64(target_fl64->l_len);
4612 fl64.l_pid = tswap32(target_fl64->l_pid);
4613 unlock_user_struct(target_fl64, arg, 0);
4614 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4615 if (ret == 0) {
4616 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4617 return -TARGET_EFAULT;
4618 target_fl64->l_type =
4619 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4620 target_fl64->l_whence = tswap16(fl64.l_whence);
4621 target_fl64->l_start = tswap64(fl64.l_start);
4622 target_fl64->l_len = tswap64(fl64.l_len);
4623 target_fl64->l_pid = tswap32(fl64.l_pid);
4624 unlock_user_struct(target_fl64, arg, 1);
4626 break;
4627 case TARGET_F_SETLK64:
4628 case TARGET_F_SETLKW64:
4629 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4630 return -TARGET_EFAULT;
4631 fl64.l_type =
4632 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4633 fl64.l_whence = tswap16(target_fl64->l_whence);
4634 fl64.l_start = tswap64(target_fl64->l_start);
4635 fl64.l_len = tswap64(target_fl64->l_len);
4636 fl64.l_pid = tswap32(target_fl64->l_pid);
4637 unlock_user_struct(target_fl64, arg, 0);
4638 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4639 break;
4641 case TARGET_F_GETFL:
4642 ret = get_errno(fcntl(fd, host_cmd, arg));
4643 if (ret >= 0) {
4644 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4646 break;
4648 case TARGET_F_SETFL:
4649 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4650 break;
4652 #ifdef F_GETOWN_EX
4653 case TARGET_F_GETOWN_EX:
4654 ret = get_errno(fcntl(fd, host_cmd, &fox));
4655 if (ret >= 0) {
4656 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
4657 return -TARGET_EFAULT;
4658 target_fox->type = tswap32(fox.type);
4659 target_fox->pid = tswap32(fox.pid);
4660 unlock_user_struct(target_fox, arg, 1);
4662 break;
4663 #endif
4665 #ifdef F_SETOWN_EX
4666 case TARGET_F_SETOWN_EX:
4667 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
4668 return -TARGET_EFAULT;
4669 fox.type = tswap32(target_fox->type);
4670 fox.pid = tswap32(target_fox->pid);
4671 unlock_user_struct(target_fox, arg, 0);
4672 ret = get_errno(fcntl(fd, host_cmd, &fox));
4673 break;
4674 #endif
4676 case TARGET_F_SETOWN:
4677 case TARGET_F_GETOWN:
4678 case TARGET_F_SETSIG:
4679 case TARGET_F_GETSIG:
4680 case TARGET_F_SETLEASE:
4681 case TARGET_F_GETLEASE:
4682 ret = get_errno(fcntl(fd, host_cmd, arg));
4683 break;
4685 default:
4686 ret = get_errno(fcntl(fd, cmd, arg));
4687 break;
4689 return ret;
4692 #ifdef USE_UID16
4694 static inline int high2lowuid(int uid)
4696 if (uid > 65535)
4697 return 65534;
4698 else
4699 return uid;
4702 static inline int high2lowgid(int gid)
4704 if (gid > 65535)
4705 return 65534;
4706 else
4707 return gid;
4710 static inline int low2highuid(int uid)
4712 if ((int16_t)uid == -1)
4713 return -1;
4714 else
4715 return uid;
4718 static inline int low2highgid(int gid)
4720 if ((int16_t)gid == -1)
4721 return -1;
4722 else
4723 return gid;
4725 static inline int tswapid(int id)
4727 return tswap16(id);
4730 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4732 #else /* !USE_UID16 */
4733 static inline int high2lowuid(int uid)
4735 return uid;
4737 static inline int high2lowgid(int gid)
4739 return gid;
4741 static inline int low2highuid(int uid)
4743 return uid;
4745 static inline int low2highgid(int gid)
4747 return gid;
4749 static inline int tswapid(int id)
4751 return tswap32(id);
4754 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4756 #endif /* USE_UID16 */
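/* One-time initialization: register the structure descriptors used by the
   ioctl thunk code, build the target-to-host errno table and patch the
   size field of ioctl numbers whose argument size depends on the ABI. */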
4758 void syscall_init(void)
4760 IOCTLEntry *ie;
4761 const argtype *arg_type;
4762 int size;
4763 int i;
4765 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4766 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4767 #include "syscall_types.h"
4768 #undef STRUCT
4769 #undef STRUCT_SPECIAL
4771 /* Build target_to_host_errno_table[] table from
4772 * host_to_target_errno_table[]. */
4773 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4774 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4777 /* We patch the ioctl size if necessary. We rely on the fact that
4778 no ioctl has all the bits set to '1' in the size field. */
4779 ie = ioctl_entries;
4780 while (ie->target_cmd != 0) {
4781 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4782 TARGET_IOC_SIZEMASK) {
4783 arg_type = ie->arg_type;
4784 if (arg_type[0] != TYPE_PTR) {
4785 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4786 ie->target_cmd);
4787 exit(1);
4789 arg_type++;
4790 size = thunk_type_size(arg_type, 0);
4791 ie->target_cmd = (ie->target_cmd &
4792 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4793 (size << TARGET_IOC_SIZESHIFT);
4796 /* automatic consistency check if same arch */
4797 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4798 (defined(__x86_64__) && defined(TARGET_X86_64))
4799 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4800 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4801 ie->name, ie->target_cmd, ie->host_cmd);
4803 #endif
4804 ie++;
4808 #if TARGET_ABI_BITS == 32
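/* Reassemble a 64-bit value passed as two 32-bit syscall arguments; the
   word order depends on the target endianness. */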
4809 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4811 #ifdef TARGET_WORDS_BIGENDIAN
4812 return ((uint64_t)word0 << 32) | word1;
4813 #else
4814 return ((uint64_t)word1 << 32) | word0;
4815 #endif
4817 #else /* TARGET_ABI_BITS == 32 */
4818 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4820 return word0;
4822 #endif /* TARGET_ABI_BITS != 32 */
4824 #ifdef TARGET_NR_truncate64
4825 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4826 abi_long arg2,
4827 abi_long arg3,
4828 abi_long arg4)
4830 if (regpairs_aligned(cpu_env)) {
4831 arg2 = arg3;
4832 arg3 = arg4;
4834 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4836 #endif
4838 #ifdef TARGET_NR_ftruncate64
4839 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4840 abi_long arg2,
4841 abi_long arg3,
4842 abi_long arg4)
4844 if (regpairs_aligned(cpu_env)) {
4845 arg2 = arg3;
4846 arg3 = arg4;
4848 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4850 #endif
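/* Helpers to copy struct timespec and struct itimerspec between the guest
   and host representations. */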
4852 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4853 abi_ulong target_addr)
4855 struct target_timespec *target_ts;
4857 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4858 return -TARGET_EFAULT;
4859 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4860 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4861 unlock_user_struct(target_ts, target_addr, 0);
4862 return 0;
4865 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4866 struct timespec *host_ts)
4868 struct target_timespec *target_ts;
4870 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4871 return -TARGET_EFAULT;
4872 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4873 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4874 unlock_user_struct(target_ts, target_addr, 1);
4875 return 0;
4878 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
4879 abi_ulong target_addr)
4881 struct target_itimerspec *target_itspec;
4883 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
4884 return -TARGET_EFAULT;
4887 host_itspec->it_interval.tv_sec =
4888 tswapal(target_itspec->it_interval.tv_sec);
4889 host_itspec->it_interval.tv_nsec =
4890 tswapal(target_itspec->it_interval.tv_nsec);
4891 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
4892 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
4894 unlock_user_struct(target_itspec, target_addr, 1);
4895 return 0;
4898 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
4899 struct itimerspec *host_its)
4901 struct target_itimerspec *target_itspec;
4903 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
4904 return -TARGET_EFAULT;
4907 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
4908 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
4910 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
4911 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
4913 unlock_user_struct(target_itspec, target_addr, 0);
4914 return 0;
4917 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
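/* Copy a host struct stat into the guest's stat64 layout; ARM EABI guests
   use the separate target_eabi_stat64 structure. */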
4918 static inline abi_long host_to_target_stat64(void *cpu_env,
4919 abi_ulong target_addr,
4920 struct stat *host_st)
4922 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
4923 if (((CPUARMState *)cpu_env)->eabi) {
4924 struct target_eabi_stat64 *target_st;
4926 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4927 return -TARGET_EFAULT;
4928 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4929 __put_user(host_st->st_dev, &target_st->st_dev);
4930 __put_user(host_st->st_ino, &target_st->st_ino);
4931 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4932 __put_user(host_st->st_ino, &target_st->__st_ino);
4933 #endif
4934 __put_user(host_st->st_mode, &target_st->st_mode);
4935 __put_user(host_st->st_nlink, &target_st->st_nlink);
4936 __put_user(host_st->st_uid, &target_st->st_uid);
4937 __put_user(host_st->st_gid, &target_st->st_gid);
4938 __put_user(host_st->st_rdev, &target_st->st_rdev);
4939 __put_user(host_st->st_size, &target_st->st_size);
4940 __put_user(host_st->st_blksize, &target_st->st_blksize);
4941 __put_user(host_st->st_blocks, &target_st->st_blocks);
4942 __put_user(host_st->st_atime, &target_st->target_st_atime);
4943 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4944 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4945 unlock_user_struct(target_st, target_addr, 1);
4946 } else
4947 #endif
4949 #if defined(TARGET_HAS_STRUCT_STAT64)
4950 struct target_stat64 *target_st;
4951 #else
4952 struct target_stat *target_st;
4953 #endif
4955 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4956 return -TARGET_EFAULT;
4957 memset(target_st, 0, sizeof(*target_st));
4958 __put_user(host_st->st_dev, &target_st->st_dev);
4959 __put_user(host_st->st_ino, &target_st->st_ino);
4960 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4961 __put_user(host_st->st_ino, &target_st->__st_ino);
4962 #endif
4963 __put_user(host_st->st_mode, &target_st->st_mode);
4964 __put_user(host_st->st_nlink, &target_st->st_nlink);
4965 __put_user(host_st->st_uid, &target_st->st_uid);
4966 __put_user(host_st->st_gid, &target_st->st_gid);
4967 __put_user(host_st->st_rdev, &target_st->st_rdev);
4968 /* XXX: better use of kernel struct */
4969 __put_user(host_st->st_size, &target_st->st_size);
4970 __put_user(host_st->st_blksize, &target_st->st_blksize);
4971 __put_user(host_st->st_blocks, &target_st->st_blocks);
4972 __put_user(host_st->st_atime, &target_st->target_st_atime);
4973 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4974 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4975 unlock_user_struct(target_st, target_addr, 1);
4978 return 0;
4980 #endif
4982 /* ??? Using host futex calls even when target atomic operations
4983 are not really atomic probably breaks things. However, implementing
4984 futexes locally would make sharing futexes between multiple processes
4985 tricky. They are probably useless anyway, because guest atomic
4986 operations won't work either. */
4987 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4988 target_ulong uaddr2, int val3)
4990 struct timespec ts, *pts;
4991 int base_op;
4993 /* ??? We assume FUTEX_* constants are the same on both host
4994 and target. */
4995 #ifdef FUTEX_CMD_MASK
4996 base_op = op & FUTEX_CMD_MASK;
4997 #else
4998 base_op = op;
4999 #endif
5000 switch (base_op) {
5001 case FUTEX_WAIT:
5002 case FUTEX_WAIT_BITSET:
5003 if (timeout) {
5004 pts = &ts;
5005 target_to_host_timespec(pts, timeout);
5006 } else {
5007 pts = NULL;
5009 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
5010 pts, NULL, val3));
5011 case FUTEX_WAKE:
5012 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5013 case FUTEX_FD:
5014 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5015 case FUTEX_REQUEUE:
5016 case FUTEX_CMP_REQUEUE:
5017 case FUTEX_WAKE_OP:
5018 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5019 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5020 But the prototype takes a `struct timespec *'; insert casts
5021 to satisfy the compiler. We do not need to tswap TIMEOUT
5022 since it's not compared to guest memory. */
5023 pts = (struct timespec *)(uintptr_t) timeout;
5024 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
5025 g2h(uaddr2),
5026 (base_op == FUTEX_CMP_REQUEUE
5027 ? tswap32(val3)
5028 : val3)));
5029 default:
5030 return -TARGET_ENOSYS;
5034 /* Map host to target signal numbers for the wait family of syscalls.
5035 Assume all other status bits are the same. */
5036 int host_to_target_waitstatus(int status)
5038 if (WIFSIGNALED(status)) {
5039 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
5041 if (WIFSTOPPED(status)) {
5042 return (host_to_target_signal(WSTOPSIG(status)) << 8)
5043 | (status & 0xff);
5045 return status;
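/* Emulate /proc/self/cmdline: the first string is the path of the qemu
   binary itself, so it is skipped and only the remaining arguments are
   passed through to the guest. */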
5048 static int open_self_cmdline(void *cpu_env, int fd)
5050 int fd_orig = -1;
5051 bool word_skipped = false;
5053 fd_orig = open("/proc/self/cmdline", O_RDONLY);
5054 if (fd_orig < 0) {
5055 return fd_orig;
5058 while (true) {
5059 ssize_t nb_read;
5060 char buf[128];
5061 char *cp_buf = buf;
5063 nb_read = read(fd_orig, buf, sizeof(buf));
5064 if (nb_read < 0) {
5065 fd_orig = close(fd_orig);
5066 return -1;
5067 } else if (nb_read == 0) {
5068 break;
5071 if (!word_skipped) {
5072 /* Skip the first string, which is the path to qemu-*-static
5073 instead of the actual command. */
5074 cp_buf = memchr(buf, 0, sizeof(buf));
5075 if (cp_buf) {
5076 /* Null byte found, skip one string */
5077 cp_buf++;
5078 nb_read -= cp_buf - buf;
5079 word_skipped = true;
5083 if (word_skipped) {
5084 if (write(fd, cp_buf, nb_read) != nb_read) {
5085 return -1;
5090 return close(fd_orig);
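/* Emulate /proc/self/maps: read the host mappings and emit only those
   that are valid in the guest address space, with the ranges rewritten
   through h2g(); some targets also append a synthetic [stack] line. */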
5093 static int open_self_maps(void *cpu_env, int fd)
5095 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5096 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5097 TaskState *ts = cpu->opaque;
5098 #endif
5099 FILE *fp;
5100 char *line = NULL;
5101 size_t len = 0;
5102 ssize_t read;
5104 fp = fopen("/proc/self/maps", "r");
5105 if (fp == NULL) {
5106 return -EACCES;
5109 while ((read = getline(&line, &len, fp)) != -1) {
5110 int fields, dev_maj, dev_min, inode;
5111 uint64_t min, max, offset;
5112 char flag_r, flag_w, flag_x, flag_p;
5113 char path[512] = "";
5114 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5115 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5116 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5118 if ((fields < 10) || (fields > 11)) {
5119 continue;
5121 if (!strncmp(path, "[stack]", 7)) {
5122 continue;
5124 if (h2g_valid(min) && h2g_valid(max)) {
5125 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5126 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5127 h2g(min), h2g(max), flag_r, flag_w,
5128 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5129 path[0] ? " " : "", path);
5133 free(line);
5134 fclose(fp);
5136 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5137 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5138 (unsigned long long)ts->info->stack_limit,
5139 (unsigned long long)(ts->info->start_stack +
5140 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
5141 (unsigned long long)0);
5142 #endif
5144 return 0;
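/* Emulate /proc/self/stat: only the pid, the command name and the
   start-of-stack field are filled in; all other fields read as zero. */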
5147 static int open_self_stat(void *cpu_env, int fd)
5149 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5150 TaskState *ts = cpu->opaque;
5151 abi_ulong start_stack = ts->info->start_stack;
5152 int i;
5154 for (i = 0; i < 44; i++) {
5155 char buf[128];
5156 int len;
5157 uint64_t val = 0;
5159 if (i == 0) {
5160 /* pid */
5161 val = getpid();
5162 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5163 } else if (i == 1) {
5164 /* app name */
5165 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5166 } else if (i == 27) {
5167 /* stack bottom */
5168 val = start_stack;
5169 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5170 } else {
5171 /* all remaining fields are not emulated and read as zero */
5172 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5175 len = strlen(buf);
5176 if (write(fd, buf, len) != len) {
5177 return -1;
5181 return 0;
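/* Emulate /proc/self/auxv by dumping the auxiliary vector that was saved
   on the guest stack at load time. */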
5184 static int open_self_auxv(void *cpu_env, int fd)
5186 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5187 TaskState *ts = cpu->opaque;
5188 abi_ulong auxv = ts->info->saved_auxv;
5189 abi_ulong len = ts->info->auxv_len;
5190 char *ptr;
5193 * The auxiliary vector is stored on the target process stack.
5194 * Read the whole auxv vector in and copy it to the file.
5196 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5197 if (ptr != NULL) {
5198 while (len > 0) {
5199 ssize_t r;
5200 r = write(fd, ptr, len);
5201 if (r <= 0) {
5202 break;
5204 len -= r;
5205 ptr += r;
5207 lseek(fd, 0, SEEK_SET);
5208 unlock_user(ptr, auxv, len);
5211 return 0;
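/* Return 1 if filename refers to /proc/self/<entry> or
   /proc/<our pid>/<entry>, 0 otherwise. */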
5214 static int is_proc_myself(const char *filename, const char *entry)
5216 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5217 filename += strlen("/proc/");
5218 if (!strncmp(filename, "self/", strlen("self/"))) {
5219 filename += strlen("self/");
5220 } else if (*filename >= '1' && *filename <= '9') {
5221 char myself[80];
5222 snprintf(myself, sizeof(myself), "%d/", getpid());
5223 if (!strncmp(filename, myself, strlen(myself))) {
5224 filename += strlen(myself);
5225 } else {
5226 return 0;
5228 } else {
5229 return 0;
5231 if (!strcmp(filename, entry)) {
5232 return 1;
5235 return 0;
5238 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
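/* When host and target endianness differ, some /proc/net files contain
   byte-order dependent values; open_net_route() below emits a byte-swapped
   copy of /proc/net/route. */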
5239 static int is_proc(const char *filename, const char *entry)
5241 return strcmp(filename, entry) == 0;
5244 static int open_net_route(void *cpu_env, int fd)
5246 FILE *fp;
5247 char *line = NULL;
5248 size_t len = 0;
5249 ssize_t read;
5251 fp = fopen("/proc/net/route", "r");
5252 if (fp == NULL) {
5253 return -EACCES;
5256 /* read header */
5258 read = getline(&line, &len, fp);
5259 dprintf(fd, "%s", line);
5261 /* read routes */
5263 while ((read = getline(&line, &len, fp)) != -1) {
5264 char iface[16];
5265 uint32_t dest, gw, mask;
5266 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5267 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5268 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5269 &mask, &mtu, &window, &irtt);
5270 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5271 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5272 metric, tswap32(mask), mtu, window, irtt);
5275 free(line);
5276 fclose(fp);
5278 return 0;
5280 #endif
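/* open() emulation: accesses to selected /proc files are intercepted and
   their contents synthesized into an unlinked temporary file so that the
   guest sees its own view of them; everything else goes to the host
   open(). */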
5282 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5284 struct fake_open {
5285 const char *filename;
5286 int (*fill)(void *cpu_env, int fd);
5287 int (*cmp)(const char *s1, const char *s2);
5289 const struct fake_open *fake_open;
5290 static const struct fake_open fakes[] = {
5291 { "maps", open_self_maps, is_proc_myself },
5292 { "stat", open_self_stat, is_proc_myself },
5293 { "auxv", open_self_auxv, is_proc_myself },
5294 { "cmdline", open_self_cmdline, is_proc_myself },
5295 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5296 { "/proc/net/route", open_net_route, is_proc },
5297 #endif
5298 { NULL, NULL, NULL }
5301 if (is_proc_myself(pathname, "exe")) {
5302 int execfd = qemu_getauxval(AT_EXECFD);
5303 return execfd ? execfd : get_errno(open(exec_path, flags, mode));
5306 for (fake_open = fakes; fake_open->filename; fake_open++) {
5307 if (fake_open->cmp(pathname, fake_open->filename)) {
5308 break;
5312 if (fake_open->filename) {
5313 const char *tmpdir;
5314 char filename[PATH_MAX];
5315 int fd, r;
5317 /* create a temporary file to hold the synthesized contents */
5318 tmpdir = getenv("TMPDIR");
5319 if (!tmpdir)
5320 tmpdir = "/tmp";
5321 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5322 fd = mkstemp(filename);
5323 if (fd < 0) {
5324 return fd;
5326 unlink(filename);
5328 if ((r = fake_open->fill(cpu_env, fd))) {
5329 close(fd);
5330 return r;
5332 lseek(fd, 0, SEEK_SET);
5334 return fd;
5337 return get_errno(open(path(pathname), flags, mode));
5340 /* do_syscall() should always have a single exit point at the end so
5341 that actions, such as logging of syscall results, can be performed.
5342 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5343 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5344 abi_long arg2, abi_long arg3, abi_long arg4,
5345 abi_long arg5, abi_long arg6, abi_long arg7,
5346 abi_long arg8)
5348 CPUState *cpu = ENV_GET_CPU(cpu_env);
5349 abi_long ret;
5350 struct stat st;
5351 struct statfs stfs;
5352 void *p;
5354 #ifdef DEBUG
5355 gemu_log("syscall %d", num);
5356 #endif
5357 if(do_strace)
5358 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5360 switch(num) {
5361 case TARGET_NR_exit:
5362 /* In old applications this may be used to implement _exit(2).
5363 However in threaded applications it is used for thread termination,
5364 and _exit_group is used for application termination.
5365 Do thread termination if we have more than one thread. */
5366 /* FIXME: This probably breaks if a signal arrives. We should probably
5367 be disabling signals. */
5368 if (CPU_NEXT(first_cpu)) {
5369 TaskState *ts;
5371 cpu_list_lock();
5372 /* Remove the CPU from the list. */
5373 QTAILQ_REMOVE(&cpus, cpu, node);
5374 cpu_list_unlock();
5375 ts = cpu->opaque;
5376 if (ts->child_tidptr) {
5377 put_user_u32(0, ts->child_tidptr);
5378 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5379 NULL, NULL, 0);
5381 thread_cpu = NULL;
5382 object_unref(OBJECT(cpu));
5383 g_free(ts);
5384 pthread_exit(NULL);
5386 #ifdef TARGET_GPROF
5387 _mcleanup();
5388 #endif
5389 gdb_exit(cpu_env, arg1);
5390 _exit(arg1);
5391 ret = 0; /* avoid warning */
5392 break;
5393 case TARGET_NR_read:
5394 if (arg3 == 0)
5395 ret = 0;
5396 else {
5397 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5398 goto efault;
5399 ret = get_errno(read(arg1, p, arg3));
5400 unlock_user(p, arg2, ret);
5402 break;
5403 case TARGET_NR_write:
5404 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5405 goto efault;
5406 ret = get_errno(write(arg1, p, arg3));
5407 unlock_user(p, arg2, 0);
5408 break;
5409 case TARGET_NR_open:
5410 if (!(p = lock_user_string(arg1)))
5411 goto efault;
5412 ret = get_errno(do_open(cpu_env, p,
5413 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5414 arg3));
5415 unlock_user(p, arg1, 0);
5416 break;
5417 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5418 case TARGET_NR_openat:
5419 if (!(p = lock_user_string(arg2)))
5420 goto efault;
5421 ret = get_errno(sys_openat(arg1,
5422 path(p),
5423 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5424 arg4));
5425 unlock_user(p, arg2, 0);
5426 break;
5427 #endif
5428 case TARGET_NR_close:
5429 ret = get_errno(close(arg1));
5430 break;
5431 case TARGET_NR_brk:
5432 ret = do_brk(arg1);
5433 break;
5434 case TARGET_NR_fork:
5435 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5436 break;
5437 #ifdef TARGET_NR_waitpid
5438 case TARGET_NR_waitpid:
5440 int status;
5441 ret = get_errno(waitpid(arg1, &status, arg3));
5442 if (!is_error(ret) && arg2 && ret
5443 && put_user_s32(host_to_target_waitstatus(status), arg2))
5444 goto efault;
5446 break;
5447 #endif
5448 #ifdef TARGET_NR_waitid
5449 case TARGET_NR_waitid:
5451 siginfo_t info;
5452 info.si_pid = 0;
5453 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5454 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5455 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5456 goto efault;
5457 host_to_target_siginfo(p, &info);
5458 unlock_user(p, arg3, sizeof(target_siginfo_t));
5461 break;
5462 #endif
5463 #ifdef TARGET_NR_creat /* not on alpha */
5464 case TARGET_NR_creat:
5465 if (!(p = lock_user_string(arg1)))
5466 goto efault;
5467 ret = get_errno(creat(p, arg2));
5468 unlock_user(p, arg1, 0);
5469 break;
5470 #endif
5471 case TARGET_NR_link:
5473 void * p2;
5474 p = lock_user_string(arg1);
5475 p2 = lock_user_string(arg2);
5476 if (!p || !p2)
5477 ret = -TARGET_EFAULT;
5478 else
5479 ret = get_errno(link(p, p2));
5480 unlock_user(p2, arg2, 0);
5481 unlock_user(p, arg1, 0);
5483 break;
5484 #if defined(TARGET_NR_linkat)
5485 case TARGET_NR_linkat:
5487 void * p2 = NULL;
5488 if (!arg2 || !arg4)
5489 goto efault;
5490 p = lock_user_string(arg2);
5491 p2 = lock_user_string(arg4);
5492 if (!p || !p2)
5493 ret = -TARGET_EFAULT;
5494 else
5495 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5496 unlock_user(p, arg2, 0);
5497 unlock_user(p2, arg4, 0);
5499 break;
5500 #endif
5501 case TARGET_NR_unlink:
5502 if (!(p = lock_user_string(arg1)))
5503 goto efault;
5504 ret = get_errno(unlink(p));
5505 unlock_user(p, arg1, 0);
5506 break;
5507 #if defined(TARGET_NR_unlinkat)
5508 case TARGET_NR_unlinkat:
5509 if (!(p = lock_user_string(arg2)))
5510 goto efault;
5511 ret = get_errno(unlinkat(arg1, p, arg3));
5512 unlock_user(p, arg2, 0);
5513 break;
5514 #endif
5515 case TARGET_NR_execve:
5517 char **argp, **envp;
5518 int argc, envc;
5519 abi_ulong gp;
5520 abi_ulong guest_argp;
5521 abi_ulong guest_envp;
5522 abi_ulong addr;
5523 char **q;
5524 int total_size = 0;
5526 argc = 0;
5527 guest_argp = arg2;
5528 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5529 if (get_user_ual(addr, gp))
5530 goto efault;
5531 if (!addr)
5532 break;
5533 argc++;
5535 envc = 0;
5536 guest_envp = arg3;
5537 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5538 if (get_user_ual(addr, gp))
5539 goto efault;
5540 if (!addr)
5541 break;
5542 envc++;
5545 argp = alloca((argc + 1) * sizeof(void *));
5546 envp = alloca((envc + 1) * sizeof(void *));
5548 for (gp = guest_argp, q = argp; gp;
5549 gp += sizeof(abi_ulong), q++) {
5550 if (get_user_ual(addr, gp))
5551 goto execve_efault;
5552 if (!addr)
5553 break;
5554 if (!(*q = lock_user_string(addr)))
5555 goto execve_efault;
5556 total_size += strlen(*q) + 1;
5558 *q = NULL;
5560 for (gp = guest_envp, q = envp; gp;
5561 gp += sizeof(abi_ulong), q++) {
5562 if (get_user_ual(addr, gp))
5563 goto execve_efault;
5564 if (!addr)
5565 break;
5566 if (!(*q = lock_user_string(addr)))
5567 goto execve_efault;
5568 total_size += strlen(*q) + 1;
5570 *q = NULL;
5572 /* This case will not be caught by the host's execve() if its
5573 page size is bigger than the target's. */
5574 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5575 ret = -TARGET_E2BIG;
5576 goto execve_end;
5578 if (!(p = lock_user_string(arg1)))
5579 goto execve_efault;
5580 ret = get_errno(execve(p, argp, envp));
5581 unlock_user(p, arg1, 0);
5583 goto execve_end;
5585 execve_efault:
5586 ret = -TARGET_EFAULT;
5588 execve_end:
5589 for (gp = guest_argp, q = argp; *q;
5590 gp += sizeof(abi_ulong), q++) {
5591 if (get_user_ual(addr, gp)
5592 || !addr)
5593 break;
5594 unlock_user(*q, addr, 0);
5596 for (gp = guest_envp, q = envp; *q;
5597 gp += sizeof(abi_ulong), q++) {
5598 if (get_user_ual(addr, gp)
5599 || !addr)
5600 break;
5601 unlock_user(*q, addr, 0);
5604 break;
5605 case TARGET_NR_chdir:
5606 if (!(p = lock_user_string(arg1)))
5607 goto efault;
5608 ret = get_errno(chdir(p));
5609 unlock_user(p, arg1, 0);
5610 break;
5611 #ifdef TARGET_NR_time
5612 case TARGET_NR_time:
5614 time_t host_time;
5615 ret = get_errno(time(&host_time));
5616 if (!is_error(ret)
5617 && arg1
5618 && put_user_sal(host_time, arg1))
5619 goto efault;
5621 break;
5622 #endif
5623 case TARGET_NR_mknod:
5624 if (!(p = lock_user_string(arg1)))
5625 goto efault;
5626 ret = get_errno(mknod(p, arg2, arg3));
5627 unlock_user(p, arg1, 0);
5628 break;
5629 #if defined(TARGET_NR_mknodat)
5630 case TARGET_NR_mknodat:
5631 if (!(p = lock_user_string(arg2)))
5632 goto efault;
5633 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5634 unlock_user(p, arg2, 0);
5635 break;
5636 #endif
5637 case TARGET_NR_chmod:
5638 if (!(p = lock_user_string(arg1)))
5639 goto efault;
5640 ret = get_errno(chmod(p, arg2));
5641 unlock_user(p, arg1, 0);
5642 break;
5643 #ifdef TARGET_NR_break
5644 case TARGET_NR_break:
5645 goto unimplemented;
5646 #endif
5647 #ifdef TARGET_NR_oldstat
5648 case TARGET_NR_oldstat:
5649 goto unimplemented;
5650 #endif
5651 case TARGET_NR_lseek:
5652 ret = get_errno(lseek(arg1, arg2, arg3));
5653 break;
5654 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5655 /* Alpha specific */
5656 case TARGET_NR_getxpid:
5657 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5658 ret = get_errno(getpid());
5659 break;
5660 #endif
5661 #ifdef TARGET_NR_getpid
5662 case TARGET_NR_getpid:
5663 ret = get_errno(getpid());
5664 break;
5665 #endif
5666 case TARGET_NR_mount:
5668 /* need to look at the data field */
5669 void *p2, *p3;
5671 if (arg1) {
5672 p = lock_user_string(arg1);
5673 if (!p) {
5674 goto efault;
5676 } else {
5677 p = NULL;
5680 p2 = lock_user_string(arg2);
5681 if (!p2) {
5682 if (arg1) {
5683 unlock_user(p, arg1, 0);
5685 goto efault;
5688 if (arg3) {
5689 p3 = lock_user_string(arg3);
5690 if (!p3) {
5691 if (arg1) {
5692 unlock_user(p, arg1, 0);
5694 unlock_user(p2, arg2, 0);
5695 goto efault;
5697 } else {
5698 p3 = NULL;
5701 /* FIXME - arg5 should be locked, but it isn't clear how to
5702 * do that since it's not guaranteed to be a NULL-terminated
5703 * string.
5705 if (!arg5) {
5706 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
5707 } else {
5708 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
5710 ret = get_errno(ret);
5712 if (arg1) {
5713 unlock_user(p, arg1, 0);
5715 unlock_user(p2, arg2, 0);
5716 if (arg3) {
5717 unlock_user(p3, arg3, 0);
5720 break;
5721 #ifdef TARGET_NR_umount
5722 case TARGET_NR_umount:
5723 if (!(p = lock_user_string(arg1)))
5724 goto efault;
5725 ret = get_errno(umount(p));
5726 unlock_user(p, arg1, 0);
5727 break;
5728 #endif
5729 #ifdef TARGET_NR_stime /* not on alpha */
5730 case TARGET_NR_stime:
5732 time_t host_time;
5733 if (get_user_sal(host_time, arg1))
5734 goto efault;
5735 ret = get_errno(stime(&host_time));
5737 break;
5738 #endif
5739 case TARGET_NR_ptrace:
5740 goto unimplemented;
5741 #ifdef TARGET_NR_alarm /* not on alpha */
5742 case TARGET_NR_alarm:
5743 ret = alarm(arg1);
5744 break;
5745 #endif
5746 #ifdef TARGET_NR_oldfstat
5747 case TARGET_NR_oldfstat:
5748 goto unimplemented;
5749 #endif
5750 #ifdef TARGET_NR_pause /* not on alpha */
5751 case TARGET_NR_pause:
5752 ret = get_errno(pause());
5753 break;
5754 #endif
5755 #ifdef TARGET_NR_utime
5756 case TARGET_NR_utime:
5758 struct utimbuf tbuf, *host_tbuf;
5759 struct target_utimbuf *target_tbuf;
5760 if (arg2) {
5761 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5762 goto efault;
5763 tbuf.actime = tswapal(target_tbuf->actime);
5764 tbuf.modtime = tswapal(target_tbuf->modtime);
5765 unlock_user_struct(target_tbuf, arg2, 0);
5766 host_tbuf = &tbuf;
5767 } else {
5768 host_tbuf = NULL;
5770 if (!(p = lock_user_string(arg1)))
5771 goto efault;
5772 ret = get_errno(utime(p, host_tbuf));
5773 unlock_user(p, arg1, 0);
5775 break;
5776 #endif
5777 case TARGET_NR_utimes:
5779 struct timeval *tvp, tv[2];
5780 if (arg2) {
5781 if (copy_from_user_timeval(&tv[0], arg2)
5782 || copy_from_user_timeval(&tv[1],
5783 arg2 + sizeof(struct target_timeval)))
5784 goto efault;
5785 tvp = tv;
5786 } else {
5787 tvp = NULL;
5789 if (!(p = lock_user_string(arg1)))
5790 goto efault;
5791 ret = get_errno(utimes(p, tvp));
5792 unlock_user(p, arg1, 0);
5794 break;
5795 #if defined(TARGET_NR_futimesat)
5796 case TARGET_NR_futimesat:
5798 struct timeval *tvp, tv[2];
5799 if (arg3) {
5800 if (copy_from_user_timeval(&tv[0], arg3)
5801 || copy_from_user_timeval(&tv[1],
5802 arg3 + sizeof(struct target_timeval)))
5803 goto efault;
5804 tvp = tv;
5805 } else {
5806 tvp = NULL;
5808 if (!(p = lock_user_string(arg2)))
5809 goto efault;
5810 ret = get_errno(futimesat(arg1, path(p), tvp));
5811 unlock_user(p, arg2, 0);
5813 break;
5814 #endif
5815 #ifdef TARGET_NR_stty
5816 case TARGET_NR_stty:
5817 goto unimplemented;
5818 #endif
5819 #ifdef TARGET_NR_gtty
5820 case TARGET_NR_gtty:
5821 goto unimplemented;
5822 #endif
5823 case TARGET_NR_access:
5824 if (!(p = lock_user_string(arg1)))
5825 goto efault;
5826 ret = get_errno(access(path(p), arg2));
5827 unlock_user(p, arg1, 0);
5828 break;
5829 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5830 case TARGET_NR_faccessat:
5831 if (!(p = lock_user_string(arg2)))
5832 goto efault;
5833 ret = get_errno(faccessat(arg1, p, arg3, 0));
5834 unlock_user(p, arg2, 0);
5835 break;
5836 #endif
5837 #ifdef TARGET_NR_nice /* not on alpha */
5838 case TARGET_NR_nice:
5839 ret = get_errno(nice(arg1));
5840 break;
5841 #endif
5842 #ifdef TARGET_NR_ftime
5843 case TARGET_NR_ftime:
5844 goto unimplemented;
5845 #endif
5846 case TARGET_NR_sync:
5847 sync();
5848 ret = 0;
5849 break;
5850 case TARGET_NR_kill:
5851 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5852 break;
5853 case TARGET_NR_rename:
5855 void *p2;
5856 p = lock_user_string(arg1);
5857 p2 = lock_user_string(arg2);
5858 if (!p || !p2)
5859 ret = -TARGET_EFAULT;
5860 else
5861 ret = get_errno(rename(p, p2));
5862 unlock_user(p2, arg2, 0);
5863 unlock_user(p, arg1, 0);
5865 break;
5866 #if defined(TARGET_NR_renameat)
5867 case TARGET_NR_renameat:
5869 void *p2;
5870 p = lock_user_string(arg2);
5871 p2 = lock_user_string(arg4);
5872 if (!p || !p2)
5873 ret = -TARGET_EFAULT;
5874 else
5875 ret = get_errno(renameat(arg1, p, arg3, p2));
5876 unlock_user(p2, arg4, 0);
5877 unlock_user(p, arg2, 0);
5879 break;
5880 #endif
5881 case TARGET_NR_mkdir:
5882 if (!(p = lock_user_string(arg1)))
5883 goto efault;
5884 ret = get_errno(mkdir(p, arg2));
5885 unlock_user(p, arg1, 0);
5886 break;
5887 #if defined(TARGET_NR_mkdirat)
5888 case TARGET_NR_mkdirat:
5889 if (!(p = lock_user_string(arg2)))
5890 goto efault;
5891 ret = get_errno(mkdirat(arg1, p, arg3));
5892 unlock_user(p, arg2, 0);
5893 break;
5894 #endif
5895 case TARGET_NR_rmdir:
5896 if (!(p = lock_user_string(arg1)))
5897 goto efault;
5898 ret = get_errno(rmdir(p));
5899 unlock_user(p, arg1, 0);
5900 break;
5901 case TARGET_NR_dup:
5902 ret = get_errno(dup(arg1));
5903 break;
5904 case TARGET_NR_pipe:
5905 ret = do_pipe(cpu_env, arg1, 0, 0);
5906 break;
5907 #ifdef TARGET_NR_pipe2
5908 case TARGET_NR_pipe2:
5909 ret = do_pipe(cpu_env, arg1,
5910 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5911 break;
5912 #endif
5913 case TARGET_NR_times:
5915 struct target_tms *tmsp;
5916 struct tms tms;
5917 ret = get_errno(times(&tms));
5918 if (arg1) {
5919 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5920 if (!tmsp)
5921 goto efault;
5922 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5923 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5924 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5925 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5927 if (!is_error(ret))
5928 ret = host_to_target_clock_t(ret);
5930 break;
5931 #ifdef TARGET_NR_prof
5932 case TARGET_NR_prof:
5933 goto unimplemented;
5934 #endif
5935 #ifdef TARGET_NR_signal
5936 case TARGET_NR_signal:
5937 goto unimplemented;
5938 #endif
5939 case TARGET_NR_acct:
5940 if (arg1 == 0) {
5941 ret = get_errno(acct(NULL));
5942 } else {
5943 if (!(p = lock_user_string(arg1)))
5944 goto efault;
5945 ret = get_errno(acct(path(p)));
5946 unlock_user(p, arg1, 0);
5948 break;
5949 #ifdef TARGET_NR_umount2
5950 case TARGET_NR_umount2:
5951 if (!(p = lock_user_string(arg1)))
5952 goto efault;
5953 ret = get_errno(umount2(p, arg2));
5954 unlock_user(p, arg1, 0);
5955 break;
5956 #endif
5957 #ifdef TARGET_NR_lock
5958 case TARGET_NR_lock:
5959 goto unimplemented;
5960 #endif
5961 case TARGET_NR_ioctl:
5962 ret = do_ioctl(arg1, arg2, arg3);
5963 break;
5964 case TARGET_NR_fcntl:
5965 ret = do_fcntl(arg1, arg2, arg3);
5966 break;
5967 #ifdef TARGET_NR_mpx
5968 case TARGET_NR_mpx:
5969 goto unimplemented;
5970 #endif
5971 case TARGET_NR_setpgid:
5972 ret = get_errno(setpgid(arg1, arg2));
5973 break;
5974 #ifdef TARGET_NR_ulimit
5975 case TARGET_NR_ulimit:
5976 goto unimplemented;
5977 #endif
5978 #ifdef TARGET_NR_oldolduname
5979 case TARGET_NR_oldolduname:
5980 goto unimplemented;
5981 #endif
5982 case TARGET_NR_umask:
5983 ret = get_errno(umask(arg1));
5984 break;
5985 case TARGET_NR_chroot:
5986 if (!(p = lock_user_string(arg1)))
5987 goto efault;
5988 ret = get_errno(chroot(p));
5989 unlock_user(p, arg1, 0);
5990 break;
5991 case TARGET_NR_ustat:
5992 goto unimplemented;
5993 case TARGET_NR_dup2:
5994 ret = get_errno(dup2(arg1, arg2));
5995 break;
5996 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5997 case TARGET_NR_dup3:
5998 ret = get_errno(dup3(arg1, arg2, arg3));
5999 break;
6000 #endif
6001 #ifdef TARGET_NR_getppid /* not on alpha */
6002 case TARGET_NR_getppid:
6003 ret = get_errno(getppid());
6004 break;
6005 #endif
6006 case TARGET_NR_getpgrp:
6007 ret = get_errno(getpgrp());
6008 break;
6009 case TARGET_NR_setsid:
6010 ret = get_errno(setsid());
6011 break;
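/* The legacy sigaction entry below converts the guest's old-style
 * sigaction layout into the internal target_sigaction before calling
 * do_sigaction(); Alpha, MIPS and the generic case each handle a
 * slightly different old-ABI layout, hence the three compile-time
 * variants.
 */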
6012 #ifdef TARGET_NR_sigaction
6013 case TARGET_NR_sigaction:
6015 #if defined(TARGET_ALPHA)
6016 struct target_sigaction act, oact, *pact = 0;
6017 struct target_old_sigaction *old_act;
6018 if (arg2) {
6019 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6020 goto efault;
6021 act._sa_handler = old_act->_sa_handler;
6022 target_siginitset(&act.sa_mask, old_act->sa_mask);
6023 act.sa_flags = old_act->sa_flags;
6024 act.sa_restorer = 0;
6025 unlock_user_struct(old_act, arg2, 0);
6026 pact = &act;
6028 ret = get_errno(do_sigaction(arg1, pact, &oact));
6029 if (!is_error(ret) && arg3) {
6030 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6031 goto efault;
6032 old_act->_sa_handler = oact._sa_handler;
6033 old_act->sa_mask = oact.sa_mask.sig[0];
6034 old_act->sa_flags = oact.sa_flags;
6035 unlock_user_struct(old_act, arg3, 1);
6037 #elif defined(TARGET_MIPS)
6038 struct target_sigaction act, oact, *pact, *old_act;
6040 if (arg2) {
6041 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6042 goto efault;
6043 act._sa_handler = old_act->_sa_handler;
6044 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6045 act.sa_flags = old_act->sa_flags;
6046 unlock_user_struct(old_act, arg2, 0);
6047 pact = &act;
6048 } else {
6049 pact = NULL;
6052 ret = get_errno(do_sigaction(arg1, pact, &oact));
6054 if (!is_error(ret) && arg3) {
6055 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6056 goto efault;
6057 old_act->_sa_handler = oact._sa_handler;
6058 old_act->sa_flags = oact.sa_flags;
6059 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6060 old_act->sa_mask.sig[1] = 0;
6061 old_act->sa_mask.sig[2] = 0;
6062 old_act->sa_mask.sig[3] = 0;
6063 unlock_user_struct(old_act, arg3, 1);
6065 #else
6066 struct target_old_sigaction *old_act;
6067 struct target_sigaction act, oact, *pact;
6068 if (arg2) {
6069 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6070 goto efault;
6071 act._sa_handler = old_act->_sa_handler;
6072 target_siginitset(&act.sa_mask, old_act->sa_mask);
6073 act.sa_flags = old_act->sa_flags;
6074 act.sa_restorer = old_act->sa_restorer;
6075 unlock_user_struct(old_act, arg2, 0);
6076 pact = &act;
6077 } else {
6078 pact = NULL;
6080 ret = get_errno(do_sigaction(arg1, pact, &oact));
6081 if (!is_error(ret) && arg3) {
6082 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6083 goto efault;
6084 old_act->_sa_handler = oact._sa_handler;
6085 old_act->sa_mask = oact.sa_mask.sig[0];
6086 old_act->sa_flags = oact.sa_flags;
6087 old_act->sa_restorer = oact.sa_restorer;
6088 unlock_user_struct(old_act, arg3, 1);
6090 #endif
6092 break;
6093 #endif
6094 case TARGET_NR_rt_sigaction:
6096 #if defined(TARGET_ALPHA)
6097 struct target_sigaction act, oact, *pact = 0;
6098 struct target_rt_sigaction *rt_act;
6099 /* ??? arg4 == sizeof(sigset_t). */
6100 if (arg2) {
6101 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6102 goto efault;
6103 act._sa_handler = rt_act->_sa_handler;
6104 act.sa_mask = rt_act->sa_mask;
6105 act.sa_flags = rt_act->sa_flags;
6106 act.sa_restorer = arg5;
6107 unlock_user_struct(rt_act, arg2, 0);
6108 pact = &act;
6110 ret = get_errno(do_sigaction(arg1, pact, &oact));
6111 if (!is_error(ret) && arg3) {
6112 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6113 goto efault;
6114 rt_act->_sa_handler = oact._sa_handler;
6115 rt_act->sa_mask = oact.sa_mask;
6116 rt_act->sa_flags = oact.sa_flags;
6117 unlock_user_struct(rt_act, arg3, 1);
6119 #else
6120 struct target_sigaction *act;
6121 struct target_sigaction *oact;
6123 if (arg2) {
6124 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6125 goto efault;
6126 } else
6127 act = NULL;
6128 if (arg3) {
6129 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6130 ret = -TARGET_EFAULT;
6131 goto rt_sigaction_fail;
6133 } else
6134 oact = NULL;
6135 ret = get_errno(do_sigaction(arg1, act, oact));
6136 rt_sigaction_fail:
6137 if (act)
6138 unlock_user_struct(act, arg2, 0);
6139 if (oact)
6140 unlock_user_struct(oact, arg3, 1);
6141 #endif
6143 break;
6144 #ifdef TARGET_NR_sgetmask /* not on alpha */
6145 case TARGET_NR_sgetmask:
6147 sigset_t cur_set;
6148 abi_ulong target_set;
6149 do_sigprocmask(0, NULL, &cur_set);
6150 host_to_target_old_sigset(&target_set, &cur_set);
6151 ret = target_set;
6153 break;
6154 #endif
6155 #ifdef TARGET_NR_ssetmask /* not on alpha */
6156 case TARGET_NR_ssetmask:
6158 sigset_t set, oset, cur_set;
6159 abi_ulong target_set = arg1;
6160 do_sigprocmask(0, NULL, &cur_set);
6161 target_to_host_old_sigset(&set, &target_set);
6162 sigorset(&set, &set, &cur_set);
6163 do_sigprocmask(SIG_SETMASK, &set, &oset);
6164 host_to_target_old_sigset(&target_set, &oset);
6165 ret = target_set;
6167 break;
6168 #endif
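/* In both sigprocmask variants below and in rt_sigprocmask, the guest's
 * TARGET_SIG_BLOCK/UNBLOCK/SETMASK constants are translated to the host
 * SIG_* values before calling do_sigprocmask(), and the previous mask is
 * converted back into guest format on success.
 */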
6169 #ifdef TARGET_NR_sigprocmask
6170 case TARGET_NR_sigprocmask:
6172 #if defined(TARGET_ALPHA)
6173 sigset_t set, oldset;
6174 abi_ulong mask;
6175 int how;
6177 switch (arg1) {
6178 case TARGET_SIG_BLOCK:
6179 how = SIG_BLOCK;
6180 break;
6181 case TARGET_SIG_UNBLOCK:
6182 how = SIG_UNBLOCK;
6183 break;
6184 case TARGET_SIG_SETMASK:
6185 how = SIG_SETMASK;
6186 break;
6187 default:
6188 ret = -TARGET_EINVAL;
6189 goto fail;
6191 mask = arg2;
6192 target_to_host_old_sigset(&set, &mask);
6194 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6195 if (!is_error(ret)) {
6196 host_to_target_old_sigset(&mask, &oldset);
6197 ret = mask;
6198 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6200 #else
6201 sigset_t set, oldset, *set_ptr;
6202 int how;
6204 if (arg2) {
6205 switch (arg1) {
6206 case TARGET_SIG_BLOCK:
6207 how = SIG_BLOCK;
6208 break;
6209 case TARGET_SIG_UNBLOCK:
6210 how = SIG_UNBLOCK;
6211 break;
6212 case TARGET_SIG_SETMASK:
6213 how = SIG_SETMASK;
6214 break;
6215 default:
6216 ret = -TARGET_EINVAL;
6217 goto fail;
6219 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6220 goto efault;
6221 target_to_host_old_sigset(&set, p);
6222 unlock_user(p, arg2, 0);
6223 set_ptr = &set;
6224 } else {
6225 how = 0;
6226 set_ptr = NULL;
6228 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6229 if (!is_error(ret) && arg3) {
6230 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6231 goto efault;
6232 host_to_target_old_sigset(p, &oldset);
6233 unlock_user(p, arg3, sizeof(target_sigset_t));
6235 #endif
6237 break;
6238 #endif
6239 case TARGET_NR_rt_sigprocmask:
6241 int how = arg1;
6242 sigset_t set, oldset, *set_ptr;
6244 if (arg2) {
6245 switch(how) {
6246 case TARGET_SIG_BLOCK:
6247 how = SIG_BLOCK;
6248 break;
6249 case TARGET_SIG_UNBLOCK:
6250 how = SIG_UNBLOCK;
6251 break;
6252 case TARGET_SIG_SETMASK:
6253 how = SIG_SETMASK;
6254 break;
6255 default:
6256 ret = -TARGET_EINVAL;
6257 goto fail;
6259 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6260 goto efault;
6261 target_to_host_sigset(&set, p);
6262 unlock_user(p, arg2, 0);
6263 set_ptr = &set;
6264 } else {
6265 how = 0;
6266 set_ptr = NULL;
6268 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6269 if (!is_error(ret) && arg3) {
6270 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6271 goto efault;
6272 host_to_target_sigset(p, &oldset);
6273 unlock_user(p, arg3, sizeof(target_sigset_t));
6276 break;
6277 #ifdef TARGET_NR_sigpending
6278 case TARGET_NR_sigpending:
6280 sigset_t set;
6281 ret = get_errno(sigpending(&set));
6282 if (!is_error(ret)) {
6283 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6284 goto efault;
6285 host_to_target_old_sigset(p, &set);
6286 unlock_user(p, arg1, sizeof(target_sigset_t));
6289 break;
6290 #endif
6291 case TARGET_NR_rt_sigpending:
6293 sigset_t set;
6294 ret = get_errno(sigpending(&set));
6295 if (!is_error(ret)) {
6296 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6297 goto efault;
6298 host_to_target_sigset(p, &set);
6299 unlock_user(p, arg1, sizeof(target_sigset_t));
6302 break;
6303 #ifdef TARGET_NR_sigsuspend
6304 case TARGET_NR_sigsuspend:
6306 sigset_t set;
6307 #if defined(TARGET_ALPHA)
6308 abi_ulong mask = arg1;
6309 target_to_host_old_sigset(&set, &mask);
6310 #else
6311 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6312 goto efault;
6313 target_to_host_old_sigset(&set, p);
6314 unlock_user(p, arg1, 0);
6315 #endif
6316 ret = get_errno(sigsuspend(&set));
6318 break;
6319 #endif
6320 case TARGET_NR_rt_sigsuspend:
6322 sigset_t set;
6323 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6324 goto efault;
6325 target_to_host_sigset(&set, p);
6326 unlock_user(p, arg1, 0);
6327 ret = get_errno(sigsuspend(&set));
6329 break;
6330 case TARGET_NR_rt_sigtimedwait:
6332 sigset_t set;
6333 struct timespec uts, *puts;
6334 siginfo_t uinfo;
6336 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6337 goto efault;
6338 target_to_host_sigset(&set, p);
6339 unlock_user(p, arg1, 0);
6340 if (arg3) {
6341 puts = &uts;
6342 target_to_host_timespec(puts, arg3);
6343 } else {
6344 puts = NULL;
6346 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6347 if (!is_error(ret)) {
6348 if (arg2) {
6349 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6351 if (!p) {
6352 goto efault;
6354 host_to_target_siginfo(p, &uinfo);
6355 unlock_user(p, arg2, sizeof(target_siginfo_t));
6357 ret = host_to_target_signal(ret);
6360 break;
6361 case TARGET_NR_rt_sigqueueinfo:
6363 siginfo_t uinfo;
6364 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6365 goto efault;
6366 target_to_host_siginfo(&uinfo, p);
6367 unlock_user(p, arg1, 0);
6368 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6370 break;
6371 #ifdef TARGET_NR_sigreturn
6372 case TARGET_NR_sigreturn:
6373 /* NOTE: ret is eax, so no transcoding is needed */
6374 ret = do_sigreturn(cpu_env);
6375 break;
6376 #endif
6377 case TARGET_NR_rt_sigreturn:
6378 /* NOTE: ret is eax, so no transcoding is needed */
6379 ret = do_rt_sigreturn(cpu_env);
6380 break;
6381 case TARGET_NR_sethostname:
6382 if (!(p = lock_user_string(arg1)))
6383 goto efault;
6384 ret = get_errno(sethostname(p, arg2));
6385 unlock_user(p, arg1, 0);
6386 break;
6387 case TARGET_NR_setrlimit:
6389 int resource = target_to_host_resource(arg1);
6390 struct target_rlimit *target_rlim;
6391 struct rlimit rlim;
6392 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6393 goto efault;
6394 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6395 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6396 unlock_user_struct(target_rlim, arg2, 0);
6397 ret = get_errno(setrlimit(resource, &rlim));
6399 break;
6400 case TARGET_NR_getrlimit:
6402 int resource = target_to_host_resource(arg1);
6403 struct target_rlimit *target_rlim;
6404 struct rlimit rlim;
6406 ret = get_errno(getrlimit(resource, &rlim));
6407 if (!is_error(ret)) {
6408 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6409 goto efault;
6410 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6411 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6412 unlock_user_struct(target_rlim, arg2, 1);
6415 break;
6416 case TARGET_NR_getrusage:
6418 struct rusage rusage;
6419 ret = get_errno(getrusage(arg1, &rusage));
6420 if (!is_error(ret)) {
6421 ret = host_to_target_rusage(arg2, &rusage);
6424 break;
6425 case TARGET_NR_gettimeofday:
6427 struct timeval tv;
6428 ret = get_errno(gettimeofday(&tv, NULL));
6429 if (!is_error(ret)) {
6430 if (copy_to_user_timeval(arg1, &tv))
6431 goto efault;
6434 break;
6435 case TARGET_NR_settimeofday:
6437 struct timeval tv, *ptv = NULL;
6438 struct timezone tz, *ptz = NULL;
6440 if (arg1) {
6441 if (copy_from_user_timeval(&tv, arg1)) {
6442 goto efault;
6444 ptv = &tv;
6447 if (arg2) {
6448 if (copy_from_user_timezone(&tz, arg2)) {
6449 goto efault;
6451 ptz = &tz;
6454 ret = get_errno(settimeofday(ptv, ptz));
6456 break;
6457 #if defined(TARGET_NR_select)
6458 case TARGET_NR_select:
6459 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6460 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6461 #else
6463 struct target_sel_arg_struct *sel;
6464 abi_ulong inp, outp, exp, tvp;
6465 long nsel;
6467 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6468 goto efault;
6469 nsel = tswapal(sel->n);
6470 inp = tswapal(sel->inp);
6471 outp = tswapal(sel->outp);
6472 exp = tswapal(sel->exp);
6473 tvp = tswapal(sel->tvp);
6474 unlock_user_struct(sel, arg1, 0);
6475 ret = do_select(nsel, inp, outp, exp, tvp);
6477 #endif
6478 break;
6479 #endif
6480 #ifdef TARGET_NR_pselect6
6481 case TARGET_NR_pselect6:
6483 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6484 fd_set rfds, wfds, efds;
6485 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6486 struct timespec ts, *ts_ptr;
6488 /*
6489 * The 6th arg is actually two args smashed together,
6490 * so we cannot use the C library.
6491 */
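/* As a rough illustration of the layout read below: the guest's sixth
 * argument points at a two-element block of abi_ulongs,
 *     { sigset_addr, sigsetsize }
 * from which the host sigset_t is then reconstructed.
 */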
6492 sigset_t set;
6493 struct {
6494 sigset_t *set;
6495 size_t size;
6496 } sig, *sig_ptr;
6498 abi_ulong arg_sigset, arg_sigsize, *arg7;
6499 target_sigset_t *target_sigset;
6501 n = arg1;
6502 rfd_addr = arg2;
6503 wfd_addr = arg3;
6504 efd_addr = arg4;
6505 ts_addr = arg5;
6507 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6508 if (ret) {
6509 goto fail;
6511 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6512 if (ret) {
6513 goto fail;
6515 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6516 if (ret) {
6517 goto fail;
6520 /*
6521 * This takes a timespec, and not a timeval, so we cannot
6522 * use the do_select() helper ...
6523 */
6524 if (ts_addr) {
6525 if (target_to_host_timespec(&ts, ts_addr)) {
6526 goto efault;
6528 ts_ptr = &ts;
6529 } else {
6530 ts_ptr = NULL;
6533 /* Extract the two packed args for the sigset */
6534 if (arg6) {
6535 sig_ptr = &sig;
6536 sig.size = _NSIG / 8;
6538 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6539 if (!arg7) {
6540 goto efault;
6542 arg_sigset = tswapal(arg7[0]);
6543 arg_sigsize = tswapal(arg7[1]);
6544 unlock_user(arg7, arg6, 0);
6546 if (arg_sigset) {
6547 sig.set = &set;
6548 if (arg_sigsize != sizeof(*target_sigset)) {
6549 /* Like the kernel, we enforce correct size sigsets */
6550 ret = -TARGET_EINVAL;
6551 goto fail;
6553 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6554 sizeof(*target_sigset), 1);
6555 if (!target_sigset) {
6556 goto efault;
6558 target_to_host_sigset(&set, target_sigset);
6559 unlock_user(target_sigset, arg_sigset, 0);
6560 } else {
6561 sig.set = NULL;
6563 } else {
6564 sig_ptr = NULL;
6567 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6568 ts_ptr, sig_ptr));
6570 if (!is_error(ret)) {
6571 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6572 goto efault;
6573 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6574 goto efault;
6575 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6576 goto efault;
6578 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6579 goto efault;
6582 break;
6583 #endif
6584 case TARGET_NR_symlink:
6586 void *p2;
6587 p = lock_user_string(arg1);
6588 p2 = lock_user_string(arg2);
6589 if (!p || !p2)
6590 ret = -TARGET_EFAULT;
6591 else
6592 ret = get_errno(symlink(p, p2));
6593 unlock_user(p2, arg2, 0);
6594 unlock_user(p, arg1, 0);
6596 break;
6597 #if defined(TARGET_NR_symlinkat)
6598 case TARGET_NR_symlinkat:
6600 void *p2;
6601 p = lock_user_string(arg1);
6602 p2 = lock_user_string(arg3);
6603 if (!p || !p2)
6604 ret = -TARGET_EFAULT;
6605 else
6606 ret = get_errno(symlinkat(p, arg2, p2));
6607 unlock_user(p2, arg3, 0);
6608 unlock_user(p, arg1, 0);
6610 break;
6611 #endif
6612 #ifdef TARGET_NR_oldlstat
6613 case TARGET_NR_oldlstat:
6614 goto unimplemented;
6615 #endif
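/* readlink and readlinkat special-case the guest reading /proc/self/exe:
 * instead of the host path of QEMU itself, the resolved path of the
 * emulated binary (exec_path) is returned.
 */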
6616 case TARGET_NR_readlink:
6618 void *p2;
6619 p = lock_user_string(arg1);
6620 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6621 if (!p || !p2) {
6622 ret = -TARGET_EFAULT;
6623 } else if (is_proc_myself((const char *)p, "exe")) {
6624 char real[PATH_MAX], *temp;
6625 temp = realpath(exec_path, real);
6626 ret = temp == NULL ? get_errno(-1) : strlen(real);
6627 snprintf((char *)p2, arg3, "%s", real);
6628 } else {
6629 ret = get_errno(readlink(path(p), p2, arg3));
6631 unlock_user(p2, arg2, ret);
6632 unlock_user(p, arg1, 0);
6634 break;
6635 #if defined(TARGET_NR_readlinkat)
6636 case TARGET_NR_readlinkat:
6638 void *p2;
6639 p = lock_user_string(arg2);
6640 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6641 if (!p || !p2) {
6642 ret = -TARGET_EFAULT;
6643 } else if (is_proc_myself((const char *)p, "exe")) {
6644 char real[PATH_MAX], *temp;
6645 temp = realpath(exec_path, real);
6646 ret = temp == NULL ? get_errno(-1) : strlen(real);
6647 snprintf((char *)p2, arg4, "%s", real);
6648 } else {
6649 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6651 unlock_user(p2, arg3, ret);
6652 unlock_user(p, arg2, 0);
6654 break;
6655 #endif
6656 #ifdef TARGET_NR_uselib
6657 case TARGET_NR_uselib:
6658 goto unimplemented;
6659 #endif
6660 #ifdef TARGET_NR_swapon
6661 case TARGET_NR_swapon:
6662 if (!(p = lock_user_string(arg1)))
6663 goto efault;
6664 ret = get_errno(swapon(p, arg2));
6665 unlock_user(p, arg1, 0);
6666 break;
6667 #endif
6668 case TARGET_NR_reboot:
6669 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6670 /* arg4 must be ignored in all other cases */
6671 p = lock_user_string(arg4);
6672 if (!p) {
6673 goto efault;
6675 ret = get_errno(reboot(arg1, arg2, arg3, p));
6676 unlock_user(p, arg4, 0);
6677 } else {
6678 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6680 break;
6681 #ifdef TARGET_NR_readdir
6682 case TARGET_NR_readdir:
6683 goto unimplemented;
6684 #endif
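/* On the 32-bit targets listed below, the old mmap syscall passes a
 * pointer to a six-element block of abi_ulongs (addr, len, prot, flags,
 * fd, offset) instead of six separate arguments, so the block is read
 * and byte-swapped before calling target_mmap().
 */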
6685 #ifdef TARGET_NR_mmap
6686 case TARGET_NR_mmap:
6687 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6688 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6689 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6690 || defined(TARGET_S390X)
6692 abi_ulong *v;
6693 abi_ulong v1, v2, v3, v4, v5, v6;
6694 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6695 goto efault;
6696 v1 = tswapal(v[0]);
6697 v2 = tswapal(v[1]);
6698 v3 = tswapal(v[2]);
6699 v4 = tswapal(v[3]);
6700 v5 = tswapal(v[4]);
6701 v6 = tswapal(v[5]);
6702 unlock_user(v, arg1, 0);
6703 ret = get_errno(target_mmap(v1, v2, v3,
6704 target_to_host_bitmask(v4, mmap_flags_tbl),
6705 v5, v6));
6707 #else
6708 ret = get_errno(target_mmap(arg1, arg2, arg3,
6709 target_to_host_bitmask(arg4, mmap_flags_tbl),
6710 arg5,
6711 arg6));
6712 #endif
6713 break;
6714 #endif
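/* mmap2 takes its file offset in units of 1 << MMAP_SHIFT bytes
 * (4096 by default), so arg6 is shifted up before being handed to
 * target_mmap(); e.g. arg6 == 3 means a byte offset of 12288.
 */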
6715 #ifdef TARGET_NR_mmap2
6716 case TARGET_NR_mmap2:
6717 #ifndef MMAP_SHIFT
6718 #define MMAP_SHIFT 12
6719 #endif
6720 ret = get_errno(target_mmap(arg1, arg2, arg3,
6721 target_to_host_bitmask(arg4, mmap_flags_tbl),
6722 arg5,
6723 arg6 << MMAP_SHIFT));
6724 break;
6725 #endif
6726 case TARGET_NR_munmap:
6727 ret = get_errno(target_munmap(arg1, arg2));
6728 break;
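/* For mprotect, if the request carries PROT_GROWSDOWN and starts inside
 * the guest stack region, the start is snapped down to stack_limit and
 * the length grown by the same amount: e.g. a request at
 * stack_limit + 0x1000 for 0x2000 bytes becomes stack_limit for 0x3000
 * bytes, with PROT_GROWSDOWN stripped before calling target_mprotect().
 */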
6729 case TARGET_NR_mprotect:
6731 TaskState *ts = cpu->opaque;
6732 /* Special hack to detect libc making the stack executable. */
6733 if ((arg3 & PROT_GROWSDOWN)
6734 && arg1 >= ts->info->stack_limit
6735 && arg1 <= ts->info->start_stack) {
6736 arg3 &= ~PROT_GROWSDOWN;
6737 arg2 = arg2 + arg1 - ts->info->stack_limit;
6738 arg1 = ts->info->stack_limit;
6741 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6742 break;
6743 #ifdef TARGET_NR_mremap
6744 case TARGET_NR_mremap:
6745 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6746 break;
6747 #endif
6748 /* ??? msync/mlock/munlock are broken for softmmu. */
6749 #ifdef TARGET_NR_msync
6750 case TARGET_NR_msync:
6751 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6752 break;
6753 #endif
6754 #ifdef TARGET_NR_mlock
6755 case TARGET_NR_mlock:
6756 ret = get_errno(mlock(g2h(arg1), arg2));
6757 break;
6758 #endif
6759 #ifdef TARGET_NR_munlock
6760 case TARGET_NR_munlock:
6761 ret = get_errno(munlock(g2h(arg1), arg2));
6762 break;
6763 #endif
6764 #ifdef TARGET_NR_mlockall
6765 case TARGET_NR_mlockall:
6766 ret = get_errno(mlockall(arg1));
6767 break;
6768 #endif
6769 #ifdef TARGET_NR_munlockall
6770 case TARGET_NR_munlockall:
6771 ret = get_errno(munlockall());
6772 break;
6773 #endif
6774 case TARGET_NR_truncate:
6775 if (!(p = lock_user_string(arg1)))
6776 goto efault;
6777 ret = get_errno(truncate(p, arg2));
6778 unlock_user(p, arg1, 0);
6779 break;
6780 case TARGET_NR_ftruncate:
6781 ret = get_errno(ftruncate(arg1, arg2));
6782 break;
6783 case TARGET_NR_fchmod:
6784 ret = get_errno(fchmod(arg1, arg2));
6785 break;
6786 #if defined(TARGET_NR_fchmodat)
6787 case TARGET_NR_fchmodat:
6788 if (!(p = lock_user_string(arg2)))
6789 goto efault;
6790 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6791 unlock_user(p, arg2, 0);
6792 break;
6793 #endif
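/* getpriority's result is biased below as 20 - nice so the guest always
 * sees a positive number (a nice value of -5 is reported as 25); Alpha
 * instead gets the unbiased value and explicitly signals no error.
 */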
6794 case TARGET_NR_getpriority:
6795 /* Note that negative values are valid for getpriority, so we must
6796 differentiate based on errno settings. */
6797 errno = 0;
6798 ret = getpriority(arg1, arg2);
6799 if (ret == -1 && errno != 0) {
6800 ret = -host_to_target_errno(errno);
6801 break;
6803 #ifdef TARGET_ALPHA
6804 /* Return value is the unbiased priority. Signal no error. */
6805 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6806 #else
6807 /* Return value is a biased priority to avoid negative numbers. */
6808 ret = 20 - ret;
6809 #endif
6810 break;
6811 case TARGET_NR_setpriority:
6812 ret = get_errno(setpriority(arg1, arg2, arg3));
6813 break;
6814 #ifdef TARGET_NR_profil
6815 case TARGET_NR_profil:
6816 goto unimplemented;
6817 #endif
6818 case TARGET_NR_statfs:
6819 if (!(p = lock_user_string(arg1)))
6820 goto efault;
6821 ret = get_errno(statfs(path(p), &stfs));
6822 unlock_user(p, arg1, 0);
6823 convert_statfs:
6824 if (!is_error(ret)) {
6825 struct target_statfs *target_stfs;
6827 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6828 goto efault;
6829 __put_user(stfs.f_type, &target_stfs->f_type);
6830 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6831 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6832 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6833 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6834 __put_user(stfs.f_files, &target_stfs->f_files);
6835 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6836 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6837 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6838 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6839 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6840 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6841 unlock_user_struct(target_stfs, arg2, 1);
6843 break;
6844 case TARGET_NR_fstatfs:
6845 ret = get_errno(fstatfs(arg1, &stfs));
6846 goto convert_statfs;
6847 #ifdef TARGET_NR_statfs64
6848 case TARGET_NR_statfs64:
6849 if (!(p = lock_user_string(arg1)))
6850 goto efault;
6851 ret = get_errno(statfs(path(p), &stfs));
6852 unlock_user(p, arg1, 0);
6853 convert_statfs64:
6854 if (!is_error(ret)) {
6855 struct target_statfs64 *target_stfs;
6857 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6858 goto efault;
6859 __put_user(stfs.f_type, &target_stfs->f_type);
6860 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6861 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6862 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6863 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6864 __put_user(stfs.f_files, &target_stfs->f_files);
6865 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6866 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6867 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6868 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6869 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6870 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6871 unlock_user_struct(target_stfs, arg3, 1);
6873 break;
6874 case TARGET_NR_fstatfs64:
6875 ret = get_errno(fstatfs(arg1, &stfs));
6876 goto convert_statfs64;
6877 #endif
6878 #ifdef TARGET_NR_ioperm
6879 case TARGET_NR_ioperm:
6880 goto unimplemented;
6881 #endif
6882 #ifdef TARGET_NR_socketcall
6883 case TARGET_NR_socketcall:
6884 ret = do_socketcall(arg1, arg2);
6885 break;
6886 #endif
6887 #ifdef TARGET_NR_accept
6888 case TARGET_NR_accept:
6889 ret = do_accept4(arg1, arg2, arg3, 0);
6890 break;
6891 #endif
6892 #ifdef TARGET_NR_accept4
6893 case TARGET_NR_accept4:
6894 #ifdef CONFIG_ACCEPT4
6895 ret = do_accept4(arg1, arg2, arg3, arg4);
6896 #else
6897 goto unimplemented;
6898 #endif
6899 break;
6900 #endif
6901 #ifdef TARGET_NR_bind
6902 case TARGET_NR_bind:
6903 ret = do_bind(arg1, arg2, arg3);
6904 break;
6905 #endif
6906 #ifdef TARGET_NR_connect
6907 case TARGET_NR_connect:
6908 ret = do_connect(arg1, arg2, arg3);
6909 break;
6910 #endif
6911 #ifdef TARGET_NR_getpeername
6912 case TARGET_NR_getpeername:
6913 ret = do_getpeername(arg1, arg2, arg3);
6914 break;
6915 #endif
6916 #ifdef TARGET_NR_getsockname
6917 case TARGET_NR_getsockname:
6918 ret = do_getsockname(arg1, arg2, arg3);
6919 break;
6920 #endif
6921 #ifdef TARGET_NR_getsockopt
6922 case TARGET_NR_getsockopt:
6923 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6924 break;
6925 #endif
6926 #ifdef TARGET_NR_listen
6927 case TARGET_NR_listen:
6928 ret = get_errno(listen(arg1, arg2));
6929 break;
6930 #endif
6931 #ifdef TARGET_NR_recv
6932 case TARGET_NR_recv:
6933 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6934 break;
6935 #endif
6936 #ifdef TARGET_NR_recvfrom
6937 case TARGET_NR_recvfrom:
6938 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6939 break;
6940 #endif
6941 #ifdef TARGET_NR_recvmsg
6942 case TARGET_NR_recvmsg:
6943 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6944 break;
6945 #endif
6946 #ifdef TARGET_NR_send
6947 case TARGET_NR_send:
6948 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6949 break;
6950 #endif
6951 #ifdef TARGET_NR_sendmsg
6952 case TARGET_NR_sendmsg:
6953 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6954 break;
6955 #endif
6956 #ifdef TARGET_NR_sendmmsg
6957 case TARGET_NR_sendmmsg:
6958 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
6959 break;
6960 case TARGET_NR_recvmmsg:
6961 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
6962 break;
6963 #endif
6964 #ifdef TARGET_NR_sendto
6965 case TARGET_NR_sendto:
6966 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6967 break;
6968 #endif
6969 #ifdef TARGET_NR_shutdown
6970 case TARGET_NR_shutdown:
6971 ret = get_errno(shutdown(arg1, arg2));
6972 break;
6973 #endif
6974 #ifdef TARGET_NR_socket
6975 case TARGET_NR_socket:
6976 ret = do_socket(arg1, arg2, arg3);
6977 break;
6978 #endif
6979 #ifdef TARGET_NR_socketpair
6980 case TARGET_NR_socketpair:
6981 ret = do_socketpair(arg1, arg2, arg3, arg4);
6982 break;
6983 #endif
6984 #ifdef TARGET_NR_setsockopt
6985 case TARGET_NR_setsockopt:
6986 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6987 break;
6988 #endif
6990 case TARGET_NR_syslog:
6991 if (!(p = lock_user_string(arg2)))
6992 goto efault;
6993 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6994 unlock_user(p, arg2, 0);
6995 break;
6997 case TARGET_NR_setitimer:
6999 struct itimerval value, ovalue, *pvalue;
7001 if (arg2) {
7002 pvalue = &value;
7003 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7004 || copy_from_user_timeval(&pvalue->it_value,
7005 arg2 + sizeof(struct target_timeval)))
7006 goto efault;
7007 } else {
7008 pvalue = NULL;
7010 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7011 if (!is_error(ret) && arg3) {
7012 if (copy_to_user_timeval(arg3,
7013 &ovalue.it_interval)
7014 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7015 &ovalue.it_value))
7016 goto efault;
7019 break;
7020 case TARGET_NR_getitimer:
7022 struct itimerval value;
7024 ret = get_errno(getitimer(arg1, &value));
7025 if (!is_error(ret) && arg2) {
7026 if (copy_to_user_timeval(arg2,
7027 &value.it_interval)
7028 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7029 &value.it_value))
7030 goto efault;
7033 break;
7034 case TARGET_NR_stat:
7035 if (!(p = lock_user_string(arg1)))
7036 goto efault;
7037 ret = get_errno(stat(path(p), &st));
7038 unlock_user(p, arg1, 0);
7039 goto do_stat;
7040 case TARGET_NR_lstat:
7041 if (!(p = lock_user_string(arg1)))
7042 goto efault;
7043 ret = get_errno(lstat(path(p), &st));
7044 unlock_user(p, arg1, 0);
7045 goto do_stat;
7046 case TARGET_NR_fstat:
7048 ret = get_errno(fstat(arg1, &st));
7049 do_stat:
7050 if (!is_error(ret)) {
7051 struct target_stat *target_st;
7053 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7054 goto efault;
7055 memset(target_st, 0, sizeof(*target_st));
7056 __put_user(st.st_dev, &target_st->st_dev);
7057 __put_user(st.st_ino, &target_st->st_ino);
7058 __put_user(st.st_mode, &target_st->st_mode);
7059 __put_user(st.st_uid, &target_st->st_uid);
7060 __put_user(st.st_gid, &target_st->st_gid);
7061 __put_user(st.st_nlink, &target_st->st_nlink);
7062 __put_user(st.st_rdev, &target_st->st_rdev);
7063 __put_user(st.st_size, &target_st->st_size);
7064 __put_user(st.st_blksize, &target_st->st_blksize);
7065 __put_user(st.st_blocks, &target_st->st_blocks);
7066 __put_user(st.st_atime, &target_st->target_st_atime);
7067 __put_user(st.st_mtime, &target_st->target_st_mtime);
7068 __put_user(st.st_ctime, &target_st->target_st_ctime);
7069 unlock_user_struct(target_st, arg2, 1);
7072 break;
7073 #ifdef TARGET_NR_olduname
7074 case TARGET_NR_olduname:
7075 goto unimplemented;
7076 #endif
7077 #ifdef TARGET_NR_iopl
7078 case TARGET_NR_iopl:
7079 goto unimplemented;
7080 #endif
7081 case TARGET_NR_vhangup:
7082 ret = get_errno(vhangup());
7083 break;
7084 #ifdef TARGET_NR_idle
7085 case TARGET_NR_idle:
7086 goto unimplemented;
7087 #endif
7088 #ifdef TARGET_NR_syscall
7089 case TARGET_NR_syscall:
7090 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7091 arg6, arg7, arg8, 0);
7092 break;
7093 #endif
7094 case TARGET_NR_wait4:
7096 int status;
7097 abi_long status_ptr = arg2;
7098 struct rusage rusage, *rusage_ptr;
7099 abi_ulong target_rusage = arg4;
7100 abi_long rusage_err;
7101 if (target_rusage)
7102 rusage_ptr = &rusage;
7103 else
7104 rusage_ptr = NULL;
7105 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7106 if (!is_error(ret)) {
7107 if (status_ptr && ret) {
7108 status = host_to_target_waitstatus(status);
7109 if (put_user_s32(status, status_ptr))
7110 goto efault;
7112 if (target_rusage) {
7113 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7114 if (rusage_err) {
7115 ret = rusage_err;
7120 break;
7121 #ifdef TARGET_NR_swapoff
7122 case TARGET_NR_swapoff:
7123 if (!(p = lock_user_string(arg1)))
7124 goto efault;
7125 ret = get_errno(swapoff(p));
7126 unlock_user(p, arg1, 0);
7127 break;
7128 #endif
7129 case TARGET_NR_sysinfo:
7131 struct target_sysinfo *target_value;
7132 struct sysinfo value;
7133 ret = get_errno(sysinfo(&value));
7134 if (!is_error(ret) && arg1)
7136 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7137 goto efault;
7138 __put_user(value.uptime, &target_value->uptime);
7139 __put_user(value.loads[0], &target_value->loads[0]);
7140 __put_user(value.loads[1], &target_value->loads[1]);
7141 __put_user(value.loads[2], &target_value->loads[2]);
7142 __put_user(value.totalram, &target_value->totalram);
7143 __put_user(value.freeram, &target_value->freeram);
7144 __put_user(value.sharedram, &target_value->sharedram);
7145 __put_user(value.bufferram, &target_value->bufferram);
7146 __put_user(value.totalswap, &target_value->totalswap);
7147 __put_user(value.freeswap, &target_value->freeswap);
7148 __put_user(value.procs, &target_value->procs);
7149 __put_user(value.totalhigh, &target_value->totalhigh);
7150 __put_user(value.freehigh, &target_value->freehigh);
7151 __put_user(value.mem_unit, &target_value->mem_unit);
7152 unlock_user_struct(target_value, arg1, 1);
7155 break;
7156 #ifdef TARGET_NR_ipc
7157 case TARGET_NR_ipc:
7158 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7159 break;
7160 #endif
7161 #ifdef TARGET_NR_semget
7162 case TARGET_NR_semget:
7163 ret = get_errno(semget(arg1, arg2, arg3));
7164 break;
7165 #endif
7166 #ifdef TARGET_NR_semop
7167 case TARGET_NR_semop:
7168 ret = do_semop(arg1, arg2, arg3);
7169 break;
7170 #endif
7171 #ifdef TARGET_NR_semctl
7172 case TARGET_NR_semctl:
7173 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
7174 break;
7175 #endif
7176 #ifdef TARGET_NR_msgctl
7177 case TARGET_NR_msgctl:
7178 ret = do_msgctl(arg1, arg2, arg3);
7179 break;
7180 #endif
7181 #ifdef TARGET_NR_msgget
7182 case TARGET_NR_msgget:
7183 ret = get_errno(msgget(arg1, arg2));
7184 break;
7185 #endif
7186 #ifdef TARGET_NR_msgrcv
7187 case TARGET_NR_msgrcv:
7188 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7189 break;
7190 #endif
7191 #ifdef TARGET_NR_msgsnd
7192 case TARGET_NR_msgsnd:
7193 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7194 break;
7195 #endif
7196 #ifdef TARGET_NR_shmget
7197 case TARGET_NR_shmget:
7198 ret = get_errno(shmget(arg1, arg2, arg3));
7199 break;
7200 #endif
7201 #ifdef TARGET_NR_shmctl
7202 case TARGET_NR_shmctl:
7203 ret = do_shmctl(arg1, arg2, arg3);
7204 break;
7205 #endif
7206 #ifdef TARGET_NR_shmat
7207 case TARGET_NR_shmat:
7208 ret = do_shmat(arg1, arg2, arg3);
7209 break;
7210 #endif
7211 #ifdef TARGET_NR_shmdt
7212 case TARGET_NR_shmdt:
7213 ret = do_shmdt(arg1);
7214 break;
7215 #endif
7216 case TARGET_NR_fsync:
7217 ret = get_errno(fsync(arg1));
7218 break;
7219 case TARGET_NR_clone:
7220 /* Linux manages to have three different orderings for its
7221 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7222 * match the kernel's CONFIG_CLONE_* settings.
7223 * Microblaze is further special in that it uses a sixth
7224 * implicit argument to clone for the TLS pointer.
7225 */
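/* Assuming do_fork(env, flags, newsp, parent_tidptr, newtls,
 * child_tidptr), the register arguments are permuted per target: the
 * default order passes the TLS pointer in arg5 and the child tid
 * pointer in arg4, CLONE_BACKWARDS swaps those two, and
 * CLONE_BACKWARDS2 also exchanges the flags and new-stack arguments.
 */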
7226 #if defined(TARGET_MICROBLAZE)
7227 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7228 #elif defined(TARGET_CLONE_BACKWARDS)
7229 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7230 #elif defined(TARGET_CLONE_BACKWARDS2)
7231 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7232 #else
7233 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7234 #endif
7235 break;
7236 #ifdef __NR_exit_group
7237 /* new thread calls */
7238 case TARGET_NR_exit_group:
7239 #ifdef TARGET_GPROF
7240 _mcleanup();
7241 #endif
7242 gdb_exit(cpu_env, arg1);
7243 ret = get_errno(exit_group(arg1));
7244 break;
7245 #endif
7246 case TARGET_NR_setdomainname:
7247 if (!(p = lock_user_string(arg1)))
7248 goto efault;
7249 ret = get_errno(setdomainname(p, arg2));
7250 unlock_user(p, arg1, 0);
7251 break;
7252 case TARGET_NR_uname:
7253 /* no need to transcode because we use the linux syscall */
7255 struct new_utsname * buf;
7257 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7258 goto efault;
7259 ret = get_errno(sys_uname(buf));
7260 if (!is_error(ret)) {
7261 /* Overwrite the native machine name with whatever is being
7262 emulated. */
7263 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7264 /* Allow the user to override the reported release. */
7265 if (qemu_uname_release && *qemu_uname_release)
7266 strcpy (buf->release, qemu_uname_release);
7268 unlock_user_struct(buf, arg1, 1);
7270 break;
7271 #ifdef TARGET_I386
7272 case TARGET_NR_modify_ldt:
7273 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7274 break;
7275 #if !defined(TARGET_X86_64)
7276 case TARGET_NR_vm86old:
7277 goto unimplemented;
7278 case TARGET_NR_vm86:
7279 ret = do_vm86(cpu_env, arg1, arg2);
7280 break;
7281 #endif
7282 #endif
7283 case TARGET_NR_adjtimex:
7284 goto unimplemented;
7285 #ifdef TARGET_NR_create_module
7286 case TARGET_NR_create_module:
7287 #endif
7288 case TARGET_NR_init_module:
7289 case TARGET_NR_delete_module:
7290 #ifdef TARGET_NR_get_kernel_syms
7291 case TARGET_NR_get_kernel_syms:
7292 #endif
7293 goto unimplemented;
7294 case TARGET_NR_quotactl:
7295 goto unimplemented;
7296 case TARGET_NR_getpgid:
7297 ret = get_errno(getpgid(arg1));
7298 break;
7299 case TARGET_NR_fchdir:
7300 ret = get_errno(fchdir(arg1));
7301 break;
7302 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7303 case TARGET_NR_bdflush:
7304 goto unimplemented;
7305 #endif
7306 #ifdef TARGET_NR_sysfs
7307 case TARGET_NR_sysfs:
7308 goto unimplemented;
7309 #endif
7310 case TARGET_NR_personality:
7311 ret = get_errno(personality(arg1));
7312 break;
7313 #ifdef TARGET_NR_afs_syscall
7314 case TARGET_NR_afs_syscall:
7315 goto unimplemented;
7316 #endif
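/* For _llseek below, when the host has no __NR_llseek the 64-bit offset
 * is rebuilt from the two 32-bit halves as ((uint64_t)arg2 << 32) | arg3
 * and a plain lseek() is used; in either variant the resulting position
 * is written back through arg4.
 */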
7317 #ifdef TARGET_NR__llseek /* Not on alpha */
7318 case TARGET_NR__llseek:
7320 int64_t res;
7321 #if !defined(__NR_llseek)
7322 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7323 if (res == -1) {
7324 ret = get_errno(res);
7325 } else {
7326 ret = 0;
7328 #else
7329 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7330 #endif
7331 if ((ret == 0) && put_user_s64(res, arg4)) {
7332 goto efault;
7335 break;
7336 #endif
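/* getdents is handled three ways: with a host getdents but differing
 * target/host struct widths the entries are copied into a separately
 * locked user buffer; with matching widths they are byte-swapped in
 * place; and without a host getdents at all the call is emulated on top
 * of getdents64, shrinking each record in place.
 */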
7337 case TARGET_NR_getdents:
7338 #ifdef __NR_getdents
7339 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7341 struct target_dirent *target_dirp;
7342 struct linux_dirent *dirp;
7343 abi_long count = arg3;
7345 dirp = malloc(count);
7346 if (!dirp) {
7347 ret = -TARGET_ENOMEM;
7348 goto fail;
7351 ret = get_errno(sys_getdents(arg1, dirp, count));
7352 if (!is_error(ret)) {
7353 struct linux_dirent *de;
7354 struct target_dirent *tde;
7355 int len = ret;
7356 int reclen, treclen;
7357 int count1, tnamelen;
7359 count1 = 0;
7360 de = dirp;
7361 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7362 goto efault;
7363 tde = target_dirp;
7364 while (len > 0) {
7365 reclen = de->d_reclen;
7366 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7367 assert(tnamelen >= 0);
7368 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7369 assert(count1 + treclen <= count);
7370 tde->d_reclen = tswap16(treclen);
7371 tde->d_ino = tswapal(de->d_ino);
7372 tde->d_off = tswapal(de->d_off);
7373 memcpy(tde->d_name, de->d_name, tnamelen);
7374 de = (struct linux_dirent *)((char *)de + reclen);
7375 len -= reclen;
7376 tde = (struct target_dirent *)((char *)tde + treclen);
7377 count1 += treclen;
7379 ret = count1;
7380 unlock_user(target_dirp, arg2, ret);
7382 free(dirp);
7384 #else
7386 struct linux_dirent *dirp;
7387 abi_long count = arg3;
7389 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7390 goto efault;
7391 ret = get_errno(sys_getdents(arg1, dirp, count));
7392 if (!is_error(ret)) {
7393 struct linux_dirent *de;
7394 int len = ret;
7395 int reclen;
7396 de = dirp;
7397 while (len > 0) {
7398 reclen = de->d_reclen;
7399 if (reclen > len)
7400 break;
7401 de->d_reclen = tswap16(reclen);
7402 tswapls(&de->d_ino);
7403 tswapls(&de->d_off);
7404 de = (struct linux_dirent *)((char *)de + reclen);
7405 len -= reclen;
7408 unlock_user(dirp, arg2, ret);
7410 #endif
7411 #else
7412 /* Implement getdents in terms of getdents64 */
7414 struct linux_dirent64 *dirp;
7415 abi_long count = arg3;
7417 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7418 if (!dirp) {
7419 goto efault;
7421 ret = get_errno(sys_getdents64(arg1, dirp, count));
7422 if (!is_error(ret)) {
7423 /* Convert the dirent64 structs to target dirent. We do this
7424 * in-place, since we can guarantee that a target_dirent is no
7425 * larger than a dirent64; however this means we have to be
7426 * careful to read everything before writing in the new format.
7427 */
7428 struct linux_dirent64 *de;
7429 struct target_dirent *tde;
7430 int len = ret;
7431 int tlen = 0;
7433 de = dirp;
7434 tde = (struct target_dirent *)dirp;
7435 while (len > 0) {
7436 int namelen, treclen;
7437 int reclen = de->d_reclen;
7438 uint64_t ino = de->d_ino;
7439 int64_t off = de->d_off;
7440 uint8_t type = de->d_type;
7442 namelen = strlen(de->d_name);
7443 treclen = offsetof(struct target_dirent, d_name)
7444 + namelen + 2;
7445 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7447 memmove(tde->d_name, de->d_name, namelen + 1);
7448 tde->d_ino = tswapal(ino);
7449 tde->d_off = tswapal(off);
7450 tde->d_reclen = tswap16(treclen);
7451 /* The target_dirent type is in what was formerly a padding
7452 * byte at the end of the structure:
7453 */
7454 *(((char *)tde) + treclen - 1) = type;
7456 de = (struct linux_dirent64 *)((char *)de + reclen);
7457 tde = (struct target_dirent *)((char *)tde + treclen);
7458 len -= reclen;
7459 tlen += treclen;
7461 ret = tlen;
7463 unlock_user(dirp, arg2, ret);
7465 #endif
7466 break;
7467 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7468 case TARGET_NR_getdents64:
7470 struct linux_dirent64 *dirp;
7471 abi_long count = arg3;
7472 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7473 goto efault;
7474 ret = get_errno(sys_getdents64(arg1, dirp, count));
7475 if (!is_error(ret)) {
7476 struct linux_dirent64 *de;
7477 int len = ret;
7478 int reclen;
7479 de = dirp;
7480 while (len > 0) {
7481 reclen = de->d_reclen;
7482 if (reclen > len)
7483 break;
7484 de->d_reclen = tswap16(reclen);
7485 tswap64s((uint64_t *)&de->d_ino);
7486 tswap64s((uint64_t *)&de->d_off);
7487 de = (struct linux_dirent64 *)((char *)de + reclen);
7488 len -= reclen;
7491 unlock_user(dirp, arg2, ret);
7493 break;
7494 #endif /* TARGET_NR_getdents64 */
7495 #if defined(TARGET_NR__newselect)
7496 case TARGET_NR__newselect:
7497 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7498 break;
7499 #endif
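/* poll and ppoll share the conversion below: each target_pollfd
 * (32-bit fd, 16-bit events) is copied into a host struct pollfd, and
 * for ppoll an optional timespec and signal mask are converted as well;
 * revents is copied back to the guest on success.
 */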
7500 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7501 # ifdef TARGET_NR_poll
7502 case TARGET_NR_poll:
7503 # endif
7504 # ifdef TARGET_NR_ppoll
7505 case TARGET_NR_ppoll:
7506 # endif
7508 struct target_pollfd *target_pfd;
7509 unsigned int nfds = arg2;
7510 int timeout = arg3;
7511 struct pollfd *pfd;
7512 unsigned int i;
7514 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7515 if (!target_pfd)
7516 goto efault;
7518 pfd = alloca(sizeof(struct pollfd) * nfds);
7519 for(i = 0; i < nfds; i++) {
7520 pfd[i].fd = tswap32(target_pfd[i].fd);
7521 pfd[i].events = tswap16(target_pfd[i].events);
7524 # ifdef TARGET_NR_ppoll
7525 if (num == TARGET_NR_ppoll) {
7526 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7527 target_sigset_t *target_set;
7528 sigset_t _set, *set = &_set;
7530 if (arg3) {
7531 if (target_to_host_timespec(timeout_ts, arg3)) {
7532 unlock_user(target_pfd, arg1, 0);
7533 goto efault;
7535 } else {
7536 timeout_ts = NULL;
7539 if (arg4) {
7540 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7541 if (!target_set) {
7542 unlock_user(target_pfd, arg1, 0);
7543 goto efault;
7545 target_to_host_sigset(set, target_set);
7546 } else {
7547 set = NULL;
7550 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7552 if (!is_error(ret) && arg3) {
7553 host_to_target_timespec(arg3, timeout_ts);
7555 if (arg4) {
7556 unlock_user(target_set, arg4, 0);
7558 } else
7559 # endif
7560 ret = get_errno(poll(pfd, nfds, timeout));
7562 if (!is_error(ret)) {
7563 for(i = 0; i < nfds; i++) {
7564 target_pfd[i].revents = tswap16(pfd[i].revents);
7567 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7569 break;
7570 #endif
7571 case TARGET_NR_flock:
7572 /* NOTE: the flock constant seems to be the same for every
7573 Linux platform */
7574 ret = get_errno(flock(arg1, arg2));
7575 break;
7576 case TARGET_NR_readv:
7578 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7579 if (vec != NULL) {
7580 ret = get_errno(readv(arg1, vec, arg3));
7581 unlock_iovec(vec, arg2, arg3, 1);
7582 } else {
7583 ret = -host_to_target_errno(errno);
7586 break;
7587 case TARGET_NR_writev:
7589 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7590 if (vec != NULL) {
7591 ret = get_errno(writev(arg1, vec, arg3));
7592 unlock_iovec(vec, arg2, arg3, 0);
7593 } else {
7594 ret = -host_to_target_errno(errno);
7597 break;
7598 case TARGET_NR_getsid:
7599 ret = get_errno(getsid(arg1));
7600 break;
7601 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7602 case TARGET_NR_fdatasync:
7603 ret = get_errno(fdatasync(arg1));
7604 break;
7605 #endif
7606 case TARGET_NR__sysctl:
7607 /* We don't implement this, but ENOTDIR is always a safe
7608 return value. */
7609 ret = -TARGET_ENOTDIR;
7610 break;
7611 case TARGET_NR_sched_getaffinity:
7613 unsigned int mask_size;
7614 unsigned long *mask;
7616 /*
7617 * sched_getaffinity needs multiples of ulong, so need to take
7618 * care of mismatches between target ulong and host ulong sizes.
7619 */
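/* Example of the rounding below: a 32-bit guest passing arg2 == 4 on a
 * 64-bit host gets mask_size rounded up to 8, since the host kernel
 * expects whole unsigned longs.
 */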
7620 if (arg2 & (sizeof(abi_ulong) - 1)) {
7621 ret = -TARGET_EINVAL;
7622 break;
7624 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7626 mask = alloca(mask_size);
7627 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7629 if (!is_error(ret)) {
7630 if (ret > arg2) {
7631 /* More data returned than the caller's buffer will fit.
7632 * This only happens if sizeof(abi_long) < sizeof(long)
7633 * and the caller passed us a buffer holding an odd number
7634 * of abi_longs. If the host kernel is actually using the
7635 * extra 4 bytes then fail EINVAL; otherwise we can just
7636 * ignore them and only copy the interesting part.
7637 */
7638 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
7639 if (numcpus > arg2 * 8) {
7640 ret = -TARGET_EINVAL;
7641 break;
7643 ret = arg2;
7646 if (copy_to_user(arg3, mask, ret)) {
7647 goto efault;
7651 break;
7652 case TARGET_NR_sched_setaffinity:
7654 unsigned int mask_size;
7655 unsigned long *mask;
7657 /*
7658 * sched_setaffinity needs multiples of ulong, so need to take
7659 * care of mismatches between target ulong and host ulong sizes.
7660 */
7661 if (arg2 & (sizeof(abi_ulong) - 1)) {
7662 ret = -TARGET_EINVAL;
7663 break;
7665 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7667 mask = alloca(mask_size);
7668 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7669 goto efault;
7671 memcpy(mask, p, arg2);
7672 unlock_user_struct(p, arg3, 0);
7674 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7676 break;
7677 case TARGET_NR_sched_setparam:
7679 struct sched_param *target_schp;
7680 struct sched_param schp;
7682 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7683 goto efault;
7684 schp.sched_priority = tswap32(target_schp->sched_priority);
7685 unlock_user_struct(target_schp, arg2, 0);
7686 ret = get_errno(sched_setparam(arg1, &schp));
7688 break;
7689 case TARGET_NR_sched_getparam:
7691 struct sched_param *target_schp;
7692 struct sched_param schp;
7693 ret = get_errno(sched_getparam(arg1, &schp));
7694 if (!is_error(ret)) {
7695 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7696 goto efault;
7697 target_schp->sched_priority = tswap32(schp.sched_priority);
7698 unlock_user_struct(target_schp, arg2, 1);
7701 break;
7702 case TARGET_NR_sched_setscheduler:
7704 struct sched_param *target_schp;
7705 struct sched_param schp;
7706 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7707 goto efault;
7708 schp.sched_priority = tswap32(target_schp->sched_priority);
7709 unlock_user_struct(target_schp, arg3, 0);
7710 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7712 break;
7713 case TARGET_NR_sched_getscheduler:
7714 ret = get_errno(sched_getscheduler(arg1));
7715 break;
7716 case TARGET_NR_sched_yield:
7717 ret = get_errno(sched_yield());
7718 break;
7719 case TARGET_NR_sched_get_priority_max:
7720 ret = get_errno(sched_get_priority_max(arg1));
7721 break;
7722 case TARGET_NR_sched_get_priority_min:
7723 ret = get_errno(sched_get_priority_min(arg1));
7724 break;
7725 case TARGET_NR_sched_rr_get_interval:
7727 struct timespec ts;
7728 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7729 if (!is_error(ret)) {
7730 host_to_target_timespec(arg2, &ts);
7733 break;
7734 case TARGET_NR_nanosleep:
7736 struct timespec req, rem;
7737 target_to_host_timespec(&req, arg1);
7738 ret = get_errno(nanosleep(&req, &rem));
7739 if (is_error(ret) && arg2) {
7740 host_to_target_timespec(arg2, &rem);
7743 break;
7744 #ifdef TARGET_NR_query_module
7745 case TARGET_NR_query_module:
7746 goto unimplemented;
7747 #endif
7748 #ifdef TARGET_NR_nfsservctl
7749 case TARGET_NR_nfsservctl:
7750 goto unimplemented;
7751 #endif
7752 case TARGET_NR_prctl:
7753 switch (arg1) {
7754 case PR_GET_PDEATHSIG:
7756 int deathsig;
7757 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7758 if (!is_error(ret) && arg2
7759 && put_user_ual(deathsig, arg2)) {
7760 goto efault;
7762 break;
7764 #ifdef PR_GET_NAME
7765 case PR_GET_NAME:
7767 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7768 if (!name) {
7769 goto efault;
7771 ret = get_errno(prctl(arg1, (unsigned long)name,
7772 arg3, arg4, arg5));
7773 unlock_user(name, arg2, 16);
7774 break;
7776 case PR_SET_NAME:
7778 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7779 if (!name) {
7780 goto efault;
7782 ret = get_errno(prctl(arg1, (unsigned long)name,
7783 arg3, arg4, arg5));
7784 unlock_user(name, arg2, 0);
7785 break;
7787 #endif
7788 default:
7789 /* Most prctl options have no pointer arguments */
7790 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7791 break;
7793 break;
7794 #ifdef TARGET_NR_arch_prctl
7795 case TARGET_NR_arch_prctl:
7796 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7797 ret = do_arch_prctl(cpu_env, arg1, arg2);
7798 break;
7799 #else
7800 goto unimplemented;
7801 #endif
7802 #endif
7803 #ifdef TARGET_NR_pread64
7804 case TARGET_NR_pread64:
7805 if (regpairs_aligned(cpu_env)) {
7806 arg4 = arg5;
7807 arg5 = arg6;
7809 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7810 goto efault;
7811 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7812 unlock_user(p, arg2, ret);
7813 break;
7814 case TARGET_NR_pwrite64:
7815 if (regpairs_aligned(cpu_env)) {
7816 arg4 = arg5;
7817 arg5 = arg6;
7819 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7820 goto efault;
7821 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7822 unlock_user(p, arg2, 0);
7823 break;
7824 #endif
7825 case TARGET_NR_getcwd:
7826 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7827 goto efault;
7828 ret = get_errno(sys_getcwd1(p, arg2));
7829 unlock_user(p, arg1, ret);
7830 break;
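/* capget and capset share one implementation: the capability header's
 * version decides whether one or two __user_cap_data_struct items
 * follow (one for _LINUX_CAPABILITY_VERSION, two for later versions),
 * and the kernel-updated version field is always copied back.
 */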
7831 case TARGET_NR_capget:
7832 case TARGET_NR_capset:
7834 struct target_user_cap_header *target_header;
7835 struct target_user_cap_data *target_data = NULL;
7836 struct __user_cap_header_struct header;
7837 struct __user_cap_data_struct data[2];
7838 struct __user_cap_data_struct *dataptr = NULL;
7839 int i, target_datalen;
7840 int data_items = 1;
7842 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
7843 goto efault;
7845 header.version = tswap32(target_header->version);
7846 header.pid = tswap32(target_header->pid);
7848 if (header.version != _LINUX_CAPABILITY_VERSION) {
7849 /* Versions 2 and up take a pointer to two user_data structs */
7850 data_items = 2;
7853 target_datalen = sizeof(*target_data) * data_items;
7855 if (arg2) {
7856 if (num == TARGET_NR_capget) {
7857 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
7858 } else {
7859 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
7861 if (!target_data) {
7862 unlock_user_struct(target_header, arg1, 0);
7863 goto efault;
7866 if (num == TARGET_NR_capset) {
7867 for (i = 0; i < data_items; i++) {
7868 data[i].effective = tswap32(target_data[i].effective);
7869 data[i].permitted = tswap32(target_data[i].permitted);
7870 data[i].inheritable = tswap32(target_data[i].inheritable);
7874 dataptr = data;
7877 if (num == TARGET_NR_capget) {
7878 ret = get_errno(capget(&header, dataptr));
7879 } else {
7880 ret = get_errno(capset(&header, dataptr));
7883 /* The kernel always updates version for both capget and capset */
7884 target_header->version = tswap32(header.version);
7885 unlock_user_struct(target_header, arg1, 1);
7887 if (arg2) {
7888 if (num == TARGET_NR_capget) {
7889 for (i = 0; i < data_items; i++) {
7890 target_data[i].effective = tswap32(data[i].effective);
7891 target_data[i].permitted = tswap32(data[i].permitted);
7892 target_data[i].inheritable = tswap32(data[i].inheritable);
7894 unlock_user(target_data, arg2, target_datalen);
7895 } else {
7896 unlock_user(target_data, arg2, 0);
7899 break;
7901 case TARGET_NR_sigaltstack:
7902 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7903 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7904 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7905 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7906 break;
7907 #else
7908 goto unimplemented;
7909 #endif
7911 #ifdef CONFIG_SENDFILE
7912 case TARGET_NR_sendfile:
7914 off_t *offp = NULL;
7915 off_t off;
7916 if (arg3) {
7917 ret = get_user_sal(off, arg3);
7918 if (is_error(ret)) {
7919 break;
7921 offp = &off;
7923 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7924 if (!is_error(ret) && arg3) {
7925 abi_long ret2 = put_user_sal(off, arg3);
7926 if (is_error(ret2)) {
7927 ret = ret2;
7930 break;
7932 #ifdef TARGET_NR_sendfile64
7933 case TARGET_NR_sendfile64:
7935 off_t *offp = NULL;
7936 off_t off;
7937 if (arg3) {
7938 ret = get_user_s64(off, arg3);
7939 if (is_error(ret)) {
7940 break;
7942 offp = &off;
7944 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7945 if (!is_error(ret) && arg3) {
7946 abi_long ret2 = put_user_s64(off, arg3);
7947 if (is_error(ret2)) {
7948 ret = ret2;
7951 break;
7953 #endif
7954 #else
7955 case TARGET_NR_sendfile:
7956 #ifdef TARGET_NR_sendfile64
7957 case TARGET_NR_sendfile64:
7958 #endif
7959 goto unimplemented;
7960 #endif
7962 #ifdef TARGET_NR_getpmsg
7963 case TARGET_NR_getpmsg:
7964 goto unimplemented;
7965 #endif
7966 #ifdef TARGET_NR_putpmsg
7967 case TARGET_NR_putpmsg:
7968 goto unimplemented;
7969 #endif
7970 #ifdef TARGET_NR_vfork
7971 case TARGET_NR_vfork:
7972 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7973 0, 0, 0, 0));
7974 break;
7975 #endif
7976 #ifdef TARGET_NR_ugetrlimit
7977 case TARGET_NR_ugetrlimit:
7979 struct rlimit rlim;
7980 int resource = target_to_host_resource(arg1);
7981 ret = get_errno(getrlimit(resource, &rlim));
7982 if (!is_error(ret)) {
7983 struct target_rlimit *target_rlim;
7984 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7985 goto efault;
7986 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7987 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7988 unlock_user_struct(target_rlim, arg2, 1);
7990 break;
7992 #endif
7993 #ifdef TARGET_NR_truncate64
7994 case TARGET_NR_truncate64:
7995 if (!(p = lock_user_string(arg1)))
7996 goto efault;
7997 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7998 unlock_user(p, arg1, 0);
7999 break;
8000 #endif
8001 #ifdef TARGET_NR_ftruncate64
8002 case TARGET_NR_ftruncate64:
8003 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8004 break;
8005 #endif
8006 #ifdef TARGET_NR_stat64
8007 case TARGET_NR_stat64:
8008 if (!(p = lock_user_string(arg1)))
8009 goto efault;
8010 ret = get_errno(stat(path(p), &st));
8011 unlock_user(p, arg1, 0);
8012 if (!is_error(ret))
8013 ret = host_to_target_stat64(cpu_env, arg2, &st);
8014 break;
8015 #endif
8016 #ifdef TARGET_NR_lstat64
8017 case TARGET_NR_lstat64:
8018 if (!(p = lock_user_string(arg1)))
8019 goto efault;
8020 ret = get_errno(lstat(path(p), &st));
8021 unlock_user(p, arg1, 0);
8022 if (!is_error(ret))
8023 ret = host_to_target_stat64(cpu_env, arg2, &st);
8024 break;
8025 #endif
8026 #ifdef TARGET_NR_fstat64
8027 case TARGET_NR_fstat64:
8028 ret = get_errno(fstat(arg1, &st));
8029 if (!is_error(ret))
8030 ret = host_to_target_stat64(cpu_env, arg2, &st);
8031 break;
8032 #endif
8033 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8034 #ifdef TARGET_NR_fstatat64
8035 case TARGET_NR_fstatat64:
8036 #endif
8037 #ifdef TARGET_NR_newfstatat
8038 case TARGET_NR_newfstatat:
8039 #endif
8040 if (!(p = lock_user_string(arg2)))
8041 goto efault;
8042 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8043 if (!is_error(ret))
8044 ret = host_to_target_stat64(cpu_env, arg3, &st);
8045 break;
8046 #endif
8047 case TARGET_NR_lchown:
8048 if (!(p = lock_user_string(arg1)))
8049 goto efault;
8050 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8051 unlock_user(p, arg1, 0);
8052 break;
8053 #ifdef TARGET_NR_getuid
8054 case TARGET_NR_getuid:
8055 ret = get_errno(high2lowuid(getuid()));
8056 break;
8057 #endif
8058 #ifdef TARGET_NR_getgid
8059 case TARGET_NR_getgid:
8060 ret = get_errno(high2lowgid(getgid()));
8061 break;
8062 #endif
8063 #ifdef TARGET_NR_geteuid
8064 case TARGET_NR_geteuid:
8065 ret = get_errno(high2lowuid(geteuid()));
8066 break;
8067 #endif
8068 #ifdef TARGET_NR_getegid
8069 case TARGET_NR_getegid:
8070 ret = get_errno(high2lowgid(getegid()));
8071 break;
8072 #endif
8073 case TARGET_NR_setreuid:
8074 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8075 break;
8076 case TARGET_NR_setregid:
8077 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8078 break;
8079 case TARGET_NR_getgroups:
8081 int gidsetsize = arg1;
8082 target_id *target_grouplist;
8083 gid_t *grouplist;
8084 int i;
8086 grouplist = alloca(gidsetsize * sizeof(gid_t));
8087 ret = get_errno(getgroups(gidsetsize, grouplist));
8088 if (gidsetsize == 0)
8089 break;
8090 if (!is_error(ret)) {
8091 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8092 if (!target_grouplist)
8093 goto efault;
8094 for(i = 0;i < ret; i++)
8095 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8096 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8099 break;
8100 case TARGET_NR_setgroups:
8102 int gidsetsize = arg1;
8103 target_id *target_grouplist;
8104 gid_t *grouplist = NULL;
8105 int i;
8106 if (gidsetsize) {
8107 grouplist = alloca(gidsetsize * sizeof(gid_t));
8108 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8109 if (!target_grouplist) {
8110 ret = -TARGET_EFAULT;
8111 goto fail;
8113 for (i = 0; i < gidsetsize; i++) {
8114 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8116 unlock_user(target_grouplist, arg2, 0);
8118 ret = get_errno(setgroups(gidsetsize, grouplist));
8120 break;
8121 case TARGET_NR_fchown:
8122 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8123 break;
8124 #if defined(TARGET_NR_fchownat)
8125 case TARGET_NR_fchownat:
8126 if (!(p = lock_user_string(arg2)))
8127 goto efault;
8128 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8129 low2highgid(arg4), arg5));
8130 unlock_user(p, arg2, 0);
8131 break;
8132 #endif
8133 #ifdef TARGET_NR_setresuid
8134 case TARGET_NR_setresuid:
8135 ret = get_errno(setresuid(low2highuid(arg1),
8136 low2highuid(arg2),
8137 low2highuid(arg3)));
8138 break;
8139 #endif
8140 #ifdef TARGET_NR_getresuid
8141 case TARGET_NR_getresuid:
8143 uid_t ruid, euid, suid;
8144 ret = get_errno(getresuid(&ruid, &euid, &suid));
8145 if (!is_error(ret)) {
8146 if (put_user_id(high2lowuid(ruid), arg1)
8147 || put_user_id(high2lowuid(euid), arg2)
8148 || put_user_id(high2lowuid(suid), arg3))
8149 goto efault;
8152 break;
8153 #endif
8154 #ifdef TARGET_NR_setresgid
8155 case TARGET_NR_setresgid:
8156 ret = get_errno(setresgid(low2highgid(arg1),
8157 low2highgid(arg2),
8158 low2highgid(arg3)));
8159 break;
8160 #endif
8161 #ifdef TARGET_NR_getresgid
8162 case TARGET_NR_getresgid:
8164 gid_t rgid, egid, sgid;
8165 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8166 if (!is_error(ret)) {
8167 if (put_user_id(high2lowgid(rgid), arg1)
8168 || put_user_id(high2lowgid(egid), arg2)
8169 || put_user_id(high2lowgid(sgid), arg3))
8170 goto efault;
8173 break;
8174 #endif
8175 case TARGET_NR_chown:
8176 if (!(p = lock_user_string(arg1)))
8177 goto efault;
8178 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8179 unlock_user(p, arg1, 0);
8180 break;
8181 case TARGET_NR_setuid:
8182 ret = get_errno(setuid(low2highuid(arg1)));
8183 break;
8184 case TARGET_NR_setgid:
8185 ret = get_errno(setgid(low2highgid(arg1)));
8186 break;
8187 case TARGET_NR_setfsuid:
8188 ret = get_errno(setfsuid(arg1));
8189 break;
8190 case TARGET_NR_setfsgid:
8191 ret = get_errno(setfsgid(arg1));
8192 break;
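/*
 * Note (illustrative, not from the surrounding source): the chown/setuid/
 * setgroups handlers above go through low2highuid()/high2lowuid() style
 * helpers because these legacy syscall numbers carry 16-bit ids on targets
 * that use USE_UID16; the *32 variants further down pass 32-bit ids through
 * untouched.  The 16-bit direction is essentially:
 *
 *     high2lowuid(uid) == (uid > 65535 ? 65534 : uid)
 *
 * i.e. ids that do not fit collapse to the traditional overflow id 65534,
 * and on targets without USE_UID16 the helpers are simple pass-throughs.
 */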
8194 #ifdef TARGET_NR_lchown32
8195 case TARGET_NR_lchown32:
8196 if (!(p = lock_user_string(arg1)))
8197 goto efault;
8198 ret = get_errno(lchown(p, arg2, arg3));
8199 unlock_user(p, arg1, 0);
8200 break;
8201 #endif
8202 #ifdef TARGET_NR_getuid32
8203 case TARGET_NR_getuid32:
8204 ret = get_errno(getuid());
8205 break;
8206 #endif
8208 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8209 /* Alpha specific */
8210 case TARGET_NR_getxuid:
8211 {
8212 uid_t euid;
8213 euid = geteuid();
8214 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
8215 }
8216 ret = get_errno(getuid());
8217 break;
8218 #endif
8219 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8220 /* Alpha specific */
8221 case TARGET_NR_getxgid:
8222 {
8223 gid_t egid;
8224 egid = getegid();
8225 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
8226 }
8227 ret = get_errno(getgid());
8228 break;
8229 #endif
8230 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8231 /* Alpha specific */
8232 case TARGET_NR_osf_getsysinfo:
8233 ret = -TARGET_EOPNOTSUPP;
8234 switch (arg1) {
8235 case TARGET_GSI_IEEE_FP_CONTROL:
8237 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8239 /* Copied from linux ieee_fpcr_to_swcr. */
8240 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8241 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8242 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8243 | SWCR_TRAP_ENABLE_DZE
8244 | SWCR_TRAP_ENABLE_OVF);
8245 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8246 | SWCR_TRAP_ENABLE_INE);
8247 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8248 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8250 if (put_user_u64 (swcr, arg2))
8251 goto efault;
8252 ret = 0;
8254 break;
8256 /* case GSI_IEEE_STATE_AT_SIGNAL:
8257 -- Not implemented in linux kernel.
8258 case GSI_UACPROC:
8259 -- Retrieves current unaligned access state; not much used.
8260 case GSI_PROC_TYPE:
8261 -- Retrieves implver information; surely not used.
8262 case GSI_GET_HWRPB:
8263 -- Grabs a copy of the HWRPB; surely not used.
8264 */
8265 }
8266 break;
8267 #endif
8268 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8269 /* Alpha specific */
8270 case TARGET_NR_osf_setsysinfo:
8271 ret = -TARGET_EOPNOTSUPP;
8272 switch (arg1) {
8273 case TARGET_SSI_IEEE_FP_CONTROL:
8275 uint64_t swcr, fpcr, orig_fpcr;
8277 if (get_user_u64 (swcr, arg2)) {
8278 goto efault;
8280 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8281 fpcr = orig_fpcr & FPCR_DYN_MASK;
8283 /* Copied from linux ieee_swcr_to_fpcr. */
8284 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8285 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8286 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8287 | SWCR_TRAP_ENABLE_DZE
8288 | SWCR_TRAP_ENABLE_OVF)) << 48;
8289 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8290 | SWCR_TRAP_ENABLE_INE)) << 57;
8291 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8292 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8294 cpu_alpha_store_fpcr(cpu_env, fpcr);
8295 ret = 0;
8297 break;
8299 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8301 uint64_t exc, fpcr, orig_fpcr;
8302 int si_code;
8304 if (get_user_u64(exc, arg2)) {
8305 goto efault;
8308 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8310 /* We only add to the exception status here. */
8311 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8313 cpu_alpha_store_fpcr(cpu_env, fpcr);
8314 ret = 0;
8316 /* Old exceptions are not signaled. */
8317 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8319 /* If any exceptions set by this call,
8320 and are unmasked, send a signal. */
8321 si_code = 0;
8322 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8323 si_code = TARGET_FPE_FLTRES;
8325 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8326 si_code = TARGET_FPE_FLTUND;
8328 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8329 si_code = TARGET_FPE_FLTOVF;
8331 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8332 si_code = TARGET_FPE_FLTDIV;
8334 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8335 si_code = TARGET_FPE_FLTINV;
8337 if (si_code != 0) {
8338 target_siginfo_t info;
8339 info.si_signo = SIGFPE;
8340 info.si_errno = 0;
8341 info.si_code = si_code;
8342 info._sifields._sigfault._addr
8343 = ((CPUArchState *)cpu_env)->pc;
8344 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8347 break;
8349 /* case SSI_NVPAIRS:
8350 -- Used with SSIN_UACPROC to enable unaligned accesses.
8351 case SSI_IEEE_STATE_AT_SIGNAL:
8352 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8353 -- Not implemented in linux kernel
8354 */
8355 }
8356 break;
8357 #endif
8358 #ifdef TARGET_NR_osf_sigprocmask
8359 /* Alpha specific. */
8360 case TARGET_NR_osf_sigprocmask:
8362 abi_ulong mask;
8363 int how;
8364 sigset_t set, oldset;
8366 switch(arg1) {
8367 case TARGET_SIG_BLOCK:
8368 how = SIG_BLOCK;
8369 break;
8370 case TARGET_SIG_UNBLOCK:
8371 how = SIG_UNBLOCK;
8372 break;
8373 case TARGET_SIG_SETMASK:
8374 how = SIG_SETMASK;
8375 break;
8376 default:
8377 ret = -TARGET_EINVAL;
8378 goto fail;
8380 mask = arg2;
8381 target_to_host_old_sigset(&set, &mask);
8382 do_sigprocmask(how, &set, &oldset);
8383 host_to_target_old_sigset(&mask, &oldset);
8384 ret = mask;
8386 break;
8387 #endif
8389 #ifdef TARGET_NR_getgid32
8390 case TARGET_NR_getgid32:
8391 ret = get_errno(getgid());
8392 break;
8393 #endif
8394 #ifdef TARGET_NR_geteuid32
8395 case TARGET_NR_geteuid32:
8396 ret = get_errno(geteuid());
8397 break;
8398 #endif
8399 #ifdef TARGET_NR_getegid32
8400 case TARGET_NR_getegid32:
8401 ret = get_errno(getegid());
8402 break;
8403 #endif
8404 #ifdef TARGET_NR_setreuid32
8405 case TARGET_NR_setreuid32:
8406 ret = get_errno(setreuid(arg1, arg2));
8407 break;
8408 #endif
8409 #ifdef TARGET_NR_setregid32
8410 case TARGET_NR_setregid32:
8411 ret = get_errno(setregid(arg1, arg2));
8412 break;
8413 #endif
8414 #ifdef TARGET_NR_getgroups32
8415 case TARGET_NR_getgroups32:
8417 int gidsetsize = arg1;
8418 uint32_t *target_grouplist;
8419 gid_t *grouplist;
8420 int i;
8422 grouplist = alloca(gidsetsize * sizeof(gid_t));
8423 ret = get_errno(getgroups(gidsetsize, grouplist));
8424 if (gidsetsize == 0)
8425 break;
8426 if (!is_error(ret)) {
8427 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8428 if (!target_grouplist) {
8429 ret = -TARGET_EFAULT;
8430 goto fail;
8432 for(i = 0;i < ret; i++)
8433 target_grouplist[i] = tswap32(grouplist[i]);
8434 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8437 break;
8438 #endif
8439 #ifdef TARGET_NR_setgroups32
8440 case TARGET_NR_setgroups32:
8442 int gidsetsize = arg1;
8443 uint32_t *target_grouplist;
8444 gid_t *grouplist;
8445 int i;
8447 grouplist = alloca(gidsetsize * sizeof(gid_t));
8448 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8449 if (!target_grouplist) {
8450 ret = -TARGET_EFAULT;
8451 goto fail;
8453 for(i = 0;i < gidsetsize; i++)
8454 grouplist[i] = tswap32(target_grouplist[i]);
8455 unlock_user(target_grouplist, arg2, 0);
8456 ret = get_errno(setgroups(gidsetsize, grouplist));
8458 break;
8459 #endif
8460 #ifdef TARGET_NR_fchown32
8461 case TARGET_NR_fchown32:
8462 ret = get_errno(fchown(arg1, arg2, arg3));
8463 break;
8464 #endif
8465 #ifdef TARGET_NR_setresuid32
8466 case TARGET_NR_setresuid32:
8467 ret = get_errno(setresuid(arg1, arg2, arg3));
8468 break;
8469 #endif
8470 #ifdef TARGET_NR_getresuid32
8471 case TARGET_NR_getresuid32:
8473 uid_t ruid, euid, suid;
8474 ret = get_errno(getresuid(&ruid, &euid, &suid));
8475 if (!is_error(ret)) {
8476 if (put_user_u32(ruid, arg1)
8477 || put_user_u32(euid, arg2)
8478 || put_user_u32(suid, arg3))
8479 goto efault;
8482 break;
8483 #endif
8484 #ifdef TARGET_NR_setresgid32
8485 case TARGET_NR_setresgid32:
8486 ret = get_errno(setresgid(arg1, arg2, arg3));
8487 break;
8488 #endif
8489 #ifdef TARGET_NR_getresgid32
8490 case TARGET_NR_getresgid32:
8492 gid_t rgid, egid, sgid;
8493 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8494 if (!is_error(ret)) {
8495 if (put_user_u32(rgid, arg1)
8496 || put_user_u32(egid, arg2)
8497 || put_user_u32(sgid, arg3))
8498 goto efault;
8501 break;
8502 #endif
8503 #ifdef TARGET_NR_chown32
8504 case TARGET_NR_chown32:
8505 if (!(p = lock_user_string(arg1)))
8506 goto efault;
8507 ret = get_errno(chown(p, arg2, arg3));
8508 unlock_user(p, arg1, 0);
8509 break;
8510 #endif
8511 #ifdef TARGET_NR_setuid32
8512 case TARGET_NR_setuid32:
8513 ret = get_errno(setuid(arg1));
8514 break;
8515 #endif
8516 #ifdef TARGET_NR_setgid32
8517 case TARGET_NR_setgid32:
8518 ret = get_errno(setgid(arg1));
8519 break;
8520 #endif
8521 #ifdef TARGET_NR_setfsuid32
8522 case TARGET_NR_setfsuid32:
8523 ret = get_errno(setfsuid(arg1));
8524 break;
8525 #endif
8526 #ifdef TARGET_NR_setfsgid32
8527 case TARGET_NR_setfsgid32:
8528 ret = get_errno(setfsgid(arg1));
8529 break;
8530 #endif
8532 case TARGET_NR_pivot_root:
8533 goto unimplemented;
8534 #ifdef TARGET_NR_mincore
8535 case TARGET_NR_mincore:
8537 void *a;
8538 ret = -TARGET_EFAULT;
8539 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8540 goto efault;
8541 if (!(p = lock_user_string(arg3)))
8542 goto mincore_fail;
8543 ret = get_errno(mincore(a, arg2, p));
8544 unlock_user(p, arg3, ret);
8545 mincore_fail:
8546 unlock_user(a, arg1, 0);
8548 break;
8549 #endif
8550 #ifdef TARGET_NR_arm_fadvise64_64
8551 case TARGET_NR_arm_fadvise64_64:
8552 {
8553 /*
8554 * arm_fadvise64_64 looks like fadvise64_64 but
8555 * with different argument order
8556 */
8557 abi_long temp;
8558 temp = arg3;
8559 arg3 = arg4;
8560 arg4 = temp;
8561 }
8562 #endif
8563 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8564 #ifdef TARGET_NR_fadvise64_64
8565 case TARGET_NR_fadvise64_64:
8566 #endif
8567 #ifdef TARGET_NR_fadvise64
8568 case TARGET_NR_fadvise64:
8569 #endif
8570 #ifdef TARGET_S390X
8571 switch (arg4) {
8572 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8573 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8574 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8575 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8576 default: break;
8578 #endif
8579 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8580 break;
8581 #endif
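/*
 * Note (illustrative, not from the surrounding source): unlike most libc
 * syscall wrappers, posix_fadvise() does not set errno; it returns the error
 * number directly and 0 on success.  That is why the case above uses
 *
 *     ret = -posix_fadvise(arg1, arg2, arg3, arg4);
 *
 * rather than the usual ret = get_errno(...) pattern: negating the positive
 * error code yields the -errno convention used by the rest of this switch.
 */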
8582 #ifdef TARGET_NR_madvise
8583 case TARGET_NR_madvise:
8584 /* A straight passthrough may not be safe because qemu sometimes
8585 turns private file-backed mappings into anonymous mappings.
8586 This will break MADV_DONTNEED.
8587 This is a hint, so ignoring and returning success is ok. */
8588 ret = get_errno(0);
8589 break;
8590 #endif
8591 #if TARGET_ABI_BITS == 32
8592 case TARGET_NR_fcntl64:
8594 int cmd;
8595 struct flock64 fl;
8596 struct target_flock64 *target_fl;
8597 #ifdef TARGET_ARM
8598 struct target_eabi_flock64 *target_efl;
8599 #endif
8601 cmd = target_to_host_fcntl_cmd(arg2);
8602 if (cmd == -TARGET_EINVAL) {
8603 ret = cmd;
8604 break;
8607 switch(arg2) {
8608 case TARGET_F_GETLK64:
8609 #ifdef TARGET_ARM
8610 if (((CPUARMState *)cpu_env)->eabi) {
8611 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8612 goto efault;
8613 fl.l_type = tswap16(target_efl->l_type);
8614 fl.l_whence = tswap16(target_efl->l_whence);
8615 fl.l_start = tswap64(target_efl->l_start);
8616 fl.l_len = tswap64(target_efl->l_len);
8617 fl.l_pid = tswap32(target_efl->l_pid);
8618 unlock_user_struct(target_efl, arg3, 0);
8619 } else
8620 #endif
8622 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8623 goto efault;
8624 fl.l_type = tswap16(target_fl->l_type);
8625 fl.l_whence = tswap16(target_fl->l_whence);
8626 fl.l_start = tswap64(target_fl->l_start);
8627 fl.l_len = tswap64(target_fl->l_len);
8628 fl.l_pid = tswap32(target_fl->l_pid);
8629 unlock_user_struct(target_fl, arg3, 0);
8631 ret = get_errno(fcntl(arg1, cmd, &fl));
8632 if (ret == 0) {
8633 #ifdef TARGET_ARM
8634 if (((CPUARMState *)cpu_env)->eabi) {
8635 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8636 goto efault;
8637 target_efl->l_type = tswap16(fl.l_type);
8638 target_efl->l_whence = tswap16(fl.l_whence);
8639 target_efl->l_start = tswap64(fl.l_start);
8640 target_efl->l_len = tswap64(fl.l_len);
8641 target_efl->l_pid = tswap32(fl.l_pid);
8642 unlock_user_struct(target_efl, arg3, 1);
8643 } else
8644 #endif
8646 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8647 goto efault;
8648 target_fl->l_type = tswap16(fl.l_type);
8649 target_fl->l_whence = tswap16(fl.l_whence);
8650 target_fl->l_start = tswap64(fl.l_start);
8651 target_fl->l_len = tswap64(fl.l_len);
8652 target_fl->l_pid = tswap32(fl.l_pid);
8653 unlock_user_struct(target_fl, arg3, 1);
8656 break;
8658 case TARGET_F_SETLK64:
8659 case TARGET_F_SETLKW64:
8660 #ifdef TARGET_ARM
8661 if (((CPUARMState *)cpu_env)->eabi) {
8662 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8663 goto efault;
8664 fl.l_type = tswap16(target_efl->l_type);
8665 fl.l_whence = tswap16(target_efl->l_whence);
8666 fl.l_start = tswap64(target_efl->l_start);
8667 fl.l_len = tswap64(target_efl->l_len);
8668 fl.l_pid = tswap32(target_efl->l_pid);
8669 unlock_user_struct(target_efl, arg3, 0);
8670 } else
8671 #endif
8673 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8674 goto efault;
8675 fl.l_type = tswap16(target_fl->l_type);
8676 fl.l_whence = tswap16(target_fl->l_whence);
8677 fl.l_start = tswap64(target_fl->l_start);
8678 fl.l_len = tswap64(target_fl->l_len);
8679 fl.l_pid = tswap32(target_fl->l_pid);
8680 unlock_user_struct(target_fl, arg3, 0);
8682 ret = get_errno(fcntl(arg1, cmd, &fl));
8683 break;
8684 default:
8685 ret = do_fcntl(arg1, arg2, arg3);
8686 break;
8688 break;
8690 #endif
8691 #ifdef TARGET_NR_cacheflush
8692 case TARGET_NR_cacheflush:
8693 /* self-modifying code is handled automatically, so nothing needed */
8694 ret = 0;
8695 break;
8696 #endif
8697 #ifdef TARGET_NR_security
8698 case TARGET_NR_security:
8699 goto unimplemented;
8700 #endif
8701 #ifdef TARGET_NR_getpagesize
8702 case TARGET_NR_getpagesize:
8703 ret = TARGET_PAGE_SIZE;
8704 break;
8705 #endif
8706 case TARGET_NR_gettid:
8707 ret = get_errno(gettid());
8708 break;
8709 #ifdef TARGET_NR_readahead
8710 case TARGET_NR_readahead:
8711 #if TARGET_ABI_BITS == 32
8712 if (regpairs_aligned(cpu_env)) {
8713 arg2 = arg3;
8714 arg3 = arg4;
8715 arg4 = arg5;
8717 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8718 #else
8719 ret = get_errno(readahead(arg1, arg2, arg3));
8720 #endif
8721 break;
8722 #endif
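/*
 * Sketch (illustrative, not from the surrounding source): on 32-bit ABIs the
 * 64-bit readahead offset arrives split across a register pair.  Some ABIs
 * require such a pair to start in an even-numbered register, which
 * regpairs_aligned() reports; in that case a padding slot is skipped and the
 * arguments shift down by one, as done above.  The pair is then reassembled
 * as
 *
 *     off64_t off = ((off64_t)high_word << 32) | low_word;
 *
 * where low_word/high_word correspond to arg2/arg3 after the shuffle.
 */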
8723 #ifdef CONFIG_ATTR
8724 #ifdef TARGET_NR_setxattr
8725 case TARGET_NR_listxattr:
8726 case TARGET_NR_llistxattr:
8728 void *p, *b = 0;
8729 if (arg2) {
8730 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8731 if (!b) {
8732 ret = -TARGET_EFAULT;
8733 break;
8736 p = lock_user_string(arg1);
8737 if (p) {
8738 if (num == TARGET_NR_listxattr) {
8739 ret = get_errno(listxattr(p, b, arg3));
8740 } else {
8741 ret = get_errno(llistxattr(p, b, arg3));
8743 } else {
8744 ret = -TARGET_EFAULT;
8746 unlock_user(p, arg1, 0);
8747 unlock_user(b, arg2, arg3);
8748 break;
8750 case TARGET_NR_flistxattr:
8752 void *b = 0;
8753 if (arg2) {
8754 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8755 if (!b) {
8756 ret = -TARGET_EFAULT;
8757 break;
8760 ret = get_errno(flistxattr(arg1, b, arg3));
8761 unlock_user(b, arg2, arg3);
8762 break;
8764 case TARGET_NR_setxattr:
8765 case TARGET_NR_lsetxattr:
8767 void *p, *n, *v = 0;
8768 if (arg3) {
8769 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8770 if (!v) {
8771 ret = -TARGET_EFAULT;
8772 break;
8775 p = lock_user_string(arg1);
8776 n = lock_user_string(arg2);
8777 if (p && n) {
8778 if (num == TARGET_NR_setxattr) {
8779 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8780 } else {
8781 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8783 } else {
8784 ret = -TARGET_EFAULT;
8786 unlock_user(p, arg1, 0);
8787 unlock_user(n, arg2, 0);
8788 unlock_user(v, arg3, 0);
8790 break;
8791 case TARGET_NR_fsetxattr:
8793 void *n, *v = 0;
8794 if (arg3) {
8795 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8796 if (!v) {
8797 ret = -TARGET_EFAULT;
8798 break;
8801 n = lock_user_string(arg2);
8802 if (n) {
8803 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8804 } else {
8805 ret = -TARGET_EFAULT;
8807 unlock_user(n, arg2, 0);
8808 unlock_user(v, arg3, 0);
8810 break;
8811 case TARGET_NR_getxattr:
8812 case TARGET_NR_lgetxattr:
8814 void *p, *n, *v = 0;
8815 if (arg3) {
8816 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8817 if (!v) {
8818 ret = -TARGET_EFAULT;
8819 break;
8822 p = lock_user_string(arg1);
8823 n = lock_user_string(arg2);
8824 if (p && n) {
8825 if (num == TARGET_NR_getxattr) {
8826 ret = get_errno(getxattr(p, n, v, arg4));
8827 } else {
8828 ret = get_errno(lgetxattr(p, n, v, arg4));
8830 } else {
8831 ret = -TARGET_EFAULT;
8833 unlock_user(p, arg1, 0);
8834 unlock_user(n, arg2, 0);
8835 unlock_user(v, arg3, arg4);
8837 break;
8838 case TARGET_NR_fgetxattr:
8840 void *n, *v = 0;
8841 if (arg3) {
8842 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8843 if (!v) {
8844 ret = -TARGET_EFAULT;
8845 break;
8848 n = lock_user_string(arg2);
8849 if (n) {
8850 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8851 } else {
8852 ret = -TARGET_EFAULT;
8854 unlock_user(n, arg2, 0);
8855 unlock_user(v, arg3, arg4);
8857 break;
8858 case TARGET_NR_removexattr:
8859 case TARGET_NR_lremovexattr:
8861 void *p, *n;
8862 p = lock_user_string(arg1);
8863 n = lock_user_string(arg2);
8864 if (p && n) {
8865 if (num == TARGET_NR_removexattr) {
8866 ret = get_errno(removexattr(p, n));
8867 } else {
8868 ret = get_errno(lremovexattr(p, n));
8870 } else {
8871 ret = -TARGET_EFAULT;
8873 unlock_user(p, arg1, 0);
8874 unlock_user(n, arg2, 0);
8876 break;
8877 case TARGET_NR_fremovexattr:
8879 void *n;
8880 n = lock_user_string(arg2);
8881 if (n) {
8882 ret = get_errno(fremovexattr(arg1, n));
8883 } else {
8884 ret = -TARGET_EFAULT;
8886 unlock_user(n, arg2, 0);
8888 break;
8889 #endif
8890 #endif /* CONFIG_ATTR */
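/*
 * Note (illustrative, not from the surrounding source): the xattr cases above
 * all follow the lock_user()/unlock_user() discipline used throughout this
 * file: lock_user(VERIFY_WRITE, addr, len, 0) maps or copies in a guest
 * buffer, and the final argument of unlock_user() is how many bytes to copy
 * back to the guest, e.g.
 *
 *     void *buf = lock_user(VERIFY_WRITE, guest_addr, len, 0);
 *     if (!buf) { ret = -TARGET_EFAULT; break; }
 *     ret = get_errno(listxattr(name, buf, len));
 *     unlock_user(buf, guest_addr, len);   // copy len bytes back to guest
 *
 * Passing 0 as that length (as the read-only paths do) releases the buffer
 * without writing anything back.
 */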
8891 #ifdef TARGET_NR_set_thread_area
8892 case TARGET_NR_set_thread_area:
8893 #if defined(TARGET_MIPS)
8894 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
8895 ret = 0;
8896 break;
8897 #elif defined(TARGET_CRIS)
8898 if (arg1 & 0xff)
8899 ret = -TARGET_EINVAL;
8900 else {
8901 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8902 ret = 0;
8904 break;
8905 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8906 ret = do_set_thread_area(cpu_env, arg1);
8907 break;
8908 #elif defined(TARGET_M68K)
8910 TaskState *ts = cpu->opaque;
8911 ts->tp_value = arg1;
8912 ret = 0;
8913 break;
8915 #else
8916 goto unimplemented_nowarn;
8917 #endif
8918 #endif
8919 #ifdef TARGET_NR_get_thread_area
8920 case TARGET_NR_get_thread_area:
8921 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8922 ret = do_get_thread_area(cpu_env, arg1);
8923 break;
8924 #elif defined(TARGET_M68K)
8926 TaskState *ts = cpu->opaque;
8927 ret = ts->tp_value;
8928 break;
8930 #else
8931 goto unimplemented_nowarn;
8932 #endif
8933 #endif
8934 #ifdef TARGET_NR_getdomainname
8935 case TARGET_NR_getdomainname:
8936 goto unimplemented_nowarn;
8937 #endif
8939 #ifdef TARGET_NR_clock_gettime
8940 case TARGET_NR_clock_gettime:
8942 struct timespec ts;
8943 ret = get_errno(clock_gettime(arg1, &ts));
8944 if (!is_error(ret)) {
8945 host_to_target_timespec(arg2, &ts);
8947 break;
8949 #endif
8950 #ifdef TARGET_NR_clock_getres
8951 case TARGET_NR_clock_getres:
8953 struct timespec ts;
8954 ret = get_errno(clock_getres(arg1, &ts));
8955 if (!is_error(ret)) {
8956 host_to_target_timespec(arg2, &ts);
8958 break;
8960 #endif
8961 #ifdef TARGET_NR_clock_nanosleep
8962 case TARGET_NR_clock_nanosleep:
8964 struct timespec ts;
8965 target_to_host_timespec(&ts, arg3);
8966 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8967 if (arg4)
8968 host_to_target_timespec(arg4, &ts);
8969 break;
8971 #endif
8973 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8974 case TARGET_NR_set_tid_address:
8975 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8976 break;
8977 #endif
8979 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8980 case TARGET_NR_tkill:
8981 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8982 break;
8983 #endif
8985 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8986 case TARGET_NR_tgkill:
8987 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8988 target_to_host_signal(arg3)));
8989 break;
8990 #endif
8992 #ifdef TARGET_NR_set_robust_list
8993 case TARGET_NR_set_robust_list:
8994 case TARGET_NR_get_robust_list:
8995 /* The ABI for supporting robust futexes has userspace pass
8996 * the kernel a pointer to a linked list which is updated by
8997 * userspace after the syscall; the list is walked by the kernel
8998 * when the thread exits. Since the linked list in QEMU guest
8999 * memory isn't a valid linked list for the host and we have
9000 * no way to reliably intercept the thread-death event, we can't
9001 * support these. Silently return ENOSYS so that guest userspace
9002 * falls back to a non-robust futex implementation (which should
9003 * be OK except in the corner case of the guest crashing while
9004 * holding a mutex that is shared with another process via
9005 * shared memory).
9006 */
9007 goto unimplemented_nowarn;
9008 #endif
9010 #if defined(TARGET_NR_utimensat)
9011 case TARGET_NR_utimensat:
9013 struct timespec *tsp, ts[2];
9014 if (!arg3) {
9015 tsp = NULL;
9016 } else {
9017 target_to_host_timespec(ts, arg3);
9018 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9019 tsp = ts;
9021 if (!arg2)
9022 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9023 else {
9024 if (!(p = lock_user_string(arg2))) {
9025 ret = -TARGET_EFAULT;
9026 goto fail;
9028 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9029 unlock_user(p, arg2, 0);
9032 break;
9033 #endif
9034 case TARGET_NR_futex:
9035 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9036 break;
9037 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9038 case TARGET_NR_inotify_init:
9039 ret = get_errno(sys_inotify_init());
9040 break;
9041 #endif
9042 #ifdef CONFIG_INOTIFY1
9043 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9044 case TARGET_NR_inotify_init1:
9045 ret = get_errno(sys_inotify_init1(arg1));
9046 break;
9047 #endif
9048 #endif
9049 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9050 case TARGET_NR_inotify_add_watch:
9051 p = lock_user_string(arg2);
9052 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9053 unlock_user(p, arg2, 0);
9054 break;
9055 #endif
9056 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9057 case TARGET_NR_inotify_rm_watch:
9058 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9059 break;
9060 #endif
9062 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9063 case TARGET_NR_mq_open:
9065 struct mq_attr posix_mq_attr;
9067 p = lock_user_string(arg1 - 1);
9068 if (arg4 != 0)
9069 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9070 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
9071 unlock_user (p, arg1, 0);
9073 break;
9075 case TARGET_NR_mq_unlink:
9076 p = lock_user_string(arg1 - 1);
9077 ret = get_errno(mq_unlink(p));
9078 unlock_user (p, arg1, 0);
9079 break;
9081 case TARGET_NR_mq_timedsend:
9083 struct timespec ts;
9085 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9086 if (arg5 != 0) {
9087 target_to_host_timespec(&ts, arg5);
9088 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9089 host_to_target_timespec(arg5, &ts);
9091 else
9092 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9093 unlock_user (p, arg2, arg3);
9095 break;
9097 case TARGET_NR_mq_timedreceive:
9099 struct timespec ts;
9100 unsigned int prio;
9102 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9103 if (arg5 != 0) {
9104 target_to_host_timespec(&ts, arg5);
9105 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9106 host_to_target_timespec(arg5, &ts);
9108 else
9109 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9110 unlock_user (p, arg2, arg3);
9111 if (arg4 != 0)
9112 put_user_u32(prio, arg4);
9114 break;
9116 /* Not implemented for now... */
9117 /* case TARGET_NR_mq_notify: */
9118 /* break; */
9120 case TARGET_NR_mq_getsetattr:
9122 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9123 ret = 0;
9124 if (arg3 != 0) {
9125 ret = mq_getattr(arg1, &posix_mq_attr_out);
9126 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9128 if (arg2 != 0) {
9129 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9130 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9134 break;
9135 #endif
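/*
 * Sketch (illustrative, not from the surrounding source): the mq_* cases
 * above wrap the POSIX message-queue API more or less 1:1, only translating
 * struct mq_attr and the timespec between guest and host layouts.  The
 * host-side calls being proxied look roughly like this (queue name and sizes
 * are made up):
 *
 *     struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *     mqd_t q = mq_open("/example", O_CREAT | O_RDWR, 0600, &attr);
 *     mq_send(q, buf, len, prio);                // or mq_timedsend(..., &ts)
 *     ssize_t n = mq_receive(q, buf, attr.mq_msgsize, &prio);
 *     mq_close(q);
 *     mq_unlink("/example");
 */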
9137 #ifdef CONFIG_SPLICE
9138 #ifdef TARGET_NR_tee
9139 case TARGET_NR_tee:
9141 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9143 break;
9144 #endif
9145 #ifdef TARGET_NR_splice
9146 case TARGET_NR_splice:
9147 {
9148 loff_t loff_in, loff_out;
9149 loff_t *ploff_in = NULL, *ploff_out = NULL;
9150 if(arg2) {
9151 get_user_u64(loff_in, arg2);
9152 ploff_in = &loff_in;
9153 }
9154 if(arg4) {
9155 get_user_u64(loff_out, arg4);
9156 ploff_out = &loff_out;
9157 }
9158 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9159 }
9160 break;
9161 #endif
9162 #ifdef TARGET_NR_vmsplice
9163 case TARGET_NR_vmsplice:
9165 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9166 if (vec != NULL) {
9167 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9168 unlock_iovec(vec, arg2, arg3, 0);
9169 } else {
9170 ret = -host_to_target_errno(errno);
9173 break;
9174 #endif
9175 #endif /* CONFIG_SPLICE */
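/*
 * Sketch (illustrative, not from the surrounding source): tee(), splice()
 * and vmsplice() move data through pipes without an intermediate copy in
 * user space, which is why the handlers above only need to marshal the
 * optional loff_t offsets and the iovec.  Typical host-side usage of the
 * calls being emulated, with hypothetical descriptors file_fd/sock_fd:
 *
 *     int pfd[2];
 *     pipe(pfd);
 *     loff_t off = 0;
 *     splice(file_fd, &off, pfd[1], NULL, 4096, SPLICE_F_MOVE); // file -> pipe
 *     splice(pfd[0], NULL, sock_fd, NULL, 4096, SPLICE_F_MOVE); // pipe -> socket
 */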
9176 #ifdef CONFIG_EVENTFD
9177 #if defined(TARGET_NR_eventfd)
9178 case TARGET_NR_eventfd:
9179 ret = get_errno(eventfd(arg1, 0));
9180 break;
9181 #endif
9182 #if defined(TARGET_NR_eventfd2)
9183 case TARGET_NR_eventfd2:
9185 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9186 if (arg2 & TARGET_O_NONBLOCK) {
9187 host_flags |= O_NONBLOCK;
9189 if (arg2 & TARGET_O_CLOEXEC) {
9190 host_flags |= O_CLOEXEC;
9192 ret = get_errno(eventfd(arg1, host_flags));
9193 break;
9195 #endif
9196 #endif /* CONFIG_EVENTFD */
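/*
 * Note (illustrative, not from the surrounding source): the eventfd2 case
 * above shows the usual pattern for translating flag arguments.  The numeric
 * values of TARGET_O_NONBLOCK/TARGET_O_CLOEXEC may differ from the host's
 * O_NONBLOCK/O_CLOEXEC, so each known bit is tested on the target side and
 * re-asserted with the host constant:
 *
 *     if (target_flags & TARGET_O_NONBLOCK) host_flags |= O_NONBLOCK;
 *     if (target_flags & TARGET_O_CLOEXEC)  host_flags |= O_CLOEXEC;
 *
 * Bits that are not recognised are passed through unchanged so the host
 * kernel can reject them.
 */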
9197 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9198 case TARGET_NR_fallocate:
9199 #if TARGET_ABI_BITS == 32
9200 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9201 target_offset64(arg5, arg6)));
9202 #else
9203 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9204 #endif
9205 break;
9206 #endif
9207 #if defined(CONFIG_SYNC_FILE_RANGE)
9208 #if defined(TARGET_NR_sync_file_range)
9209 case TARGET_NR_sync_file_range:
9210 #if TARGET_ABI_BITS == 32
9211 #if defined(TARGET_MIPS)
9212 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9213 target_offset64(arg5, arg6), arg7));
9214 #else
9215 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9216 target_offset64(arg4, arg5), arg6));
9217 #endif /* !TARGET_MIPS */
9218 #else
9219 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9220 #endif
9221 break;
9222 #endif
9223 #if defined(TARGET_NR_sync_file_range2)
9224 case TARGET_NR_sync_file_range2:
9225 /* This is like sync_file_range but the arguments are reordered */
9226 #if TARGET_ABI_BITS == 32
9227 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9228 target_offset64(arg5, arg6), arg2));
9229 #else
9230 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9231 #endif
9232 break;
9233 #endif
9234 #endif
9235 #if defined(CONFIG_EPOLL)
9236 #if defined(TARGET_NR_epoll_create)
9237 case TARGET_NR_epoll_create:
9238 ret = get_errno(epoll_create(arg1));
9239 break;
9240 #endif
9241 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9242 case TARGET_NR_epoll_create1:
9243 ret = get_errno(epoll_create1(arg1));
9244 break;
9245 #endif
9246 #if defined(TARGET_NR_epoll_ctl)
9247 case TARGET_NR_epoll_ctl:
9249 struct epoll_event ep;
9250 struct epoll_event *epp = 0;
9251 if (arg4) {
9252 struct target_epoll_event *target_ep;
9253 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9254 goto efault;
9256 ep.events = tswap32(target_ep->events);
9257 /* The epoll_data_t union is just opaque data to the kernel,
9258 * so we transfer all 64 bits across and need not worry what
9259 * actual data type it is.
9260 */
9261 ep.data.u64 = tswap64(target_ep->data.u64);
9262 unlock_user_struct(target_ep, arg4, 0);
9263 epp = &ep;
9265 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9266 break;
9268 #endif
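/*
 * Note (illustrative, not from the surrounding source): only the 'events'
 * field of struct epoll_event needs byte-swapping; epoll_data_t is a union
 * the kernel never interprets, so the handler above moves it as a raw 64-bit
 * quantity:
 *
 *     ep.events   = tswap32(target_ep->events);
 *     ep.data.u64 = tswap64(target_ep->data.u64);
 *
 * The same conversion is applied in reverse when epoll_wait/epoll_pwait
 * results are copied back to the guest further down.
 */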
9270 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9271 #define IMPLEMENT_EPOLL_PWAIT
9272 #endif
9273 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9274 #if defined(TARGET_NR_epoll_wait)
9275 case TARGET_NR_epoll_wait:
9276 #endif
9277 #if defined(IMPLEMENT_EPOLL_PWAIT)
9278 case TARGET_NR_epoll_pwait:
9279 #endif
9281 struct target_epoll_event *target_ep;
9282 struct epoll_event *ep;
9283 int epfd = arg1;
9284 int maxevents = arg3;
9285 int timeout = arg4;
9287 target_ep = lock_user(VERIFY_WRITE, arg2,
9288 maxevents * sizeof(struct target_epoll_event), 1);
9289 if (!target_ep) {
9290 goto efault;
9293 ep = alloca(maxevents * sizeof(struct epoll_event));
9295 switch (num) {
9296 #if defined(IMPLEMENT_EPOLL_PWAIT)
9297 case TARGET_NR_epoll_pwait:
9299 target_sigset_t *target_set;
9300 sigset_t _set, *set = &_set;
9302 if (arg5) {
9303 target_set = lock_user(VERIFY_READ, arg5,
9304 sizeof(target_sigset_t), 1);
9305 if (!target_set) {
9306 unlock_user(target_ep, arg2, 0);
9307 goto efault;
9309 target_to_host_sigset(set, target_set);
9310 unlock_user(target_set, arg5, 0);
9311 } else {
9312 set = NULL;
9315 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9316 break;
9318 #endif
9319 #if defined(TARGET_NR_epoll_wait)
9320 case TARGET_NR_epoll_wait:
9321 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9322 break;
9323 #endif
9324 default:
9325 ret = -TARGET_ENOSYS;
9327 if (!is_error(ret)) {
9328 int i;
9329 for (i = 0; i < ret; i++) {
9330 target_ep[i].events = tswap32(ep[i].events);
9331 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9334 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9335 break;
9337 #endif
9338 #endif
9339 #ifdef TARGET_NR_prlimit64
9340 case TARGET_NR_prlimit64:
9342 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9343 struct target_rlimit64 *target_rnew, *target_rold;
9344 struct host_rlimit64 rnew, rold, *rnewp = 0;
9345 if (arg3) {
9346 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9347 goto efault;
9349 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9350 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9351 unlock_user_struct(target_rnew, arg3, 0);
9352 rnewp = &rnew;
9355 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
9356 if (!is_error(ret) && arg4) {
9357 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9358 goto efault;
9360 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9361 target_rold->rlim_max = tswap64(rold.rlim_max);
9362 unlock_user_struct(target_rold, arg4, 1);
9364 break;
9366 #endif
9367 #ifdef TARGET_NR_gethostname
9368 case TARGET_NR_gethostname:
9370 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9371 if (name) {
9372 ret = get_errno(gethostname(name, arg2));
9373 unlock_user(name, arg1, arg2);
9374 } else {
9375 ret = -TARGET_EFAULT;
9377 break;
9379 #endif
9380 #ifdef TARGET_NR_atomic_cmpxchg_32
9381 case TARGET_NR_atomic_cmpxchg_32:
9383 /* should use start_exclusive from main.c */
9384 abi_ulong mem_value;
9385 if (get_user_u32(mem_value, arg6)) {
9386 target_siginfo_t info;
9387 info.si_signo = SIGSEGV;
9388 info.si_errno = 0;
9389 info.si_code = TARGET_SEGV_MAPERR;
9390 info._sifields._sigfault._addr = arg6;
9391 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9392 ret = 0xdeadbeef;
9395 if (mem_value == arg2)
9396 put_user_u32(arg1, arg6);
9397 ret = mem_value;
9398 break;
9400 #endif
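/*
 * Sketch (illustrative, not from the surrounding source): as the comment in
 * the case above notes, this compare-and-exchange is not atomic with respect
 * to other guest threads; another thread could store to the word between the
 * get_user_u32() and the put_user_u32().  One way to close the window would
 * be to bracket the operation with the exclusive-section helpers used for
 * the ARM kernel cmpxchg trap in linux-user/main.c, roughly:
 *
 *     start_exclusive();          // park all other CPU threads
 *     if (get_user_u32(mem_value, arg6) == 0 && mem_value == arg2) {
 *         put_user_u32(arg1, arg6);
 *     }
 *     end_exclusive();            // let them run again
 *
 * (sketch only; whether those helpers are visible from this file depends on
 * the tree).
 */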
9401 #ifdef TARGET_NR_atomic_barrier
9402 case TARGET_NR_atomic_barrier:
9404 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9405 ret = 0;
9406 break;
9408 #endif
9410 #ifdef TARGET_NR_timer_create
9411 case TARGET_NR_timer_create:
9413 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9415 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
9416 struct target_sigevent *ptarget_sevp;
9417 struct target_timer_t *ptarget_timer;
9419 int clkid = arg1;
9420 int timer_index = next_free_host_timer();
9422 if (timer_index < 0) {
9423 ret = -TARGET_EAGAIN;
9424 } else {
9425 timer_t *phtimer = g_posix_timers + timer_index;
9427 if (arg2) {
9428 if (!lock_user_struct(VERIFY_READ, ptarget_sevp, arg2, 1)) {
9429 goto efault;
9432 host_sevp.sigev_signo = tswap32(ptarget_sevp->sigev_signo);
9433 host_sevp.sigev_notify = tswap32(ptarget_sevp->sigev_notify);
9435 phost_sevp = &host_sevp;
9438 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
9439 if (ret) {
9440 phtimer = NULL;
9441 } else {
9442 if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) {
9443 goto efault;
9445 ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index);
9446 unlock_user_struct(ptarget_timer, arg3, 1);
9449 break;
9451 #endif
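/*
 * Note (illustrative, not from the surrounding source): timer_create above
 * hands the guest a cookie of the form 0xcafe0000 | timer_index rather than
 * the raw host timer_t.  The timer_settime/gettime/getoverrun/delete cases
 * below recover the index with
 *
 *     arg1 &= 0xffff;                      // strip the 0xcafe marker
 *     timer_t htimer = g_posix_timers[arg1];
 *
 * after range-checking arg1 against ARRAY_SIZE(g_posix_timers), so guest
 * timer ids are simply indices into that small host-side table.
 */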
9453 #ifdef TARGET_NR_timer_settime
9454 case TARGET_NR_timer_settime:
9455 {
9456 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9457 * struct itimerspec * old_value */
9458 arg1 &= 0xffff;
9459 if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9460 ret = -TARGET_EINVAL;
9461 } else {
9462 timer_t htimer = g_posix_timers[arg1];
9463 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
9465 target_to_host_itimerspec(&hspec_new, arg3);
9466 ret = get_errno(
9467 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
9468 if (arg4) host_to_target_itimerspec(arg4, &hspec_old);
9469 }
9470 break;
9471 }
9472 #endif
9474 #ifdef TARGET_NR_timer_gettime
9475 case TARGET_NR_timer_gettime:
9477 /* args: timer_t timerid, struct itimerspec *curr_value */
9478 arg1 &= 0xffff;
9479 if (!arg2) {
9480 return -TARGET_EFAULT;
9481 } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9482 ret = -TARGET_EINVAL;
9483 } else {
9484 timer_t htimer = g_posix_timers[arg1];
9485 struct itimerspec hspec;
9486 ret = get_errno(timer_gettime(htimer, &hspec));
9488 if (host_to_target_itimerspec(arg2, &hspec)) {
9489 ret = -TARGET_EFAULT;
9492 break;
9494 #endif
9496 #ifdef TARGET_NR_timer_getoverrun
9497 case TARGET_NR_timer_getoverrun:
9499 /* args: timer_t timerid */
9500 arg1 &= 0xffff;
9501 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9502 ret = -TARGET_EINVAL;
9503 } else {
9504 timer_t htimer = g_posix_timers[arg1];
9505 ret = get_errno(timer_getoverrun(htimer));
9507 break;
9509 #endif
9511 #ifdef TARGET_NR_timer_delete
9512 case TARGET_NR_timer_delete:
9514 /* args: timer_t timerid */
9515 arg1 &= 0xffff;
9516 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9517 ret = -TARGET_EINVAL;
9518 } else {
9519 timer_t htimer = g_posix_timers[arg1];
9520 ret = get_errno(timer_delete(htimer));
9521 g_posix_timers[arg1] = 0;
9523 break;
9525 #endif
9527 default:
9528 unimplemented:
9529 gemu_log("qemu: Unsupported syscall: %d\n", num);
9530 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9531 unimplemented_nowarn:
9532 #endif
9533 ret = -TARGET_ENOSYS;
9534 break;
9536 fail:
9537 #ifdef DEBUG
9538 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9539 #endif
9540 if(do_strace)
9541 print_syscall_ret(num, ret);
9542 return ret;
9543 efault:
9544 ret = -TARGET_EFAULT;
9545 goto fail;