linux-user: Allocate thunk size dynamically
[qemu/ar7.git] / linux-user / syscall.c
blob f56f3e0431af6d2f08cf9d383b41ada80ecc53c9
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <linux/capability.h>
47 #include <signal.h>
48 #include <sched.h>
49 #ifdef __ia64__
50 int __clone2(int (*fn)(void *), void *child_stack_base,
51 size_t stack_size, int flags, void *arg, ...);
52 #endif
53 #include <sys/socket.h>
54 #include <sys/un.h>
55 #include <sys/uio.h>
56 #include <sys/poll.h>
57 #include <sys/times.h>
58 #include <sys/shm.h>
59 #include <sys/sem.h>
60 #include <sys/statfs.h>
61 #include <utime.h>
62 #include <sys/sysinfo.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef CONFIG_TIMERFD
70 #include <sys/timerfd.h>
71 #endif
72 #ifdef TARGET_GPROF
73 #include <sys/gmon.h>
74 #endif
75 #ifdef CONFIG_EVENTFD
76 #include <sys/eventfd.h>
77 #endif
78 #ifdef CONFIG_EPOLL
79 #include <sys/epoll.h>
80 #endif
81 #ifdef CONFIG_ATTR
82 #include "qemu/xattr.h"
83 #endif
84 #ifdef CONFIG_SENDFILE
85 #include <sys/sendfile.h>
86 #endif
88 #define termios host_termios
89 #define winsize host_winsize
90 #define termio host_termio
91 #define sgttyb host_sgttyb /* same as target */
92 #define tchars host_tchars /* same as target */
93 #define ltchars host_ltchars /* same as target */
95 #include <linux/termios.h>
96 #include <linux/unistd.h>
97 #include <linux/cdrom.h>
98 #include <linux/hdreg.h>
99 #include <linux/soundcard.h>
100 #include <linux/kd.h>
101 #include <linux/mtio.h>
102 #include <linux/fs.h>
103 #if defined(CONFIG_FIEMAP)
104 #include <linux/fiemap.h>
105 #endif
106 #include <linux/fb.h>
107 #include <linux/vt.h>
108 #include <linux/dm-ioctl.h>
109 #include <linux/reboot.h>
110 #include <linux/route.h>
111 #include <linux/filter.h>
112 #include <linux/blkpg.h>
113 #include "linux_loop.h"
114 #include "uname.h"
116 #include "qemu.h"
118 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
119 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
121 //#define DEBUG
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
128 #undef _syscall0
129 #undef _syscall1
130 #undef _syscall2
131 #undef _syscall3
132 #undef _syscall4
133 #undef _syscall5
134 #undef _syscall6
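/* Re-define the _syscallN() macros below as thin static wrappers around the
 * host syscall(2) entry point, so individual host system calls can be issued
 * directly even when the host libc does not provide a wrapper for them. */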
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
167 type5,arg5) \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
177 type6 arg6) \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_tgkill __NR_tgkill
191 #define __NR_sys_tkill __NR_tkill
192 #define __NR_sys_futex __NR_futex
193 #define __NR_sys_inotify_init __NR_inotify_init
194 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
195 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
197 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
198 defined(__s390x__)
199 #define __NR__llseek __NR_lseek
200 #endif
202 /* Newer kernel ports have llseek() instead of _llseek() */
203 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
204 #define TARGET_NR__llseek TARGET_NR_llseek
205 #endif
207 #ifdef __NR_gettid
208 _syscall0(int, gettid)
209 #else
210 /* This is a replacement for the host gettid() and must return a host
211 errno. */
212 static int gettid(void) {
213 return -ENOSYS;
215 #endif
216 #ifdef __NR_getdents
217 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
218 #endif
219 #if !defined(__NR_getdents) || \
220 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
221 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
222 #endif
223 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
224 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
225 loff_t *, res, uint, wh);
226 #endif
227 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
228 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
229 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
230 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
231 #endif
232 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
233 _syscall2(int,sys_tkill,int,tid,int,sig)
234 #endif
235 #ifdef __NR_exit_group
236 _syscall1(int,exit_group,int,error_code)
237 #endif
238 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
239 _syscall1(int,set_tid_address,int *,tidptr)
240 #endif
241 #if defined(TARGET_NR_futex) && defined(__NR_futex)
242 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
243 const struct timespec *,timeout,int *,uaddr2,int,val3)
244 #endif
245 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
246 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
247 unsigned long *, user_mask_ptr);
248 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
249 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
250 unsigned long *, user_mask_ptr);
251 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
252 void *, arg);
253 _syscall2(int, capget, struct __user_cap_header_struct *, header,
254 struct __user_cap_data_struct *, data);
255 _syscall2(int, capset, struct __user_cap_header_struct *, header,
256 struct __user_cap_data_struct *, data);
257 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
258 _syscall2(int, ioprio_get, int, which, int, who)
259 #endif
260 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
261 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
262 #endif
264 static bitmask_transtbl fcntl_flags_tbl[] = {
265 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
266 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
267 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
268 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
269 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
270 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
271 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
272 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
273 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
274 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
275 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
276 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
277 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
278 #if defined(O_DIRECT)
279 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
280 #endif
281 #if defined(O_NOATIME)
282 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
283 #endif
284 #if defined(O_CLOEXEC)
285 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
286 #endif
287 #if defined(O_PATH)
288 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
289 #endif
290 /* Don't terminate the list prematurely on 64-bit host+guest. */
291 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
292 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
293 #endif
294 { 0, 0, 0, 0 }
297 static int sys_getcwd1(char *buf, size_t size)
299 if (getcwd(buf, size) == NULL) {
300 /* getcwd() sets errno */
301 return (-1);
303 return strlen(buf)+1;
306 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
309 * open(2) has extra parameter 'mode' when called with
310 * flag O_CREAT.
312 if ((flags & O_CREAT) != 0) {
313 return (openat(dirfd, pathname, flags, mode));
315 return (openat(dirfd, pathname, flags));
318 #ifdef TARGET_NR_utimensat
319 #ifdef CONFIG_UTIMENSAT
320 static int sys_utimensat(int dirfd, const char *pathname,
321 const struct timespec times[2], int flags)
323 if (pathname == NULL)
324 return futimens(dirfd, times);
325 else
326 return utimensat(dirfd, pathname, times, flags);
328 #elif defined(__NR_utimensat)
329 #define __NR_sys_utimensat __NR_utimensat
330 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
331 const struct timespec *,tsp,int,flags)
332 #else
333 static int sys_utimensat(int dirfd, const char *pathname,
334 const struct timespec times[2], int flags)
336 errno = ENOSYS;
337 return -1;
339 #endif
340 #endif /* TARGET_NR_utimensat */
342 #ifdef CONFIG_INOTIFY
343 #include <sys/inotify.h>
345 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
346 static int sys_inotify_init(void)
348 return (inotify_init());
350 #endif
351 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
352 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
354 return (inotify_add_watch(fd, pathname, mask));
356 #endif
357 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
358 static int sys_inotify_rm_watch(int fd, int32_t wd)
360 return (inotify_rm_watch(fd, wd));
362 #endif
363 #ifdef CONFIG_INOTIFY1
364 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
365 static int sys_inotify_init1(int flags)
367 return (inotify_init1(flags));
369 #endif
370 #endif
371 #else
372 /* Userspace can usually survive runtime without inotify */
373 #undef TARGET_NR_inotify_init
374 #undef TARGET_NR_inotify_init1
375 #undef TARGET_NR_inotify_add_watch
376 #undef TARGET_NR_inotify_rm_watch
377 #endif /* CONFIG_INOTIFY */
379 #if defined(TARGET_NR_ppoll)
380 #ifndef __NR_ppoll
381 # define __NR_ppoll -1
382 #endif
383 #define __NR_sys_ppoll __NR_ppoll
384 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
385 struct timespec *, timeout, const sigset_t *, sigmask,
386 size_t, sigsetsize)
387 #endif
389 #if defined(TARGET_NR_pselect6)
390 #ifndef __NR_pselect6
391 # define __NR_pselect6 -1
392 #endif
393 #define __NR_sys_pselect6 __NR_pselect6
394 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
395 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
396 #endif
398 #if defined(TARGET_NR_prlimit64)
399 #ifndef __NR_prlimit64
400 # define __NR_prlimit64 -1
401 #endif
402 #define __NR_sys_prlimit64 __NR_prlimit64
403 /* The glibc rlimit structure may not be the one used by the underlying syscall */
404 struct host_rlimit64 {
405 uint64_t rlim_cur;
406 uint64_t rlim_max;
408 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
409 const struct host_rlimit64 *, new_limit,
410 struct host_rlimit64 *, old_limit)
411 #endif
414 #if defined(TARGET_NR_timer_create)
415 /* Maximum of 32 active POSIX timers allowed at any one time. */
416 static timer_t g_posix_timers[32] = { 0, } ;
418 static inline int next_free_host_timer(void)
420 int k ;
421 /* FIXME: Does finding the next free slot require a lock? */
422 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
423 if (g_posix_timers[k] == 0) {
424 g_posix_timers[k] = (timer_t) 1;
425 return k;
428 return -1;
430 #endif
432 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
433 #ifdef TARGET_ARM
434 static inline int regpairs_aligned(void *cpu_env) {
435 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
437 #elif defined(TARGET_MIPS)
438 static inline int regpairs_aligned(void *cpu_env) { return 1; }
439 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
440 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
441 * of registers which translates to the same as ARM/MIPS, because we start with
442 * r3 as arg1 */
443 static inline int regpairs_aligned(void *cpu_env) { return 1; }
444 #else
445 static inline int regpairs_aligned(void *cpu_env) { return 0; }
446 #endif
448 #define ERRNO_TABLE_SIZE 1200
450 /* target_to_host_errno_table[] is initialized from
451 * host_to_target_errno_table[] in syscall_init(). */
452 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
456 * This list is the union of errno values overridden in asm-<arch>/errno.h
457 * minus the errnos that are not actually generic to all archs.
459 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
460 [EIDRM] = TARGET_EIDRM,
461 [ECHRNG] = TARGET_ECHRNG,
462 [EL2NSYNC] = TARGET_EL2NSYNC,
463 [EL3HLT] = TARGET_EL3HLT,
464 [EL3RST] = TARGET_EL3RST,
465 [ELNRNG] = TARGET_ELNRNG,
466 [EUNATCH] = TARGET_EUNATCH,
467 [ENOCSI] = TARGET_ENOCSI,
468 [EL2HLT] = TARGET_EL2HLT,
469 [EDEADLK] = TARGET_EDEADLK,
470 [ENOLCK] = TARGET_ENOLCK,
471 [EBADE] = TARGET_EBADE,
472 [EBADR] = TARGET_EBADR,
473 [EXFULL] = TARGET_EXFULL,
474 [ENOANO] = TARGET_ENOANO,
475 [EBADRQC] = TARGET_EBADRQC,
476 [EBADSLT] = TARGET_EBADSLT,
477 [EBFONT] = TARGET_EBFONT,
478 [ENOSTR] = TARGET_ENOSTR,
479 [ENODATA] = TARGET_ENODATA,
480 [ETIME] = TARGET_ETIME,
481 [ENOSR] = TARGET_ENOSR,
482 [ENONET] = TARGET_ENONET,
483 [ENOPKG] = TARGET_ENOPKG,
484 [EREMOTE] = TARGET_EREMOTE,
485 [ENOLINK] = TARGET_ENOLINK,
486 [EADV] = TARGET_EADV,
487 [ESRMNT] = TARGET_ESRMNT,
488 [ECOMM] = TARGET_ECOMM,
489 [EPROTO] = TARGET_EPROTO,
490 [EDOTDOT] = TARGET_EDOTDOT,
491 [EMULTIHOP] = TARGET_EMULTIHOP,
492 [EBADMSG] = TARGET_EBADMSG,
493 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
494 [EOVERFLOW] = TARGET_EOVERFLOW,
495 [ENOTUNIQ] = TARGET_ENOTUNIQ,
496 [EBADFD] = TARGET_EBADFD,
497 [EREMCHG] = TARGET_EREMCHG,
498 [ELIBACC] = TARGET_ELIBACC,
499 [ELIBBAD] = TARGET_ELIBBAD,
500 [ELIBSCN] = TARGET_ELIBSCN,
501 [ELIBMAX] = TARGET_ELIBMAX,
502 [ELIBEXEC] = TARGET_ELIBEXEC,
503 [EILSEQ] = TARGET_EILSEQ,
504 [ENOSYS] = TARGET_ENOSYS,
505 [ELOOP] = TARGET_ELOOP,
506 [ERESTART] = TARGET_ERESTART,
507 [ESTRPIPE] = TARGET_ESTRPIPE,
508 [ENOTEMPTY] = TARGET_ENOTEMPTY,
509 [EUSERS] = TARGET_EUSERS,
510 [ENOTSOCK] = TARGET_ENOTSOCK,
511 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
512 [EMSGSIZE] = TARGET_EMSGSIZE,
513 [EPROTOTYPE] = TARGET_EPROTOTYPE,
514 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
515 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
516 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
517 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
518 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
519 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
520 [EADDRINUSE] = TARGET_EADDRINUSE,
521 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
522 [ENETDOWN] = TARGET_ENETDOWN,
523 [ENETUNREACH] = TARGET_ENETUNREACH,
524 [ENETRESET] = TARGET_ENETRESET,
525 [ECONNABORTED] = TARGET_ECONNABORTED,
526 [ECONNRESET] = TARGET_ECONNRESET,
527 [ENOBUFS] = TARGET_ENOBUFS,
528 [EISCONN] = TARGET_EISCONN,
529 [ENOTCONN] = TARGET_ENOTCONN,
530 [EUCLEAN] = TARGET_EUCLEAN,
531 [ENOTNAM] = TARGET_ENOTNAM,
532 [ENAVAIL] = TARGET_ENAVAIL,
533 [EISNAM] = TARGET_EISNAM,
534 [EREMOTEIO] = TARGET_EREMOTEIO,
535 [ESHUTDOWN] = TARGET_ESHUTDOWN,
536 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
537 [ETIMEDOUT] = TARGET_ETIMEDOUT,
538 [ECONNREFUSED] = TARGET_ECONNREFUSED,
539 [EHOSTDOWN] = TARGET_EHOSTDOWN,
540 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
541 [EALREADY] = TARGET_EALREADY,
542 [EINPROGRESS] = TARGET_EINPROGRESS,
543 [ESTALE] = TARGET_ESTALE,
544 [ECANCELED] = TARGET_ECANCELED,
545 [ENOMEDIUM] = TARGET_ENOMEDIUM,
546 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
547 #ifdef ENOKEY
548 [ENOKEY] = TARGET_ENOKEY,
549 #endif
550 #ifdef EKEYEXPIRED
551 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
552 #endif
553 #ifdef EKEYREVOKED
554 [EKEYREVOKED] = TARGET_EKEYREVOKED,
555 #endif
556 #ifdef EKEYREJECTED
557 [EKEYREJECTED] = TARGET_EKEYREJECTED,
558 #endif
559 #ifdef EOWNERDEAD
560 [EOWNERDEAD] = TARGET_EOWNERDEAD,
561 #endif
562 #ifdef ENOTRECOVERABLE
563 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
564 #endif
567 static inline int host_to_target_errno(int err)
569 if(host_to_target_errno_table[err])
570 return host_to_target_errno_table[err];
571 return err;
574 static inline int target_to_host_errno(int err)
576 if (target_to_host_errno_table[err])
577 return target_to_host_errno_table[err];
578 return err;
581 static inline abi_long get_errno(abi_long ret)
583 if (ret == -1)
584 return -host_to_target_errno(errno);
585 else
586 return ret;
589 static inline int is_error(abi_long ret)
591 return (abi_ulong)ret >= (abi_ulong)(-4096);
594 char *target_strerror(int err)
596 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
597 return NULL;
599 return strerror(target_to_host_errno(err));
602 static inline int host_to_target_sock_type(int host_type)
604 int target_type;
606 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
607 case SOCK_DGRAM:
608 target_type = TARGET_SOCK_DGRAM;
609 break;
610 case SOCK_STREAM:
611 target_type = TARGET_SOCK_STREAM;
612 break;
613 default:
614 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
615 break;
618 #if defined(SOCK_CLOEXEC)
619 if (host_type & SOCK_CLOEXEC) {
620 target_type |= TARGET_SOCK_CLOEXEC;
622 #endif
624 #if defined(SOCK_NONBLOCK)
625 if (host_type & SOCK_NONBLOCK) {
626 target_type |= TARGET_SOCK_NONBLOCK;
628 #endif
630 return target_type;
633 static abi_ulong target_brk;
634 static abi_ulong target_original_brk;
635 static abi_ulong brk_page;
637 void target_set_brk(abi_ulong new_brk)
639 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
640 brk_page = HOST_PAGE_ALIGN(target_brk);
643 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
644 #define DEBUGF_BRK(message, args...)
646 /* do_brk() must return target values and target errnos. */
647 abi_long do_brk(abi_ulong new_brk)
649 abi_long mapped_addr;
650 int new_alloc_size;
652 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
654 if (!new_brk) {
655 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
656 return target_brk;
658 if (new_brk < target_original_brk) {
659 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
660 target_brk);
661 return target_brk;
664 /* If the new brk is less than the highest page reserved to the
665 * target heap allocation, set it and we're almost done... */
666 if (new_brk <= brk_page) {
667 /* Heap contents are initialized to zero, as for anonymous
668 * mapped pages. */
669 if (new_brk > target_brk) {
670 memset(g2h(target_brk), 0, new_brk - target_brk);
672 target_brk = new_brk;
673 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
674 return target_brk;
677 /* We need to allocate more memory after the brk... Note that
678 * we don't use MAP_FIXED because that will map over the top of
679 * any existing mapping (like the one with the host libc or qemu
680 * itself); instead we treat "mapped but at wrong address" as
681 * a failure and unmap again.
683 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
684 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
685 PROT_READ|PROT_WRITE,
686 MAP_ANON|MAP_PRIVATE, 0, 0));
688 if (mapped_addr == brk_page) {
689 /* Heap contents are initialized to zero, as for anonymous
690 * mapped pages. Technically the new pages are already
691 * initialized to zero since they *are* anonymous mapped
692 * pages, however we have to take care with the contents that
693 * come from the remaining part of the previous page: it may
694 * contain garbage data due to a previous heap usage (grown
695 * then shrunken). */
696 memset(g2h(target_brk), 0, brk_page - target_brk);
698 target_brk = new_brk;
699 brk_page = HOST_PAGE_ALIGN(target_brk);
700 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
701 target_brk);
702 return target_brk;
703 } else if (mapped_addr != -1) {
704 /* Mapped but at wrong address, meaning there wasn't actually
705 * enough space for this brk.
707 target_munmap(mapped_addr, new_alloc_size);
708 mapped_addr = -1;
709 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
711 else {
712 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
715 #if defined(TARGET_ALPHA)
716 /* We (partially) emulate OSF/1 on Alpha, which requires we
717 return a proper errno, not an unchanged brk value. */
718 return -TARGET_ENOMEM;
719 #endif
720 /* For everything else, return the previous break. */
721 return target_brk;
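/* Helpers to convert select()-style fd_set bitmaps between the guest
 * representation (an array of abi_ulong words in target byte order) and the
 * host fd_set, one bit per file descriptor. */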
724 static inline abi_long copy_from_user_fdset(fd_set *fds,
725 abi_ulong target_fds_addr,
726 int n)
728 int i, nw, j, k;
729 abi_ulong b, *target_fds;
731 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
732 if (!(target_fds = lock_user(VERIFY_READ,
733 target_fds_addr,
734 sizeof(abi_ulong) * nw,
735 1)))
736 return -TARGET_EFAULT;
738 FD_ZERO(fds);
739 k = 0;
740 for (i = 0; i < nw; i++) {
741 /* grab the abi_ulong */
742 __get_user(b, &target_fds[i]);
743 for (j = 0; j < TARGET_ABI_BITS; j++) {
744 /* check the bit inside the abi_ulong */
745 if ((b >> j) & 1)
746 FD_SET(k, fds);
747 k++;
751 unlock_user(target_fds, target_fds_addr, 0);
753 return 0;
756 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
757 abi_ulong target_fds_addr,
758 int n)
760 if (target_fds_addr) {
761 if (copy_from_user_fdset(fds, target_fds_addr, n))
762 return -TARGET_EFAULT;
763 *fds_ptr = fds;
764 } else {
765 *fds_ptr = NULL;
767 return 0;
770 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
771 const fd_set *fds,
772 int n)
774 int i, nw, j, k;
775 abi_long v;
776 abi_ulong *target_fds;
778 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
779 if (!(target_fds = lock_user(VERIFY_WRITE,
780 target_fds_addr,
781 sizeof(abi_ulong) * nw,
782 0)))
783 return -TARGET_EFAULT;
785 k = 0;
786 for (i = 0; i < nw; i++) {
787 v = 0;
788 for (j = 0; j < TARGET_ABI_BITS; j++) {
789 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
790 k++;
792 __put_user(v, &target_fds[i]);
795 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
797 return 0;
800 #if defined(__alpha__)
801 #define HOST_HZ 1024
802 #else
803 #define HOST_HZ 100
804 #endif
806 static inline abi_long host_to_target_clock_t(long ticks)
808 #if HOST_HZ == TARGET_HZ
809 return ticks;
810 #else
811 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
812 #endif
815 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
816 const struct rusage *rusage)
818 struct target_rusage *target_rusage;
820 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
821 return -TARGET_EFAULT;
822 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
823 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
824 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
825 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
826 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
827 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
828 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
829 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
830 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
831 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
832 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
833 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
834 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
835 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
836 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
837 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
838 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
839 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
840 unlock_user_struct(target_rusage, target_addr, 1);
842 return 0;
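/* Convert rlimit values between guest and host, mapping the respective
 * RLIM_INFINITY constants onto each other; values that do not fit in the
 * destination type are treated as infinity. */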
845 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
847 abi_ulong target_rlim_swap;
848 rlim_t result;
850 target_rlim_swap = tswapal(target_rlim);
851 if (target_rlim_swap == TARGET_RLIM_INFINITY)
852 return RLIM_INFINITY;
854 result = target_rlim_swap;
855 if (target_rlim_swap != (rlim_t)result)
856 return RLIM_INFINITY;
858 return result;
861 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
863 abi_ulong target_rlim_swap;
864 abi_ulong result;
866 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
867 target_rlim_swap = TARGET_RLIM_INFINITY;
868 else
869 target_rlim_swap = rlim;
870 result = tswapal(target_rlim_swap);
872 return result;
875 static inline int target_to_host_resource(int code)
877 switch (code) {
878 case TARGET_RLIMIT_AS:
879 return RLIMIT_AS;
880 case TARGET_RLIMIT_CORE:
881 return RLIMIT_CORE;
882 case TARGET_RLIMIT_CPU:
883 return RLIMIT_CPU;
884 case TARGET_RLIMIT_DATA:
885 return RLIMIT_DATA;
886 case TARGET_RLIMIT_FSIZE:
887 return RLIMIT_FSIZE;
888 case TARGET_RLIMIT_LOCKS:
889 return RLIMIT_LOCKS;
890 case TARGET_RLIMIT_MEMLOCK:
891 return RLIMIT_MEMLOCK;
892 case TARGET_RLIMIT_MSGQUEUE:
893 return RLIMIT_MSGQUEUE;
894 case TARGET_RLIMIT_NICE:
895 return RLIMIT_NICE;
896 case TARGET_RLIMIT_NOFILE:
897 return RLIMIT_NOFILE;
898 case TARGET_RLIMIT_NPROC:
899 return RLIMIT_NPROC;
900 case TARGET_RLIMIT_RSS:
901 return RLIMIT_RSS;
902 case TARGET_RLIMIT_RTPRIO:
903 return RLIMIT_RTPRIO;
904 case TARGET_RLIMIT_SIGPENDING:
905 return RLIMIT_SIGPENDING;
906 case TARGET_RLIMIT_STACK:
907 return RLIMIT_STACK;
908 default:
909 return code;
913 static inline abi_long copy_from_user_timeval(struct timeval *tv,
914 abi_ulong target_tv_addr)
916 struct target_timeval *target_tv;
918 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
919 return -TARGET_EFAULT;
921 __get_user(tv->tv_sec, &target_tv->tv_sec);
922 __get_user(tv->tv_usec, &target_tv->tv_usec);
924 unlock_user_struct(target_tv, target_tv_addr, 0);
926 return 0;
929 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
930 const struct timeval *tv)
932 struct target_timeval *target_tv;
934 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
935 return -TARGET_EFAULT;
937 __put_user(tv->tv_sec, &target_tv->tv_sec);
938 __put_user(tv->tv_usec, &target_tv->tv_usec);
940 unlock_user_struct(target_tv, target_tv_addr, 1);
942 return 0;
945 static inline abi_long copy_from_user_timezone(struct timezone *tz,
946 abi_ulong target_tz_addr)
948 struct target_timezone *target_tz;
950 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
951 return -TARGET_EFAULT;
954 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
955 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
957 unlock_user_struct(target_tz, target_tz_addr, 0);
959 return 0;
962 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
963 #include <mqueue.h>
965 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
966 abi_ulong target_mq_attr_addr)
968 struct target_mq_attr *target_mq_attr;
970 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
971 target_mq_attr_addr, 1))
972 return -TARGET_EFAULT;
974 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
975 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
976 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
977 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
979 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
981 return 0;
984 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
985 const struct mq_attr *attr)
987 struct target_mq_attr *target_mq_attr;
989 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
990 target_mq_attr_addr, 0))
991 return -TARGET_EFAULT;
993 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
994 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
995 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
996 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
998 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1000 return 0;
1002 #endif
1004 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1005 /* do_select() must return target values and target errnos. */
1006 static abi_long do_select(int n,
1007 abi_ulong rfd_addr, abi_ulong wfd_addr,
1008 abi_ulong efd_addr, abi_ulong target_tv_addr)
1010 fd_set rfds, wfds, efds;
1011 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1012 struct timeval tv, *tv_ptr;
1013 abi_long ret;
1015 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1016 if (ret) {
1017 return ret;
1019 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1020 if (ret) {
1021 return ret;
1023 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1024 if (ret) {
1025 return ret;
1028 if (target_tv_addr) {
1029 if (copy_from_user_timeval(&tv, target_tv_addr))
1030 return -TARGET_EFAULT;
1031 tv_ptr = &tv;
1032 } else {
1033 tv_ptr = NULL;
1036 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1038 if (!is_error(ret)) {
1039 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1040 return -TARGET_EFAULT;
1041 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1042 return -TARGET_EFAULT;
1043 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1044 return -TARGET_EFAULT;
1046 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1047 return -TARGET_EFAULT;
1050 return ret;
1052 #endif
1054 static abi_long do_pipe2(int host_pipe[], int flags)
1056 #ifdef CONFIG_PIPE2
1057 return pipe2(host_pipe, flags);
1058 #else
1059 return -ENOSYS;
1060 #endif
1063 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1064 int flags, int is_pipe2)
1066 int host_pipe[2];
1067 abi_long ret;
1068 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1070 if (is_error(ret))
1071 return get_errno(ret);
1073 /* Several targets have special calling conventions for the original
1074 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1075 if (!is_pipe2) {
1076 #if defined(TARGET_ALPHA)
1077 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1078 return host_pipe[0];
1079 #elif defined(TARGET_MIPS)
1080 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1081 return host_pipe[0];
1082 #elif defined(TARGET_SH4)
1083 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1084 return host_pipe[0];
1085 #elif defined(TARGET_SPARC)
1086 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1087 return host_pipe[0];
1088 #endif
1091 if (put_user_s32(host_pipe[0], pipedes)
1092 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1093 return -TARGET_EFAULT;
1094 return get_errno(ret);
1097 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1098 abi_ulong target_addr,
1099 socklen_t len)
1101 struct target_ip_mreqn *target_smreqn;
1103 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1104 if (!target_smreqn)
1105 return -TARGET_EFAULT;
1106 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1107 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1108 if (len == sizeof(struct target_ip_mreqn))
1109 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1110 unlock_user(target_smreqn, target_addr, 0);
1112 return 0;
1115 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1116 abi_ulong target_addr,
1117 socklen_t len)
1119 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1120 sa_family_t sa_family;
1121 struct target_sockaddr *target_saddr;
1123 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1124 if (!target_saddr)
1125 return -TARGET_EFAULT;
1127 sa_family = tswap16(target_saddr->sa_family);
1129 /* Oops. The caller might send an incomplete sun_path; sun_path
1130 * must be terminated by \0 (see the manual page), but
1131 * unfortunately it is quite common to specify sockaddr_un
1132 * length as "strlen(x->sun_path)" while it should be
1133 * "strlen(...) + 1". We'll fix that here if needed.
1134 * Linux kernel has a similar feature.
1137 if (sa_family == AF_UNIX) {
1138 if (len < unix_maxlen && len > 0) {
1139 char *cp = (char*)target_saddr;
1141 if ( cp[len-1] && !cp[len] )
1142 len++;
1144 if (len > unix_maxlen)
1145 len = unix_maxlen;
1148 memcpy(addr, target_saddr, len);
1149 addr->sa_family = sa_family;
1150 if (sa_family == AF_PACKET) {
1151 struct target_sockaddr_ll *lladdr;
1153 lladdr = (struct target_sockaddr_ll *)addr;
1154 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1155 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1157 unlock_user(target_saddr, target_addr, 0);
1159 return 0;
1162 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1163 struct sockaddr *addr,
1164 socklen_t len)
1166 struct target_sockaddr *target_saddr;
1168 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1169 if (!target_saddr)
1170 return -TARGET_EFAULT;
1171 memcpy(target_saddr, addr, len);
1172 target_saddr->sa_family = tswap16(addr->sa_family);
1173 unlock_user(target_saddr, target_addr, len);
1175 return 0;
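/* Convert socket ancillary data (SCM_RIGHTS file descriptors, credentials,
 * timestamps) between the guest and host cmsghdr layouts, byte-swapping the
 * payload where its format is known and copying it verbatim otherwise. */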
1178 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1179 struct target_msghdr *target_msgh)
1181 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1182 abi_long msg_controllen;
1183 abi_ulong target_cmsg_addr;
1184 struct target_cmsghdr *target_cmsg;
1185 socklen_t space = 0;
1187 msg_controllen = tswapal(target_msgh->msg_controllen);
1188 if (msg_controllen < sizeof (struct target_cmsghdr))
1189 goto the_end;
1190 target_cmsg_addr = tswapal(target_msgh->msg_control);
1191 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1192 if (!target_cmsg)
1193 return -TARGET_EFAULT;
1195 while (cmsg && target_cmsg) {
1196 void *data = CMSG_DATA(cmsg);
1197 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1199 int len = tswapal(target_cmsg->cmsg_len)
1200 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1202 space += CMSG_SPACE(len);
1203 if (space > msgh->msg_controllen) {
1204 space -= CMSG_SPACE(len);
1205 gemu_log("Host cmsg overflow\n");
1206 break;
1209 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1210 cmsg->cmsg_level = SOL_SOCKET;
1211 } else {
1212 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1214 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1215 cmsg->cmsg_len = CMSG_LEN(len);
1217 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1218 int *fd = (int *)data;
1219 int *target_fd = (int *)target_data;
1220 int i, numfds = len / sizeof(int);
1222 for (i = 0; i < numfds; i++)
1223 fd[i] = tswap32(target_fd[i]);
1224 } else if (cmsg->cmsg_level == SOL_SOCKET
1225 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1226 struct ucred *cred = (struct ucred *)data;
1227 struct target_ucred *target_cred =
1228 (struct target_ucred *)target_data;
1230 __put_user(target_cred->pid, &cred->pid);
1231 __put_user(target_cred->uid, &cred->uid);
1232 __put_user(target_cred->gid, &cred->gid);
1233 } else {
1234 gemu_log("Unsupported ancillary data: %d/%d\n",
1235 cmsg->cmsg_level, cmsg->cmsg_type);
1236 memcpy(data, target_data, len);
1239 cmsg = CMSG_NXTHDR(msgh, cmsg);
1240 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1242 unlock_user(target_cmsg, target_cmsg_addr, 0);
1243 the_end:
1244 msgh->msg_controllen = space;
1245 return 0;
1248 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1249 struct msghdr *msgh)
1251 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1252 abi_long msg_controllen;
1253 abi_ulong target_cmsg_addr;
1254 struct target_cmsghdr *target_cmsg;
1255 socklen_t space = 0;
1257 msg_controllen = tswapal(target_msgh->msg_controllen);
1258 if (msg_controllen < sizeof (struct target_cmsghdr))
1259 goto the_end;
1260 target_cmsg_addr = tswapal(target_msgh->msg_control);
1261 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1262 if (!target_cmsg)
1263 return -TARGET_EFAULT;
1265 while (cmsg && target_cmsg) {
1266 void *data = CMSG_DATA(cmsg);
1267 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1269 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1271 space += TARGET_CMSG_SPACE(len);
1272 if (space > msg_controllen) {
1273 space -= TARGET_CMSG_SPACE(len);
1274 gemu_log("Target cmsg overflow\n");
1275 break;
1278 if (cmsg->cmsg_level == SOL_SOCKET) {
1279 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1280 } else {
1281 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1283 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1284 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1286 switch (cmsg->cmsg_level) {
1287 case SOL_SOCKET:
1288 switch (cmsg->cmsg_type) {
1289 case SCM_RIGHTS:
1291 int *fd = (int *)data;
1292 int *target_fd = (int *)target_data;
1293 int i, numfds = len / sizeof(int);
1295 for (i = 0; i < numfds; i++)
1296 target_fd[i] = tswap32(fd[i]);
1297 break;
1299 case SO_TIMESTAMP:
1301 struct timeval *tv = (struct timeval *)data;
1302 struct target_timeval *target_tv =
1303 (struct target_timeval *)target_data;
1305 if (len != sizeof(struct timeval))
1306 goto unimplemented;
1308 /* copy struct timeval to target */
1309 target_tv->tv_sec = tswapal(tv->tv_sec);
1310 target_tv->tv_usec = tswapal(tv->tv_usec);
1311 break;
1313 case SCM_CREDENTIALS:
1315 struct ucred *cred = (struct ucred *)data;
1316 struct target_ucred *target_cred =
1317 (struct target_ucred *)target_data;
1319 __put_user(cred->pid, &target_cred->pid);
1320 __put_user(cred->uid, &target_cred->uid);
1321 __put_user(cred->gid, &target_cred->gid);
1322 break;
1324 default:
1325 goto unimplemented;
1327 break;
1329 default:
1330 unimplemented:
1331 gemu_log("Unsupported ancillary data: %d/%d\n",
1332 cmsg->cmsg_level, cmsg->cmsg_type);
1333 memcpy(target_data, data, len);
1336 cmsg = CMSG_NXTHDR(msgh, cmsg);
1337 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1339 unlock_user(target_cmsg, target_cmsg_addr, space);
1340 the_end:
1341 target_msgh->msg_controllen = tswapal(space);
1342 return 0;
1345 /* do_setsockopt() Must return target values and target errnos. */
1346 static abi_long do_setsockopt(int sockfd, int level, int optname,
1347 abi_ulong optval_addr, socklen_t optlen)
1349 abi_long ret;
1350 int val;
1351 struct ip_mreqn *ip_mreq;
1352 struct ip_mreq_source *ip_mreq_source;
1354 switch(level) {
1355 case SOL_TCP:
1356 /* TCP options all take an 'int' value. */
1357 if (optlen < sizeof(uint32_t))
1358 return -TARGET_EINVAL;
1360 if (get_user_u32(val, optval_addr))
1361 return -TARGET_EFAULT;
1362 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1363 break;
1364 case SOL_IP:
1365 switch(optname) {
1366 case IP_TOS:
1367 case IP_TTL:
1368 case IP_HDRINCL:
1369 case IP_ROUTER_ALERT:
1370 case IP_RECVOPTS:
1371 case IP_RETOPTS:
1372 case IP_PKTINFO:
1373 case IP_MTU_DISCOVER:
1374 case IP_RECVERR:
1375 case IP_RECVTOS:
1376 #ifdef IP_FREEBIND
1377 case IP_FREEBIND:
1378 #endif
1379 case IP_MULTICAST_TTL:
1380 case IP_MULTICAST_LOOP:
1381 val = 0;
1382 if (optlen >= sizeof(uint32_t)) {
1383 if (get_user_u32(val, optval_addr))
1384 return -TARGET_EFAULT;
1385 } else if (optlen >= 1) {
1386 if (get_user_u8(val, optval_addr))
1387 return -TARGET_EFAULT;
1389 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1390 break;
1391 case IP_ADD_MEMBERSHIP:
1392 case IP_DROP_MEMBERSHIP:
1393 if (optlen < sizeof (struct target_ip_mreq) ||
1394 optlen > sizeof (struct target_ip_mreqn))
1395 return -TARGET_EINVAL;
1397 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1398 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1399 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1400 break;
1402 case IP_BLOCK_SOURCE:
1403 case IP_UNBLOCK_SOURCE:
1404 case IP_ADD_SOURCE_MEMBERSHIP:
1405 case IP_DROP_SOURCE_MEMBERSHIP:
1406 if (optlen != sizeof (struct target_ip_mreq_source))
1407 return -TARGET_EINVAL;
1409 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1410 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1411 unlock_user (ip_mreq_source, optval_addr, 0);
1412 break;
1414 default:
1415 goto unimplemented;
1417 break;
1418 case SOL_IPV6:
1419 switch (optname) {
1420 case IPV6_MTU_DISCOVER:
1421 case IPV6_MTU:
1422 case IPV6_V6ONLY:
1423 case IPV6_RECVPKTINFO:
1424 val = 0;
1425 if (optlen < sizeof(uint32_t)) {
1426 return -TARGET_EINVAL;
1428 if (get_user_u32(val, optval_addr)) {
1429 return -TARGET_EFAULT;
1431 ret = get_errno(setsockopt(sockfd, level, optname,
1432 &val, sizeof(val)));
1433 break;
1434 default:
1435 goto unimplemented;
1437 break;
1438 case SOL_RAW:
1439 switch (optname) {
1440 case ICMP_FILTER:
1441 /* struct icmp_filter takes a u32 value */
1442 if (optlen < sizeof(uint32_t)) {
1443 return -TARGET_EINVAL;
1446 if (get_user_u32(val, optval_addr)) {
1447 return -TARGET_EFAULT;
1449 ret = get_errno(setsockopt(sockfd, level, optname,
1450 &val, sizeof(val)));
1451 break;
1453 default:
1454 goto unimplemented;
1456 break;
1457 case TARGET_SOL_SOCKET:
1458 switch (optname) {
1459 case TARGET_SO_RCVTIMEO:
1461 struct timeval tv;
1463 optname = SO_RCVTIMEO;
1465 set_timeout:
1466 if (optlen != sizeof(struct target_timeval)) {
1467 return -TARGET_EINVAL;
1470 if (copy_from_user_timeval(&tv, optval_addr)) {
1471 return -TARGET_EFAULT;
1474 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1475 &tv, sizeof(tv)));
1476 return ret;
1478 case TARGET_SO_SNDTIMEO:
1479 optname = SO_SNDTIMEO;
1480 goto set_timeout;
1481 case TARGET_SO_ATTACH_FILTER:
1483 struct target_sock_fprog *tfprog;
1484 struct target_sock_filter *tfilter;
1485 struct sock_fprog fprog;
1486 struct sock_filter *filter;
1487 int i;
1489 if (optlen != sizeof(*tfprog)) {
1490 return -TARGET_EINVAL;
1492 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1493 return -TARGET_EFAULT;
1495 if (!lock_user_struct(VERIFY_READ, tfilter,
1496 tswapal(tfprog->filter), 0)) {
1497 unlock_user_struct(tfprog, optval_addr, 1);
1498 return -TARGET_EFAULT;
1501 fprog.len = tswap16(tfprog->len);
1502 filter = malloc(fprog.len * sizeof(*filter));
1503 if (filter == NULL) {
1504 unlock_user_struct(tfilter, tfprog->filter, 1);
1505 unlock_user_struct(tfprog, optval_addr, 1);
1506 return -TARGET_ENOMEM;
1508 for (i = 0; i < fprog.len; i++) {
1509 filter[i].code = tswap16(tfilter[i].code);
1510 filter[i].jt = tfilter[i].jt;
1511 filter[i].jf = tfilter[i].jf;
1512 filter[i].k = tswap32(tfilter[i].k);
1514 fprog.filter = filter;
1516 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1517 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1518 free(filter);
1520 unlock_user_struct(tfilter, tfprog->filter, 1);
1521 unlock_user_struct(tfprog, optval_addr, 1);
1522 return ret;
1524 case TARGET_SO_BINDTODEVICE:
1526 char *dev_ifname, *addr_ifname;
1528 if (optlen > IFNAMSIZ - 1) {
1529 optlen = IFNAMSIZ - 1;
1531 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1532 if (!dev_ifname) {
1533 return -TARGET_EFAULT;
1535 optname = SO_BINDTODEVICE;
1536 addr_ifname = alloca(IFNAMSIZ);
1537 memcpy(addr_ifname, dev_ifname, optlen);
1538 addr_ifname[optlen] = 0;
1539 ret = get_errno(setsockopt(sockfd, level, optname, addr_ifname, optlen));
1540 unlock_user (dev_ifname, optval_addr, 0);
1541 return ret;
1543 /* Options with 'int' argument. */
1544 case TARGET_SO_DEBUG:
1545 optname = SO_DEBUG;
1546 break;
1547 case TARGET_SO_REUSEADDR:
1548 optname = SO_REUSEADDR;
1549 break;
1550 case TARGET_SO_TYPE:
1551 optname = SO_TYPE;
1552 break;
1553 case TARGET_SO_ERROR:
1554 optname = SO_ERROR;
1555 break;
1556 case TARGET_SO_DONTROUTE:
1557 optname = SO_DONTROUTE;
1558 break;
1559 case TARGET_SO_BROADCAST:
1560 optname = SO_BROADCAST;
1561 break;
1562 case TARGET_SO_SNDBUF:
1563 optname = SO_SNDBUF;
1564 break;
1565 case TARGET_SO_SNDBUFFORCE:
1566 optname = SO_SNDBUFFORCE;
1567 break;
1568 case TARGET_SO_RCVBUF:
1569 optname = SO_RCVBUF;
1570 break;
1571 case TARGET_SO_RCVBUFFORCE:
1572 optname = SO_RCVBUFFORCE;
1573 break;
1574 case TARGET_SO_KEEPALIVE:
1575 optname = SO_KEEPALIVE;
1576 break;
1577 case TARGET_SO_OOBINLINE:
1578 optname = SO_OOBINLINE;
1579 break;
1580 case TARGET_SO_NO_CHECK:
1581 optname = SO_NO_CHECK;
1582 break;
1583 case TARGET_SO_PRIORITY:
1584 optname = SO_PRIORITY;
1585 break;
1586 #ifdef SO_BSDCOMPAT
1587 case TARGET_SO_BSDCOMPAT:
1588 optname = SO_BSDCOMPAT;
1589 break;
1590 #endif
1591 case TARGET_SO_PASSCRED:
1592 optname = SO_PASSCRED;
1593 break;
1594 case TARGET_SO_PASSSEC:
1595 optname = SO_PASSSEC;
1596 break;
1597 case TARGET_SO_TIMESTAMP:
1598 optname = SO_TIMESTAMP;
1599 break;
1600 case TARGET_SO_RCVLOWAT:
1601 optname = SO_RCVLOWAT;
1602 break;
1604 default:
1605 goto unimplemented;
1607 if (optlen < sizeof(uint32_t))
1608 return -TARGET_EINVAL;
1610 if (get_user_u32(val, optval_addr))
1611 return -TARGET_EFAULT;
1612 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1613 break;
1614 default:
1615 unimplemented:
1616 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1617 ret = -TARGET_ENOPROTOOPT;
1619 return ret;
1622 /* do_getsockopt() Must return target values and target errnos. */
1623 static abi_long do_getsockopt(int sockfd, int level, int optname,
1624 abi_ulong optval_addr, abi_ulong optlen)
1626 abi_long ret;
1627 int len, val;
1628 socklen_t lv;
1630 switch(level) {
1631 case TARGET_SOL_SOCKET:
1632 level = SOL_SOCKET;
1633 switch (optname) {
1634 /* These don't just return a single integer */
1635 case TARGET_SO_LINGER:
1636 case TARGET_SO_RCVTIMEO:
1637 case TARGET_SO_SNDTIMEO:
1638 case TARGET_SO_PEERNAME:
1639 goto unimplemented;
1640 case TARGET_SO_PEERCRED: {
1641 struct ucred cr;
1642 socklen_t crlen;
1643 struct target_ucred *tcr;
1645 if (get_user_u32(len, optlen)) {
1646 return -TARGET_EFAULT;
1648 if (len < 0) {
1649 return -TARGET_EINVAL;
1652 crlen = sizeof(cr);
1653 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1654 &cr, &crlen));
1655 if (ret < 0) {
1656 return ret;
1658 if (len > crlen) {
1659 len = crlen;
1661 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1662 return -TARGET_EFAULT;
1664 __put_user(cr.pid, &tcr->pid);
1665 __put_user(cr.uid, &tcr->uid);
1666 __put_user(cr.gid, &tcr->gid);
1667 unlock_user_struct(tcr, optval_addr, 1);
1668 if (put_user_u32(len, optlen)) {
1669 return -TARGET_EFAULT;
1671 break;
1673 /* Options with 'int' argument. */
1674 case TARGET_SO_DEBUG:
1675 optname = SO_DEBUG;
1676 goto int_case;
1677 case TARGET_SO_REUSEADDR:
1678 optname = SO_REUSEADDR;
1679 goto int_case;
1680 case TARGET_SO_TYPE:
1681 optname = SO_TYPE;
1682 goto int_case;
1683 case TARGET_SO_ERROR:
1684 optname = SO_ERROR;
1685 goto int_case;
1686 case TARGET_SO_DONTROUTE:
1687 optname = SO_DONTROUTE;
1688 goto int_case;
1689 case TARGET_SO_BROADCAST:
1690 optname = SO_BROADCAST;
1691 goto int_case;
1692 case TARGET_SO_SNDBUF:
1693 optname = SO_SNDBUF;
1694 goto int_case;
1695 case TARGET_SO_RCVBUF:
1696 optname = SO_RCVBUF;
1697 goto int_case;
1698 case TARGET_SO_KEEPALIVE:
1699 optname = SO_KEEPALIVE;
1700 goto int_case;
1701 case TARGET_SO_OOBINLINE:
1702 optname = SO_OOBINLINE;
1703 goto int_case;
1704 case TARGET_SO_NO_CHECK:
1705 optname = SO_NO_CHECK;
1706 goto int_case;
1707 case TARGET_SO_PRIORITY:
1708 optname = SO_PRIORITY;
1709 goto int_case;
1710 #ifdef SO_BSDCOMPAT
1711 case TARGET_SO_BSDCOMPAT:
1712 optname = SO_BSDCOMPAT;
1713 goto int_case;
1714 #endif
1715 case TARGET_SO_PASSCRED:
1716 optname = SO_PASSCRED;
1717 goto int_case;
1718 case TARGET_SO_TIMESTAMP:
1719 optname = SO_TIMESTAMP;
1720 goto int_case;
1721 case TARGET_SO_RCVLOWAT:
1722 optname = SO_RCVLOWAT;
1723 goto int_case;
1724 case TARGET_SO_ACCEPTCONN:
1725 optname = SO_ACCEPTCONN;
1726 goto int_case;
1727 default:
1728 goto int_case;
1730 break;
1731 case SOL_TCP:
1732 /* TCP options all take an 'int' value. */
1733 int_case:
1734 if (get_user_u32(len, optlen))
1735 return -TARGET_EFAULT;
1736 if (len < 0)
1737 return -TARGET_EINVAL;
1738 lv = sizeof(lv);
1739 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1740 if (ret < 0)
1741 return ret;
1742 if (optname == SO_TYPE) {
1743 val = host_to_target_sock_type(val);
1745 if (len > lv)
1746 len = lv;
1747 if (len == 4) {
1748 if (put_user_u32(val, optval_addr))
1749 return -TARGET_EFAULT;
1750 } else {
1751 if (put_user_u8(val, optval_addr))
1752 return -TARGET_EFAULT;
1754 if (put_user_u32(len, optlen))
1755 return -TARGET_EFAULT;
1756 break;
1757 case SOL_IP:
1758 switch(optname) {
1759 case IP_TOS:
1760 case IP_TTL:
1761 case IP_HDRINCL:
1762 case IP_ROUTER_ALERT:
1763 case IP_RECVOPTS:
1764 case IP_RETOPTS:
1765 case IP_PKTINFO:
1766 case IP_MTU_DISCOVER:
1767 case IP_RECVERR:
1768 case IP_RECVTOS:
1769 #ifdef IP_FREEBIND
1770 case IP_FREEBIND:
1771 #endif
1772 case IP_MULTICAST_TTL:
1773 case IP_MULTICAST_LOOP:
1774 if (get_user_u32(len, optlen))
1775 return -TARGET_EFAULT;
1776 if (len < 0)
1777 return -TARGET_EINVAL;
1778 lv = sizeof(lv);
1779 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1780 if (ret < 0)
1781 return ret;
1782 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1783 len = 1;
1784 if (put_user_u32(len, optlen)
1785 || put_user_u8(val, optval_addr))
1786 return -TARGET_EFAULT;
1787 } else {
1788 if (len > sizeof(int))
1789 len = sizeof(int);
1790 if (put_user_u32(len, optlen)
1791 || put_user_u32(val, optval_addr))
1792 return -TARGET_EFAULT;
1794 break;
1795 default:
1796 ret = -TARGET_ENOPROTOOPT;
1797 break;
1799 break;
1800 default:
1801 unimplemented:
1802 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1803 level, optname);
1804 ret = -TARGET_EOPNOTSUPP;
1805 break;
1807 return ret;
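/* Build a host iovec array from a guest iovec array: each guest buffer is
 * locked into host memory, bad addresses after the first entry degrade to
 * zero-length entries (yielding a partial transfer), and the total length is
 * clamped to what a single read/write can report. On failure NULL is
 * returned with errno set to a host errno. */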
1810 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1811 int count, int copy)
1813 struct target_iovec *target_vec;
1814 struct iovec *vec;
1815 abi_ulong total_len, max_len;
1816 int i;
1817 int err = 0;
1818 bool bad_address = false;
1820 if (count == 0) {
1821 errno = 0;
1822 return NULL;
1824 if (count < 0 || count > IOV_MAX) {
1825 errno = EINVAL;
1826 return NULL;
1829 vec = calloc(count, sizeof(struct iovec));
1830 if (vec == NULL) {
1831 errno = ENOMEM;
1832 return NULL;
1835 target_vec = lock_user(VERIFY_READ, target_addr,
1836 count * sizeof(struct target_iovec), 1);
1837 if (target_vec == NULL) {
1838 err = EFAULT;
1839 goto fail2;
1842 /* ??? If host page size > target page size, this will result in a
1843 value larger than what we can actually support. */
1844 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1845 total_len = 0;
1847 for (i = 0; i < count; i++) {
1848 abi_ulong base = tswapal(target_vec[i].iov_base);
1849 abi_long len = tswapal(target_vec[i].iov_len);
1851 if (len < 0) {
1852 err = EINVAL;
1853 goto fail;
1854 } else if (len == 0) {
1855 /* Zero length pointer is ignored. */
1856 vec[i].iov_base = 0;
1857 } else {
1858 vec[i].iov_base = lock_user(type, base, len, copy);
1859 /* If the first buffer pointer is bad, this is a fault. But
1860 * subsequent bad buffers will result in a partial write; this
1861 * is realized by filling the vector with null pointers and
1862 * zero lengths. */
1863 if (!vec[i].iov_base) {
1864 if (i == 0) {
1865 err = EFAULT;
1866 goto fail;
1867 } else {
1868 bad_address = true;
1871 if (bad_address) {
1872 len = 0;
1874 if (len > max_len - total_len) {
1875 len = max_len - total_len;
1878 vec[i].iov_len = len;
1879 total_len += len;
1882 unlock_user(target_vec, target_addr, 0);
1883 return vec;
1885 fail:
1886 while (--i >= 0) {
1887 if (tswapal(target_vec[i].iov_len) > 0) {
1888 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
1891 unlock_user(target_vec, target_addr, 0);
1892 fail2:
1893 free(vec);
1894 errno = err;
1895 return NULL;
1898 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1899 int count, int copy)
1901 struct target_iovec *target_vec;
1902 int i;
1904 target_vec = lock_user(VERIFY_READ, target_addr,
1905 count * sizeof(struct target_iovec), 1);
1906 if (target_vec) {
1907 for (i = 0; i < count; i++) {
1908 abi_ulong base = tswapal(target_vec[i].iov_base);
1909 abi_long len = tswapal(target_vec[i].iov_len);
1910 if (len < 0) {
1911 break;
1913 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1915 unlock_user(target_vec, target_addr, 0);
1918 free(vec);
1921 static inline int target_to_host_sock_type(int *type)
1923 int host_type = 0;
1924 int target_type = *type;
1926 switch (target_type & TARGET_SOCK_TYPE_MASK) {
1927 case TARGET_SOCK_DGRAM:
1928 host_type = SOCK_DGRAM;
1929 break;
1930 case TARGET_SOCK_STREAM:
1931 host_type = SOCK_STREAM;
1932 break;
1933 default:
1934 host_type = target_type & TARGET_SOCK_TYPE_MASK;
1935 break;
1937 if (target_type & TARGET_SOCK_CLOEXEC) {
1938 #if defined(SOCK_CLOEXEC)
1939 host_type |= SOCK_CLOEXEC;
1940 #else
1941 return -TARGET_EINVAL;
1942 #endif
1944 if (target_type & TARGET_SOCK_NONBLOCK) {
1945 #if defined(SOCK_NONBLOCK)
1946 host_type |= SOCK_NONBLOCK;
1947 #elif !defined(O_NONBLOCK)
1948 return -TARGET_EINVAL;
1949 #endif
1951 *type = host_type;
1952 return 0;
1955 /* Try to emulate socket type flags after socket creation. */
1956 static int sock_flags_fixup(int fd, int target_type)
1958 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
1959 if (target_type & TARGET_SOCK_NONBLOCK) {
1960 int flags = fcntl(fd, F_GETFL);
1961 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
1962 close(fd);
1963 return -TARGET_EINVAL;
1966 #endif
1967 return fd;
1970 /* do_socket() Must return target values and target errnos. */
1971 static abi_long do_socket(int domain, int type, int protocol)
1973 int target_type = type;
1974 int ret;
1976 ret = target_to_host_sock_type(&type);
1977 if (ret) {
1978 return ret;
1981 if (domain == PF_NETLINK)
1982 return -TARGET_EAFNOSUPPORT;
1983 ret = get_errno(socket(domain, type, protocol));
1984 if (ret >= 0) {
1985 ret = sock_flags_fixup(ret, target_type);
1987 return ret;
1990 /* do_bind() Must return target values and target errnos. */
1991 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1992 socklen_t addrlen)
1994 void *addr;
1995 abi_long ret;
1997 if ((int)addrlen < 0) {
1998 return -TARGET_EINVAL;
2001 addr = alloca(addrlen+1);
2003 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2004 if (ret)
2005 return ret;
2007 return get_errno(bind(sockfd, addr, addrlen));
2010 /* do_connect() Must return target values and target errnos. */
2011 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2012 socklen_t addrlen)
2014 void *addr;
2015 abi_long ret;
2017 if ((int)addrlen < 0) {
2018 return -TARGET_EINVAL;
2021 addr = alloca(addrlen+1);
2023 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2024 if (ret)
2025 return ret;
2027 return get_errno(connect(sockfd, addr, addrlen));
2030 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2031 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2032 int flags, int send)
2034 abi_long ret, len;
2035 struct msghdr msg;
2036 int count;
2037 struct iovec *vec;
2038 abi_ulong target_vec;
2040 if (msgp->msg_name) {
2041 msg.msg_namelen = tswap32(msgp->msg_namelen);
2042 msg.msg_name = alloca(msg.msg_namelen+1);
2043 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
2044 msg.msg_namelen);
2045 if (ret) {
2046 goto out2;
2048 } else {
2049 msg.msg_name = NULL;
2050 msg.msg_namelen = 0;
2052 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2053 msg.msg_control = alloca(msg.msg_controllen);
2054 msg.msg_flags = tswap32(msgp->msg_flags);
2056 count = tswapal(msgp->msg_iovlen);
2057 target_vec = tswapal(msgp->msg_iov);
2058 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2059 target_vec, count, send);
2060 if (vec == NULL) {
2061 ret = -host_to_target_errno(errno);
2062 goto out2;
2064 msg.msg_iovlen = count;
2065 msg.msg_iov = vec;
2067 if (send) {
2068 ret = target_to_host_cmsg(&msg, msgp);
2069 if (ret == 0)
2070 ret = get_errno(sendmsg(fd, &msg, flags));
2071 } else {
2072 ret = get_errno(recvmsg(fd, &msg, flags));
2073 if (!is_error(ret)) {
2074 len = ret;
2075 ret = host_to_target_cmsg(msgp, &msg);
2076 if (!is_error(ret)) {
2077 msgp->msg_namelen = tswap32(msg.msg_namelen);
2078 if (msg.msg_name != NULL) {
2079 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2080 msg.msg_name, msg.msg_namelen);
2081 if (ret) {
2082 goto out;
2086 ret = len;
2091 out:
2092 unlock_iovec(vec, target_vec, count, !send);
2093 out2:
2094 return ret;
2097 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2098 int flags, int send)
2100 abi_long ret;
2101 struct target_msghdr *msgp;
2103 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2104 msgp,
2105 target_msg,
2106 send ? 1 : 0)) {
2107 return -TARGET_EFAULT;
2109 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2110 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2111 return ret;
2114 #ifdef TARGET_NR_sendmmsg
2115 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2116 * so it might not have this *mmsg-specific flag either.
2118 #ifndef MSG_WAITFORONE
2119 #define MSG_WAITFORONE 0x10000
2120 #endif
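/* Added note: sendmmsg()/recvmmsg() emulation.  Each entry of the guest
 * mmsghdr array is handled with do_sendrecvmsg_locked(), its msg_len is
 * updated, and MSG_WAITFORONE is honoured by switching to MSG_DONTWAIT
 * after the first datagram.  As with the kernel interface, the count of
 * messages transferred is returned if any succeeded, otherwise the error
 * from the first failure.
 */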
2122 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2123 unsigned int vlen, unsigned int flags,
2124 int send)
2126 struct target_mmsghdr *mmsgp;
2127 abi_long ret = 0;
2128 int i;
2130 if (vlen > UIO_MAXIOV) {
2131 vlen = UIO_MAXIOV;
2134 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2135 if (!mmsgp) {
2136 return -TARGET_EFAULT;
2139 for (i = 0; i < vlen; i++) {
2140 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2141 if (is_error(ret)) {
2142 break;
2144 mmsgp[i].msg_len = tswap32(ret);
2145 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2146 if (flags & MSG_WAITFORONE) {
2147 flags |= MSG_DONTWAIT;
2151 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2153 /* Return number of datagrams sent if we sent any at all;
2154 * otherwise return the error.
2156 if (i) {
2157 return i;
2159 return ret;
2161 #endif
2163 /* If we don't have a system accept4() then just call accept.
2164 * The callsites to do_accept4() will ensure that they don't
2165 * pass a non-zero flags argument in this config.
2167 #ifndef CONFIG_ACCEPT4
2168 static inline int accept4(int sockfd, struct sockaddr *addr,
2169 socklen_t *addrlen, int flags)
2171 assert(flags == 0);
2172 return accept(sockfd, addr, addrlen);
2174 #endif
2176 /* do_accept4() Must return target values and target errnos. */
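/* Added note: accept() and accept4() both funnel through here; 'flags' is
 * a target bitmask (e.g. close-on-exec) that is mapped to host flags via
 * fcntl_flags_tbl before the host accept4() is called.  A NULL target
 * address skips the sockaddr conversion entirely.
 */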
2177 static abi_long do_accept4(int fd, abi_ulong target_addr,
2178 abi_ulong target_addrlen_addr, int flags)
2180 socklen_t addrlen;
2181 void *addr;
2182 abi_long ret;
2183 int host_flags;
2185 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2187 if (target_addr == 0) {
2188 return get_errno(accept4(fd, NULL, NULL, host_flags));
2191 /* linux returns EINVAL if addrlen pointer is invalid */
2192 if (get_user_u32(addrlen, target_addrlen_addr))
2193 return -TARGET_EINVAL;
2195 if ((int)addrlen < 0) {
2196 return -TARGET_EINVAL;
2199 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2200 return -TARGET_EINVAL;
2202 addr = alloca(addrlen);
2204 ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
2205 if (!is_error(ret)) {
2206 host_to_target_sockaddr(target_addr, addr, addrlen);
2207 if (put_user_u32(addrlen, target_addrlen_addr))
2208 ret = -TARGET_EFAULT;
2210 return ret;
2213 /* do_getpeername() Must return target values and target errnos. */
2214 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2215 abi_ulong target_addrlen_addr)
2217 socklen_t addrlen;
2218 void *addr;
2219 abi_long ret;
2221 if (get_user_u32(addrlen, target_addrlen_addr))
2222 return -TARGET_EFAULT;
2224 if ((int)addrlen < 0) {
2225 return -TARGET_EINVAL;
2228 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2229 return -TARGET_EFAULT;
2231 addr = alloca(addrlen);
2233 ret = get_errno(getpeername(fd, addr, &addrlen));
2234 if (!is_error(ret)) {
2235 host_to_target_sockaddr(target_addr, addr, addrlen);
2236 if (put_user_u32(addrlen, target_addrlen_addr))
2237 ret = -TARGET_EFAULT;
2239 return ret;
2242 /* do_getsockname() Must return target values and target errnos. */
2243 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2244 abi_ulong target_addrlen_addr)
2246 socklen_t addrlen;
2247 void *addr;
2248 abi_long ret;
2250 if (get_user_u32(addrlen, target_addrlen_addr))
2251 return -TARGET_EFAULT;
2253 if ((int)addrlen < 0) {
2254 return -TARGET_EINVAL;
2257 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2258 return -TARGET_EFAULT;
2260 addr = alloca(addrlen);
2262 ret = get_errno(getsockname(fd, addr, &addrlen));
2263 if (!is_error(ret)) {
2264 host_to_target_sockaddr(target_addr, addr, addrlen);
2265 if (put_user_u32(addrlen, target_addrlen_addr))
2266 ret = -TARGET_EFAULT;
2268 return ret;
2271 /* do_socketpair() Must return target values and target errnos. */
2272 static abi_long do_socketpair(int domain, int type, int protocol,
2273 abi_ulong target_tab_addr)
2275 int tab[2];
2276 abi_long ret;
2278 target_to_host_sock_type(&type);
2280 ret = get_errno(socketpair(domain, type, protocol, tab));
2281 if (!is_error(ret)) {
2282 if (put_user_s32(tab[0], target_tab_addr)
2283 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2284 ret = -TARGET_EFAULT;
2286 return ret;
2289 /* do_sendto() Must return target values and target errnos. */
2290 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2291 abi_ulong target_addr, socklen_t addrlen)
2293 void *addr;
2294 void *host_msg;
2295 abi_long ret;
2297 if ((int)addrlen < 0) {
2298 return -TARGET_EINVAL;
2301 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2302 if (!host_msg)
2303 return -TARGET_EFAULT;
2304 if (target_addr) {
2305 addr = alloca(addrlen+1);
2306 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2307 if (ret) {
2308 unlock_user(host_msg, msg, 0);
2309 return ret;
2311 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2312 } else {
2313 ret = get_errno(send(fd, host_msg, len, flags));
2315 unlock_user(host_msg, msg, 0);
2316 return ret;
2319 /* do_recvfrom() Must return target values and target errnos. */
2320 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2321 abi_ulong target_addr,
2322 abi_ulong target_addrlen)
2324 socklen_t addrlen;
2325 void *addr;
2326 void *host_msg;
2327 abi_long ret;
2329 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2330 if (!host_msg)
2331 return -TARGET_EFAULT;
2332 if (target_addr) {
2333 if (get_user_u32(addrlen, target_addrlen)) {
2334 ret = -TARGET_EFAULT;
2335 goto fail;
2337 if ((int)addrlen < 0) {
2338 ret = -TARGET_EINVAL;
2339 goto fail;
2341 addr = alloca(addrlen);
2342 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2343 } else {
2344 addr = NULL; /* To keep compiler quiet. */
2345 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2347 if (!is_error(ret)) {
2348 if (target_addr) {
2349 host_to_target_sockaddr(target_addr, addr, addrlen);
2350 if (put_user_u32(addrlen, target_addrlen)) {
2351 ret = -TARGET_EFAULT;
2352 goto fail;
2355 unlock_user(host_msg, msg, len);
2356 } else {
2357 fail:
2358 unlock_user(host_msg, msg, 0);
2360 return ret;
2363 #ifdef TARGET_NR_socketcall
2364 /* do_socketcall() Must return target values and target errnos. */
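/* Added note: the socketcall() multiplexer receives a call number plus a
 * guest pointer to an array of abi_long arguments.  ac[] below records how
 * many arguments each SOCKOP_* call takes so exactly that many are fetched
 * from guest memory before dispatching.  A guest connect(), for instance,
 * arrives here as num == SOCKOP_connect with vptr pointing at
 * { sockfd, addr, addrlen }.
 */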
2365 static abi_long do_socketcall(int num, abi_ulong vptr)
2367 static const unsigned ac[] = { /* number of arguments per call */
2368 [SOCKOP_socket] = 3, /* domain, type, protocol */
2369 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
2370 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
2371 [SOCKOP_listen] = 2, /* sockfd, backlog */
2372 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
2373 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
2374 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
2375 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
2376 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
2377 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
2378 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
2379 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2380 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2381 [SOCKOP_shutdown] = 2, /* sockfd, how */
2382 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
2383 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
2384 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2385 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2387 abi_long a[6]; /* max 6 args */
2389 /* first, collect the arguments in a[] according to ac[] */
2390 if (num >= 0 && num < ARRAY_SIZE(ac)) {
2391 unsigned i;
2392 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
2393 for (i = 0; i < ac[num]; ++i) {
2394 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
2395 return -TARGET_EFAULT;
2400 /* now that we have the args, actually handle the call */
2401 switch (num) {
2402 case SOCKOP_socket: /* domain, type, protocol */
2403 return do_socket(a[0], a[1], a[2]);
2404 case SOCKOP_bind: /* sockfd, addr, addrlen */
2405 return do_bind(a[0], a[1], a[2]);
2406 case SOCKOP_connect: /* sockfd, addr, addrlen */
2407 return do_connect(a[0], a[1], a[2]);
2408 case SOCKOP_listen: /* sockfd, backlog */
2409 return get_errno(listen(a[0], a[1]));
2410 case SOCKOP_accept: /* sockfd, addr, addrlen */
2411 return do_accept4(a[0], a[1], a[2], 0);
2412 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
2413 return do_accept4(a[0], a[1], a[2], a[3]);
2414 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
2415 return do_getsockname(a[0], a[1], a[2]);
2416 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
2417 return do_getpeername(a[0], a[1], a[2]);
2418 case SOCKOP_socketpair: /* domain, type, protocol, tab */
2419 return do_socketpair(a[0], a[1], a[2], a[3]);
2420 case SOCKOP_send: /* sockfd, msg, len, flags */
2421 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
2422 case SOCKOP_recv: /* sockfd, msg, len, flags */
2423 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
2424 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
2425 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
2426 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
2427 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
2428 case SOCKOP_shutdown: /* sockfd, how */
2429 return get_errno(shutdown(a[0], a[1]));
2430 case SOCKOP_sendmsg: /* sockfd, msg, flags */
2431 return do_sendrecvmsg(a[0], a[1], a[2], 1);
2432 case SOCKOP_recvmsg: /* sockfd, msg, flags */
2433 return do_sendrecvmsg(a[0], a[1], a[2], 0);
2434 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
2435 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
2436 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
2437 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
2438 default:
2439 gemu_log("Unsupported socketcall: %d\n", num);
2440 return -TARGET_ENOSYS;
2443 #endif
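/* Added note: bookkeeping for SysV shared memory attaches.  Each successful
 * shmat() records the guest address and size here so that shmdt() can clear
 * the corresponding page flags again; the table is fixed-size, so at most
 * N_SHM_REGIONS segments can be tracked at once.
 */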
2445 #define N_SHM_REGIONS 32
2447 static struct shm_region {
2448 abi_ulong start;
2449 abi_ulong size;
2450 } shm_regions[N_SHM_REGIONS];
2452 struct target_semid_ds
2454 struct target_ipc_perm sem_perm;
2455 abi_ulong sem_otime;
2456 #if !defined(TARGET_PPC64)
2457 abi_ulong __unused1;
2458 #endif
2459 abi_ulong sem_ctime;
2460 #if !defined(TARGET_PPC64)
2461 abi_ulong __unused2;
2462 #endif
2463 abi_ulong sem_nsems;
2464 abi_ulong __unused3;
2465 abi_ulong __unused4;
2468 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2469 abi_ulong target_addr)
2471 struct target_ipc_perm *target_ip;
2472 struct target_semid_ds *target_sd;
2474 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2475 return -TARGET_EFAULT;
2476 target_ip = &(target_sd->sem_perm);
2477 host_ip->__key = tswap32(target_ip->__key);
2478 host_ip->uid = tswap32(target_ip->uid);
2479 host_ip->gid = tswap32(target_ip->gid);
2480 host_ip->cuid = tswap32(target_ip->cuid);
2481 host_ip->cgid = tswap32(target_ip->cgid);
2482 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2483 host_ip->mode = tswap32(target_ip->mode);
2484 #else
2485 host_ip->mode = tswap16(target_ip->mode);
2486 #endif
2487 #if defined(TARGET_PPC)
2488 host_ip->__seq = tswap32(target_ip->__seq);
2489 #else
2490 host_ip->__seq = tswap16(target_ip->__seq);
2491 #endif
2492 unlock_user_struct(target_sd, target_addr, 0);
2493 return 0;
2496 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2497 struct ipc_perm *host_ip)
2499 struct target_ipc_perm *target_ip;
2500 struct target_semid_ds *target_sd;
2502 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2503 return -TARGET_EFAULT;
2504 target_ip = &(target_sd->sem_perm);
2505 target_ip->__key = tswap32(host_ip->__key);
2506 target_ip->uid = tswap32(host_ip->uid);
2507 target_ip->gid = tswap32(host_ip->gid);
2508 target_ip->cuid = tswap32(host_ip->cuid);
2509 target_ip->cgid = tswap32(host_ip->cgid);
2510 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2511 target_ip->mode = tswap32(host_ip->mode);
2512 #else
2513 target_ip->mode = tswap16(host_ip->mode);
2514 #endif
2515 #if defined(TARGET_PPC)
2516 target_ip->__seq = tswap32(host_ip->__seq);
2517 #else
2518 target_ip->__seq = tswap16(host_ip->__seq);
2519 #endif
2520 unlock_user_struct(target_sd, target_addr, 1);
2521 return 0;
2524 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2525 abi_ulong target_addr)
2527 struct target_semid_ds *target_sd;
2529 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2530 return -TARGET_EFAULT;
2531 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2532 return -TARGET_EFAULT;
2533 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2534 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2535 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2536 unlock_user_struct(target_sd, target_addr, 0);
2537 return 0;
2540 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2541 struct semid_ds *host_sd)
2543 struct target_semid_ds *target_sd;
2545 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2546 return -TARGET_EFAULT;
2547 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2548 return -TARGET_EFAULT;
2549 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2550 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2551 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2552 unlock_user_struct(target_sd, target_addr, 1);
2553 return 0;
2556 struct target_seminfo {
2557 int semmap;
2558 int semmni;
2559 int semmns;
2560 int semmnu;
2561 int semmsl;
2562 int semopm;
2563 int semume;
2564 int semusz;
2565 int semvmx;
2566 int semaem;
2569 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2570 struct seminfo *host_seminfo)
2572 struct target_seminfo *target_seminfo;
2573 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2574 return -TARGET_EFAULT;
2575 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2576 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2577 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2578 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2579 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2580 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2581 __put_user(host_seminfo->semume, &target_seminfo->semume);
2582 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2583 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2584 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2585 unlock_user_struct(target_seminfo, target_addr, 1);
2586 return 0;
2589 union semun {
2590 int val;
2591 struct semid_ds *buf;
2592 unsigned short *array;
2593 struct seminfo *__buf;
2596 union target_semun {
2597 int val;
2598 abi_ulong buf;
2599 abi_ulong array;
2600 abi_ulong __buf;
2603 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2604 abi_ulong target_addr)
2606 int nsems;
2607 unsigned short *array;
2608 union semun semun;
2609 struct semid_ds semid_ds;
2610 int i, ret;
2612 semun.buf = &semid_ds;
2614 ret = semctl(semid, 0, IPC_STAT, semun);
2615 if (ret == -1)
2616 return get_errno(ret);
2618 nsems = semid_ds.sem_nsems;
2620 *host_array = malloc(nsems*sizeof(unsigned short));
2621 if (!*host_array) {
2622 return -TARGET_ENOMEM;
2624 array = lock_user(VERIFY_READ, target_addr,
2625 nsems*sizeof(unsigned short), 1);
2626 if (!array) {
2627 free(*host_array);
2628 return -TARGET_EFAULT;
2631 for(i=0; i<nsems; i++) {
2632 __get_user((*host_array)[i], &array[i]);
2634 unlock_user(array, target_addr, 0);
2636 return 0;
2639 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2640 unsigned short **host_array)
2642 int nsems;
2643 unsigned short *array;
2644 union semun semun;
2645 struct semid_ds semid_ds;
2646 int i, ret;
2648 semun.buf = &semid_ds;
2650 ret = semctl(semid, 0, IPC_STAT, semun);
2651 if (ret == -1)
2652 return get_errno(ret);
2654 nsems = semid_ds.sem_nsems;
2656 array = lock_user(VERIFY_WRITE, target_addr,
2657 nsems*sizeof(unsigned short), 0);
2658 if (!array)
2659 return -TARGET_EFAULT;
2661 for(i=0; i<nsems; i++) {
2662 __put_user((*host_array)[i], &array[i]);
2664 free(*host_array);
2665 unlock_user(array, target_addr, 1);
2667 return 0;
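/* Added note: semctl() emulation.  The fourth argument is a union, so the
 * target value is converted according to the command: GETVAL/SETVAL use the
 * 32-bit 'val' member (with the byte-swap handling described below),
 * GETALL/SETALL marshal an array of semaphore values, and the
 * *_STAT/*_SET/*_INFO commands round-trip the corresponding descriptor
 * structures.
 */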
2670 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2671 union target_semun target_su)
2673 union semun arg;
2674 struct semid_ds dsarg;
2675 unsigned short *array = NULL;
2676 struct seminfo seminfo;
2677 abi_long ret = -TARGET_EINVAL;
2678 abi_long err;
2679 cmd &= 0xff;
2681 switch( cmd ) {
2682 case GETVAL:
2683 case SETVAL:
2684 /* In 64 bit cross-endian situations, we will erroneously pick up
2685 * the wrong half of the union for the "val" element. To rectify
2686 * this, the entire 8-byte structure is byteswapped, followed by
2687 * a swap of the 4 byte val field. In other cases, the data is
2688 * already in proper host byte order. */
2689 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
2690 target_su.buf = tswapal(target_su.buf);
2691 arg.val = tswap32(target_su.val);
2692 } else {
2693 arg.val = target_su.val;
2695 ret = get_errno(semctl(semid, semnum, cmd, arg));
2696 break;
2697 case GETALL:
2698 case SETALL:
2699 err = target_to_host_semarray(semid, &array, target_su.array);
2700 if (err)
2701 return err;
2702 arg.array = array;
2703 ret = get_errno(semctl(semid, semnum, cmd, arg));
2704 err = host_to_target_semarray(semid, target_su.array, &array);
2705 if (err)
2706 return err;
2707 break;
2708 case IPC_STAT:
2709 case IPC_SET:
2710 case SEM_STAT:
2711 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2712 if (err)
2713 return err;
2714 arg.buf = &dsarg;
2715 ret = get_errno(semctl(semid, semnum, cmd, arg));
2716 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2717 if (err)
2718 return err;
2719 break;
2720 case IPC_INFO:
2721 case SEM_INFO:
2722 arg.__buf = &seminfo;
2723 ret = get_errno(semctl(semid, semnum, cmd, arg));
2724 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2725 if (err)
2726 return err;
2727 break;
2728 case IPC_RMID:
2729 case GETPID:
2730 case GETNCNT:
2731 case GETZCNT:
2732 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2733 break;
2736 return ret;
2739 struct target_sembuf {
2740 unsigned short sem_num;
2741 short sem_op;
2742 short sem_flg;
2745 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2746 abi_ulong target_addr,
2747 unsigned nsops)
2749 struct target_sembuf *target_sembuf;
2750 int i;
2752 target_sembuf = lock_user(VERIFY_READ, target_addr,
2753 nsops*sizeof(struct target_sembuf), 1);
2754 if (!target_sembuf)
2755 return -TARGET_EFAULT;
2757 for(i=0; i<nsops; i++) {
2758 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2759 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2760 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2763 unlock_user(target_sembuf, target_addr, 0);
2765 return 0;
2768 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2770 struct sembuf sops[nsops];
2772 if (target_to_host_sembuf(sops, ptr, nsops))
2773 return -TARGET_EFAULT;
2775 return get_errno(semop(semid, sops, nsops));
2778 struct target_msqid_ds
2780 struct target_ipc_perm msg_perm;
2781 abi_ulong msg_stime;
2782 #if TARGET_ABI_BITS == 32
2783 abi_ulong __unused1;
2784 #endif
2785 abi_ulong msg_rtime;
2786 #if TARGET_ABI_BITS == 32
2787 abi_ulong __unused2;
2788 #endif
2789 abi_ulong msg_ctime;
2790 #if TARGET_ABI_BITS == 32
2791 abi_ulong __unused3;
2792 #endif
2793 abi_ulong __msg_cbytes;
2794 abi_ulong msg_qnum;
2795 abi_ulong msg_qbytes;
2796 abi_ulong msg_lspid;
2797 abi_ulong msg_lrpid;
2798 abi_ulong __unused4;
2799 abi_ulong __unused5;
2802 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2803 abi_ulong target_addr)
2805 struct target_msqid_ds *target_md;
2807 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2808 return -TARGET_EFAULT;
2809 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2810 return -TARGET_EFAULT;
2811 host_md->msg_stime = tswapal(target_md->msg_stime);
2812 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2813 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2814 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2815 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2816 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2817 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2818 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2819 unlock_user_struct(target_md, target_addr, 0);
2820 return 0;
2823 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2824 struct msqid_ds *host_md)
2826 struct target_msqid_ds *target_md;
2828 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2829 return -TARGET_EFAULT;
2830 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2831 return -TARGET_EFAULT;
2832 target_md->msg_stime = tswapal(host_md->msg_stime);
2833 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2834 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2835 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2836 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2837 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2838 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2839 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2840 unlock_user_struct(target_md, target_addr, 1);
2841 return 0;
2844 struct target_msginfo {
2845 int msgpool;
2846 int msgmap;
2847 int msgmax;
2848 int msgmnb;
2849 int msgmni;
2850 int msgssz;
2851 int msgtql;
2852 unsigned short int msgseg;
2855 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2856 struct msginfo *host_msginfo)
2858 struct target_msginfo *target_msginfo;
2859 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2860 return -TARGET_EFAULT;
2861 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2862 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2863 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2864 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2865 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2866 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2867 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2868 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2869 unlock_user_struct(target_msginfo, target_addr, 1);
2870 return 0;
2873 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2875 struct msqid_ds dsarg;
2876 struct msginfo msginfo;
2877 abi_long ret = -TARGET_EINVAL;
2879 cmd &= 0xff;
2881 switch (cmd) {
2882 case IPC_STAT:
2883 case IPC_SET:
2884 case MSG_STAT:
2885 if (target_to_host_msqid_ds(&dsarg,ptr))
2886 return -TARGET_EFAULT;
2887 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2888 if (host_to_target_msqid_ds(ptr,&dsarg))
2889 return -TARGET_EFAULT;
2890 break;
2891 case IPC_RMID:
2892 ret = get_errno(msgctl(msgid, cmd, NULL));
2893 break;
2894 case IPC_INFO:
2895 case MSG_INFO:
2896 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2897 if (host_to_target_msginfo(ptr, &msginfo))
2898 return -TARGET_EFAULT;
2899 break;
2902 return ret;
2905 struct target_msgbuf {
2906 abi_long mtype;
2907 char mtext[1];
2910 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2911 ssize_t msgsz, int msgflg)
2913 struct target_msgbuf *target_mb;
2914 struct msgbuf *host_mb;
2915 abi_long ret = 0;
2917 if (msgsz < 0) {
2918 return -TARGET_EINVAL;
2921 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2922 return -TARGET_EFAULT;
2923 host_mb = malloc(msgsz+sizeof(long));
2924 if (!host_mb) {
2925 unlock_user_struct(target_mb, msgp, 0);
2926 return -TARGET_ENOMEM;
2928 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2929 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2930 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2931 free(host_mb);
2932 unlock_user_struct(target_mb, msgp, 0);
2934 return ret;
2937 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2938 unsigned int msgsz, abi_long msgtyp,
2939 int msgflg)
2941 struct target_msgbuf *target_mb;
2942 char *target_mtext;
2943 struct msgbuf *host_mb;
2944 abi_long ret = 0;
2946 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2947 return -TARGET_EFAULT;
2949 host_mb = g_malloc(msgsz+sizeof(long));
2950 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2952 if (ret > 0) {
2953 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2954 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2955 if (!target_mtext) {
2956 ret = -TARGET_EFAULT;
2957 goto end;
2959 memcpy(target_mb->mtext, host_mb->mtext, ret);
2960 unlock_user(target_mtext, target_mtext_addr, ret);
2963 target_mb->mtype = tswapal(host_mb->mtype);
2965 end:
2966 if (target_mb)
2967 unlock_user_struct(target_mb, msgp, 1);
2968 g_free(host_mb);
2969 return ret;
2972 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2973 abi_ulong target_addr)
2975 struct target_shmid_ds *target_sd;
2977 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2978 return -TARGET_EFAULT;
2979 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2980 return -TARGET_EFAULT;
2981 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2982 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2983 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2984 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2985 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2986 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2987 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2988 unlock_user_struct(target_sd, target_addr, 0);
2989 return 0;
2992 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2993 struct shmid_ds *host_sd)
2995 struct target_shmid_ds *target_sd;
2997 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2998 return -TARGET_EFAULT;
2999 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3000 return -TARGET_EFAULT;
3001 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3002 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3003 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3004 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3005 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3006 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3007 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3008 unlock_user_struct(target_sd, target_addr, 1);
3009 return 0;
3012 struct target_shminfo {
3013 abi_ulong shmmax;
3014 abi_ulong shmmin;
3015 abi_ulong shmmni;
3016 abi_ulong shmseg;
3017 abi_ulong shmall;
3020 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3021 struct shminfo *host_shminfo)
3023 struct target_shminfo *target_shminfo;
3024 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3025 return -TARGET_EFAULT;
3026 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3027 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3028 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3029 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3030 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3031 unlock_user_struct(target_shminfo, target_addr, 1);
3032 return 0;
3035 struct target_shm_info {
3036 int used_ids;
3037 abi_ulong shm_tot;
3038 abi_ulong shm_rss;
3039 abi_ulong shm_swp;
3040 abi_ulong swap_attempts;
3041 abi_ulong swap_successes;
3044 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3045 struct shm_info *host_shm_info)
3047 struct target_shm_info *target_shm_info;
3048 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3049 return -TARGET_EFAULT;
3050 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3051 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3052 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3053 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3054 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3055 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3056 unlock_user_struct(target_shm_info, target_addr, 1);
3057 return 0;
3060 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3062 struct shmid_ds dsarg;
3063 struct shminfo shminfo;
3064 struct shm_info shm_info;
3065 abi_long ret = -TARGET_EINVAL;
3067 cmd &= 0xff;
3069 switch(cmd) {
3070 case IPC_STAT:
3071 case IPC_SET:
3072 case SHM_STAT:
3073 if (target_to_host_shmid_ds(&dsarg, buf))
3074 return -TARGET_EFAULT;
3075 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3076 if (host_to_target_shmid_ds(buf, &dsarg))
3077 return -TARGET_EFAULT;
3078 break;
3079 case IPC_INFO:
3080 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3081 if (host_to_target_shminfo(buf, &shminfo))
3082 return -TARGET_EFAULT;
3083 break;
3084 case SHM_INFO:
3085 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3086 if (host_to_target_shm_info(buf, &shm_info))
3087 return -TARGET_EFAULT;
3088 break;
3089 case IPC_RMID:
3090 case SHM_LOCK:
3091 case SHM_UNLOCK:
3092 ret = get_errno(shmctl(shmid, cmd, NULL));
3093 break;
3096 return ret;
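/* Added note: shmat() emulation.  The segment size is read back with
 * IPC_STAT first so that guest page flags can be set over the whole
 * mapping.  When the guest does not request a fixed address, one is chosen
 * with mmap_find_vma() and SHM_REMAP forces the host mapping there; the
 * resulting guest address is then remembered in shm_regions[] for a later
 * shmdt().
 */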
3099 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3101 abi_long raddr;
3102 void *host_raddr;
3103 struct shmid_ds shm_info;
3104 int i,ret;
3106 /* find out the length of the shared memory segment */
3107 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3108 if (is_error(ret)) {
3109 /* can't get length, bail out */
3110 return ret;
3113 mmap_lock();
3115 if (shmaddr)
3116 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3117 else {
3118 abi_ulong mmap_start;
3120 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3122 if (mmap_start == -1) {
3123 errno = ENOMEM;
3124 host_raddr = (void *)-1;
3125 } else
3126 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3129 if (host_raddr == (void *)-1) {
3130 mmap_unlock();
3131 return get_errno((long)host_raddr);
3133 raddr=h2g((unsigned long)host_raddr);
3135 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3136 PAGE_VALID | PAGE_READ |
3137 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3139 for (i = 0; i < N_SHM_REGIONS; i++) {
3140 if (shm_regions[i].start == 0) {
3141 shm_regions[i].start = raddr;
3142 shm_regions[i].size = shm_info.shm_segsz;
3143 break;
3147 mmap_unlock();
3148 return raddr;
3152 static inline abi_long do_shmdt(abi_ulong shmaddr)
3154 int i;
3156 for (i = 0; i < N_SHM_REGIONS; ++i) {
3157 if (shm_regions[i].start == shmaddr) {
3158 shm_regions[i].start = 0;
3159 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3160 break;
3164 return get_errno(shmdt(g2h(shmaddr)));
3167 #ifdef TARGET_NR_ipc
3168 /* ??? This only works with linear mappings. */
3169 /* do_ipc() must return target values and target errnos. */
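/* Added note: the legacy ipc() syscall multiplexes all SysV IPC operations.
 * The low 16 bits of 'call' select the operation and the high 16 bits carry
 * a version number that changes how some pointer arguments are laid out
 * (msgrcv version 0, for example, passes msgp and msgtyp through a small
 * kludge structure).
 */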
3170 static abi_long do_ipc(unsigned int call, abi_long first,
3171 abi_long second, abi_long third,
3172 abi_long ptr, abi_long fifth)
3174 int version;
3175 abi_long ret = 0;
3177 version = call >> 16;
3178 call &= 0xffff;
3180 switch (call) {
3181 case IPCOP_semop:
3182 ret = do_semop(first, ptr, second);
3183 break;
3185 case IPCOP_semget:
3186 ret = get_errno(semget(first, second, third));
3187 break;
3189 case IPCOP_semctl: {
3190 /* The semun argument to semctl is passed by value, so dereference the
3191 * ptr argument. */
3192 abi_ulong atptr;
3193 get_user_ual(atptr, ptr);
3194 ret = do_semctl(first, second, third,
3195 (union target_semun) atptr);
3196 break;
3199 case IPCOP_msgget:
3200 ret = get_errno(msgget(first, second));
3201 break;
3203 case IPCOP_msgsnd:
3204 ret = do_msgsnd(first, ptr, second, third);
3205 break;
3207 case IPCOP_msgctl:
3208 ret = do_msgctl(first, second, ptr);
3209 break;
3211 case IPCOP_msgrcv:
3212 switch (version) {
3213 case 0:
3215 struct target_ipc_kludge {
3216 abi_long msgp;
3217 abi_long msgtyp;
3218 } *tmp;
3220 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3221 ret = -TARGET_EFAULT;
3222 break;
3225 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3227 unlock_user_struct(tmp, ptr, 0);
3228 break;
3230 default:
3231 ret = do_msgrcv(first, ptr, second, fifth, third);
3233 break;
3235 case IPCOP_shmat:
3236 switch (version) {
3237 default:
3239 abi_ulong raddr;
3240 raddr = do_shmat(first, ptr, second);
3241 if (is_error(raddr))
3242 return get_errno(raddr);
3243 if (put_user_ual(raddr, third))
3244 return -TARGET_EFAULT;
3245 break;
3247 case 1:
3248 ret = -TARGET_EINVAL;
3249 break;
3251 break;
3252 case IPCOP_shmdt:
3253 ret = do_shmdt(ptr);
3254 break;
3256 case IPCOP_shmget:
3257 /* IPC_* flag values are the same on all linux platforms */
3258 ret = get_errno(shmget(first, second, third));
3259 break;
3261 /* IPC_* and SHM_* command values are the same on all linux platforms */
3262 case IPCOP_shmctl:
3263 ret = do_shmctl(first, second, ptr);
3264 break;
3265 default:
3266 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3267 ret = -TARGET_ENOSYS;
3268 break;
3270 return ret;
3272 #endif
3274 /* kernel structure types definitions */
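/* Added note: syscall_types.h is included twice with different STRUCT() /
 * STRUCT_SPECIAL() definitions.  The first pass builds an enum of STRUCT_*
 * identifiers; the second emits a struct_<name>_def[] argtype array for each
 * structure.  These descriptors drive thunk_type_size() and thunk_convert()
 * when ioctl arguments are copied between guest and host layouts.
 */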
3276 #define STRUCT(name, ...) STRUCT_ ## name,
3277 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3278 enum {
3279 #include "syscall_types.h"
3280 STRUCT_MAX
3282 #undef STRUCT
3283 #undef STRUCT_SPECIAL
3285 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3286 #define STRUCT_SPECIAL(name)
3287 #include "syscall_types.h"
3288 #undef STRUCT
3289 #undef STRUCT_SPECIAL
3291 typedef struct IOCTLEntry IOCTLEntry;
3293 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3294 int fd, abi_long cmd, abi_long arg);
3296 struct IOCTLEntry {
3297 int target_cmd;
3298 unsigned int host_cmd;
3299 const char *name;
3300 int access;
3301 do_ioctl_fn *do_ioctl;
3302 const argtype arg_type[5];
3305 #define IOC_R 0x0001
3306 #define IOC_W 0x0002
3307 #define IOC_RW (IOC_R | IOC_W)
3309 #define MAX_STRUCT_SIZE 4096
3311 #ifdef CONFIG_FIEMAP
3312 /* So fiemap access checks don't overflow on 32 bit systems.
3313 * This is very slightly smaller than the limit imposed by
3314 * the underlying kernel.
3316 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3317 / sizeof(struct fiemap_extent))
3319 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3320 int fd, abi_long cmd, abi_long arg)
3322 /* The parameter for this ioctl is a struct fiemap followed
3323 * by an array of struct fiemap_extent whose size is set
3324 * in fiemap->fm_extent_count. The array is filled in by the
3325 * ioctl.
3327 int target_size_in, target_size_out;
3328 struct fiemap *fm;
3329 const argtype *arg_type = ie->arg_type;
3330 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3331 void *argptr, *p;
3332 abi_long ret;
3333 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3334 uint32_t outbufsz;
3335 int free_fm = 0;
3337 assert(arg_type[0] == TYPE_PTR);
3338 assert(ie->access == IOC_RW);
3339 arg_type++;
3340 target_size_in = thunk_type_size(arg_type, 0);
3341 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3342 if (!argptr) {
3343 return -TARGET_EFAULT;
3345 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3346 unlock_user(argptr, arg, 0);
3347 fm = (struct fiemap *)buf_temp;
3348 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3349 return -TARGET_EINVAL;
3352 outbufsz = sizeof (*fm) +
3353 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3355 if (outbufsz > MAX_STRUCT_SIZE) {
3356 /* We can't fit all the extents into the fixed size buffer.
3357 * Allocate one that is large enough and use it instead.
3359 fm = malloc(outbufsz);
3360 if (!fm) {
3361 return -TARGET_ENOMEM;
3363 memcpy(fm, buf_temp, sizeof(struct fiemap));
3364 free_fm = 1;
3366 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3367 if (!is_error(ret)) {
3368 target_size_out = target_size_in;
3369 /* An extent_count of 0 means we were only counting the extents
3370 * so there are no structs to copy
3372 if (fm->fm_extent_count != 0) {
3373 target_size_out += fm->fm_mapped_extents * extent_size;
3375 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3376 if (!argptr) {
3377 ret = -TARGET_EFAULT;
3378 } else {
3379 /* Convert the struct fiemap */
3380 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3381 if (fm->fm_extent_count != 0) {
3382 p = argptr + target_size_in;
3383 /* ...and then all the struct fiemap_extents */
3384 for (i = 0; i < fm->fm_mapped_extents; i++) {
3385 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3386 THUNK_TARGET);
3387 p += extent_size;
3390 unlock_user(argptr, arg, target_size_out);
3393 if (free_fm) {
3394 free(fm);
3396 return ret;
3398 #endif
3400 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3401 int fd, abi_long cmd, abi_long arg)
3403 const argtype *arg_type = ie->arg_type;
3404 int target_size;
3405 void *argptr;
3406 int ret;
3407 struct ifconf *host_ifconf;
3408 uint32_t outbufsz;
3409 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3410 int target_ifreq_size;
3411 int nb_ifreq;
3412 int free_buf = 0;
3413 int i;
3414 int target_ifc_len;
3415 abi_long target_ifc_buf;
3416 int host_ifc_len;
3417 char *host_ifc_buf;
3419 assert(arg_type[0] == TYPE_PTR);
3420 assert(ie->access == IOC_RW);
3422 arg_type++;
3423 target_size = thunk_type_size(arg_type, 0);
3425 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3426 if (!argptr)
3427 return -TARGET_EFAULT;
3428 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3429 unlock_user(argptr, arg, 0);
3431 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3432 target_ifc_len = host_ifconf->ifc_len;
3433 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3435 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3436 nb_ifreq = target_ifc_len / target_ifreq_size;
3437 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3439 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3440 if (outbufsz > MAX_STRUCT_SIZE) {
3441 /* We can't fit all the ifreq entries into the fixed size buffer.
3442 * Allocate one that is large enough and use it instead.
3444 host_ifconf = malloc(outbufsz);
3445 if (!host_ifconf) {
3446 return -TARGET_ENOMEM;
3448 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3449 free_buf = 1;
3451 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3453 host_ifconf->ifc_len = host_ifc_len;
3454 host_ifconf->ifc_buf = host_ifc_buf;
3456 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3457 if (!is_error(ret)) {
3458 /* convert host ifc_len to target ifc_len */
3460 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3461 target_ifc_len = nb_ifreq * target_ifreq_size;
3462 host_ifconf->ifc_len = target_ifc_len;
3464 /* restore target ifc_buf */
3466 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3468 /* copy struct ifconf to target user */
3470 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3471 if (!argptr)
3472 return -TARGET_EFAULT;
3473 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3474 unlock_user(argptr, arg, target_size);
3476 /* copy ifreq[] to target user */
3478 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3479 for (i = 0; i < nb_ifreq ; i++) {
3480 thunk_convert(argptr + i * target_ifreq_size,
3481 host_ifc_buf + i * sizeof(struct ifreq),
3482 ifreq_arg_type, THUNK_TARGET);
3484 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3487 if (free_buf) {
3488 free(host_ifconf);
3491 return ret;
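/* Added note: device-mapper ioctls carry a fixed struct dm_ioctl header
 * followed by a variable-sized payload located data_start bytes into the
 * buffer.  The header advertises data_size up front, so the request is
 * staged in a temporary buffer large enough for both directions and the
 * payload is converted per command, since its layout differs from one
 * command to the next.
 */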
3494 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3495 abi_long cmd, abi_long arg)
3497 void *argptr;
3498 struct dm_ioctl *host_dm;
3499 abi_long guest_data;
3500 uint32_t guest_data_size;
3501 int target_size;
3502 const argtype *arg_type = ie->arg_type;
3503 abi_long ret;
3504 void *big_buf = NULL;
3505 char *host_data;
3507 arg_type++;
3508 target_size = thunk_type_size(arg_type, 0);
3509 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3510 if (!argptr) {
3511 ret = -TARGET_EFAULT;
3512 goto out;
3514 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3515 unlock_user(argptr, arg, 0);
3517 /* buf_temp is too small, so fetch things into a bigger buffer */
3518 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3519 memcpy(big_buf, buf_temp, target_size);
3520 buf_temp = big_buf;
3521 host_dm = big_buf;
3523 guest_data = arg + host_dm->data_start;
3524 if ((guest_data - arg) < 0) {
3525 ret = -EINVAL;
3526 goto out;
3528 guest_data_size = host_dm->data_size - host_dm->data_start;
3529 host_data = (char*)host_dm + host_dm->data_start;
3531 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3532 switch (ie->host_cmd) {
3533 case DM_REMOVE_ALL:
3534 case DM_LIST_DEVICES:
3535 case DM_DEV_CREATE:
3536 case DM_DEV_REMOVE:
3537 case DM_DEV_SUSPEND:
3538 case DM_DEV_STATUS:
3539 case DM_DEV_WAIT:
3540 case DM_TABLE_STATUS:
3541 case DM_TABLE_CLEAR:
3542 case DM_TABLE_DEPS:
3543 case DM_LIST_VERSIONS:
3544 /* no input data */
3545 break;
3546 case DM_DEV_RENAME:
3547 case DM_DEV_SET_GEOMETRY:
3548 /* data contains only strings */
3549 memcpy(host_data, argptr, guest_data_size);
3550 break;
3551 case DM_TARGET_MSG:
3552 memcpy(host_data, argptr, guest_data_size);
3553 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3554 break;
3555 case DM_TABLE_LOAD:
3557 void *gspec = argptr;
3558 void *cur_data = host_data;
3559 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3560 int spec_size = thunk_type_size(arg_type, 0);
3561 int i;
3563 for (i = 0; i < host_dm->target_count; i++) {
3564 struct dm_target_spec *spec = cur_data;
3565 uint32_t next;
3566 int slen;
3568 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3569 slen = strlen((char*)gspec + spec_size) + 1;
3570 next = spec->next;
3571 spec->next = sizeof(*spec) + slen;
3572 strcpy((char*)&spec[1], gspec + spec_size);
3573 gspec += next;
3574 cur_data += spec->next;
3576 break;
3578 default:
3579 ret = -TARGET_EINVAL;
3580 unlock_user(argptr, guest_data, 0);
3581 goto out;
3583 unlock_user(argptr, guest_data, 0);
3585 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3586 if (!is_error(ret)) {
3587 guest_data = arg + host_dm->data_start;
3588 guest_data_size = host_dm->data_size - host_dm->data_start;
3589 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3590 switch (ie->host_cmd) {
3591 case DM_REMOVE_ALL:
3592 case DM_DEV_CREATE:
3593 case DM_DEV_REMOVE:
3594 case DM_DEV_RENAME:
3595 case DM_DEV_SUSPEND:
3596 case DM_DEV_STATUS:
3597 case DM_TABLE_LOAD:
3598 case DM_TABLE_CLEAR:
3599 case DM_TARGET_MSG:
3600 case DM_DEV_SET_GEOMETRY:
3601 /* no return data */
3602 break;
3603 case DM_LIST_DEVICES:
3605 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3606 uint32_t remaining_data = guest_data_size;
3607 void *cur_data = argptr;
3608 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3609 int nl_size = 12; /* can't use thunk_type_size() due to alignment */
3611 while (1) {
3612 uint32_t next = nl->next;
3613 if (next) {
3614 nl->next = nl_size + (strlen(nl->name) + 1);
3616 if (remaining_data < nl->next) {
3617 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3618 break;
3620 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3621 strcpy(cur_data + nl_size, nl->name);
3622 cur_data += nl->next;
3623 remaining_data -= nl->next;
3624 if (!next) {
3625 break;
3627 nl = (void*)nl + next;
3629 break;
3631 case DM_DEV_WAIT:
3632 case DM_TABLE_STATUS:
3634 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3635 void *cur_data = argptr;
3636 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3637 int spec_size = thunk_type_size(arg_type, 0);
3638 int i;
3640 for (i = 0; i < host_dm->target_count; i++) {
3641 uint32_t next = spec->next;
3642 int slen = strlen((char*)&spec[1]) + 1;
3643 spec->next = (cur_data - argptr) + spec_size + slen;
3644 if (guest_data_size < spec->next) {
3645 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3646 break;
3648 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3649 strcpy(cur_data + spec_size, (char*)&spec[1]);
3650 cur_data = argptr + spec->next;
3651 spec = (void*)host_dm + host_dm->data_start + next;
3653 break;
3655 case DM_TABLE_DEPS:
3657 void *hdata = (void*)host_dm + host_dm->data_start;
3658 int count = *(uint32_t*)hdata;
3659 uint64_t *hdev = hdata + 8;
3660 uint64_t *gdev = argptr + 8;
3661 int i;
3663 *(uint32_t*)argptr = tswap32(count);
3664 for (i = 0; i < count; i++) {
3665 *gdev = tswap64(*hdev);
3666 gdev++;
3667 hdev++;
3669 break;
3671 case DM_LIST_VERSIONS:
3673 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3674 uint32_t remaining_data = guest_data_size;
3675 void *cur_data = argptr;
3676 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3677 int vers_size = thunk_type_size(arg_type, 0);
3679 while (1) {
3680 uint32_t next = vers->next;
3681 if (next) {
3682 vers->next = vers_size + (strlen(vers->name) + 1);
3684 if (remaining_data < vers->next) {
3685 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3686 break;
3688 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3689 strcpy(cur_data + vers_size, vers->name);
3690 cur_data += vers->next;
3691 remaining_data -= vers->next;
3692 if (!next) {
3693 break;
3695 vers = (void*)vers + next;
3697 break;
3699 default:
3700 unlock_user(argptr, guest_data, 0);
3701 ret = -TARGET_EINVAL;
3702 goto out;
3704 unlock_user(argptr, guest_data, guest_data_size);
3706 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3707 if (!argptr) {
3708 ret = -TARGET_EFAULT;
3709 goto out;
3711 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3712 unlock_user(argptr, arg, target_size);
3714 out:
3715 g_free(big_buf);
3716 return ret;
3719 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3720 abi_long cmd, abi_long arg)
3722 void *argptr;
3723 int target_size;
3724 const argtype *arg_type = ie->arg_type;
3725 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
3726 abi_long ret;
3728 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
3729 struct blkpg_partition host_part;
3731 /* Read and convert blkpg */
3732 arg_type++;
3733 target_size = thunk_type_size(arg_type, 0);
3734 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3735 if (!argptr) {
3736 ret = -TARGET_EFAULT;
3737 goto out;
3739 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3740 unlock_user(argptr, arg, 0);
3742 switch (host_blkpg->op) {
3743 case BLKPG_ADD_PARTITION:
3744 case BLKPG_DEL_PARTITION:
3745 /* payload is struct blkpg_partition */
3746 break;
3747 default:
3748 /* Unknown opcode */
3749 ret = -TARGET_EINVAL;
3750 goto out;
3753 /* Read and convert blkpg->data */
3754 arg = (abi_long)(uintptr_t)host_blkpg->data;
3755 target_size = thunk_type_size(part_arg_type, 0);
3756 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3757 if (!argptr) {
3758 ret = -TARGET_EFAULT;
3759 goto out;
3761 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
3762 unlock_user(argptr, arg, 0);
3764 /* Swizzle the data pointer to our local copy and call! */
3765 host_blkpg->data = &host_part;
3766 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
3768 out:
3769 return ret;
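/* Added note: the routing-table ioctls (such as SIOCADDRT) take a struct
 * rtentry whose rt_dev member points at a device-name string in guest
 * memory.  The generic thunk conversion cannot follow that pointer, so the
 * struct is converted field by field here and rt_dev is replaced with a
 * locked host copy of the string for the duration of the ioctl.
 */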
3772 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3773 int fd, abi_long cmd, abi_long arg)
3775 const argtype *arg_type = ie->arg_type;
3776 const StructEntry *se;
3777 const argtype *field_types;
3778 const int *dst_offsets, *src_offsets;
3779 int target_size;
3780 void *argptr;
3781 abi_ulong *target_rt_dev_ptr;
3782 unsigned long *host_rt_dev_ptr;
3783 abi_long ret;
3784 int i;
3786 assert(ie->access == IOC_W);
3787 assert(*arg_type == TYPE_PTR);
3788 arg_type++;
3789 assert(*arg_type == TYPE_STRUCT);
3790 target_size = thunk_type_size(arg_type, 0);
3791 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3792 if (!argptr) {
3793 return -TARGET_EFAULT;
3795 arg_type++;
3796 assert(*arg_type == (int)STRUCT_rtentry);
3797 se = struct_entries + *arg_type++;
3798 assert(se->convert[0] == NULL);
3799 /* convert the struct by hand here so we can intercept the rt_dev string */
3800 field_types = se->field_types;
3801 dst_offsets = se->field_offsets[THUNK_HOST];
3802 src_offsets = se->field_offsets[THUNK_TARGET];
3803 for (i = 0; i < se->nb_fields; i++) {
3804 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3805 assert(*field_types == TYPE_PTRVOID);
3806 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3807 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3808 if (*target_rt_dev_ptr != 0) {
3809 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3810 tswapal(*target_rt_dev_ptr));
3811 if (!*host_rt_dev_ptr) {
3812 unlock_user(argptr, arg, 0);
3813 return -TARGET_EFAULT;
3815 } else {
3816 *host_rt_dev_ptr = 0;
3818 field_types++;
3819 continue;
3821 field_types = thunk_convert(buf_temp + dst_offsets[i],
3822 argptr + src_offsets[i],
3823 field_types, THUNK_HOST);
3825 unlock_user(argptr, arg, 0);
3827 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3828 if (*host_rt_dev_ptr != 0) {
3829 unlock_user((void *)*host_rt_dev_ptr,
3830 *target_rt_dev_ptr, 0);
3832 return ret;
3835 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
3836 int fd, abi_long cmd, abi_long arg)
3838 int sig = target_to_host_signal(arg);
3839 return get_errno(ioctl(fd, ie->host_cmd, sig));
3842 static IOCTLEntry ioctl_entries[] = {
3843 #define IOCTL(cmd, access, ...) \
3844 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3845 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3846 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3847 #include "ioctls.h"
3848 { 0, 0, },
3851 /* ??? Implement proper locking for ioctls. */
3852 /* do_ioctl() Must return target values and target errnos. */
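/* Added note: generic ioctl dispatcher.  The target command is looked up
 * linearly in ioctl_entries[] (generated from ioctls.h); entries with a
 * do_ioctl callback get fully custom handling, otherwise the argument is
 * converted according to its argtype description and the IOC_R/IOC_W/IOC_RW
 * access mode, with buf_temp serving as the host-side staging buffer.
 */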
3853 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3855 const IOCTLEntry *ie;
3856 const argtype *arg_type;
3857 abi_long ret;
3858 uint8_t buf_temp[MAX_STRUCT_SIZE];
3859 int target_size;
3860 void *argptr;
3862 ie = ioctl_entries;
3863 for(;;) {
3864 if (ie->target_cmd == 0) {
3865 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3866 return -TARGET_ENOSYS;
3868 if (ie->target_cmd == cmd)
3869 break;
3870 ie++;
3872 arg_type = ie->arg_type;
3873 #if defined(DEBUG)
3874 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3875 #endif
3876 if (ie->do_ioctl) {
3877 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3880 switch(arg_type[0]) {
3881 case TYPE_NULL:
3882 /* no argument */
3883 ret = get_errno(ioctl(fd, ie->host_cmd));
3884 break;
3885 case TYPE_PTRVOID:
3886 case TYPE_INT:
3887 /* int argument */
3888 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3889 break;
3890 case TYPE_PTR:
3891 arg_type++;
3892 target_size = thunk_type_size(arg_type, 0);
3893 switch(ie->access) {
3894 case IOC_R:
3895 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3896 if (!is_error(ret)) {
3897 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3898 if (!argptr)
3899 return -TARGET_EFAULT;
3900 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3901 unlock_user(argptr, arg, target_size);
3903 break;
3904 case IOC_W:
3905 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3906 if (!argptr)
3907 return -TARGET_EFAULT;
3908 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3909 unlock_user(argptr, arg, 0);
3910 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3911 break;
3912 default:
3913 case IOC_RW:
3914 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3915 if (!argptr)
3916 return -TARGET_EFAULT;
3917 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3918 unlock_user(argptr, arg, 0);
3919 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3920 if (!is_error(ret)) {
3921 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3922 if (!argptr)
3923 return -TARGET_EFAULT;
3924 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3925 unlock_user(argptr, arg, target_size);
3927 break;
3929 break;
3930 default:
3931 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3932 (long)cmd, arg_type[0]);
3933 ret = -TARGET_ENOSYS;
3934 break;
3936 return ret;
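/* Added note: terminal flag translation tables.  Each bitmask_transtbl
 * entry is { target_mask, target_bits, host_mask, host_bits };
 * target_to_host_bitmask() and host_to_target_bitmask() use them to rewrite
 * the termios c_iflag, c_oflag, c_cflag and c_lflag fields between the
 * guest and host encodings.
 */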
3939 static const bitmask_transtbl iflag_tbl[] = {
3940 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3941 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3942 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3943 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3944 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3945 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3946 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3947 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3948 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3949 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3950 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3951 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3952 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3953 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3954 { 0, 0, 0, 0 }
3957 static const bitmask_transtbl oflag_tbl[] = {
3958 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3959 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3960 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3961 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3962 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3963 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3964 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3965 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3966 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3967 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3968 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3969 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3970 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3971 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3972 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3973 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3974 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3975 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3976 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3977 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3978 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3979 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3980 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3981 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3982 { 0, 0, 0, 0 }
3985 static const bitmask_transtbl cflag_tbl[] = {
3986 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3987 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3988 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3989 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3990 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3991 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3992 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3993 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3994 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3995 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3996 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3997 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3998 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3999 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4000 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4001 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4002 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4003 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4004 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4005 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4006 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4007 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4008 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4009 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4010 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4011 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4012 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4013 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4014 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4015 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4016 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4017 { 0, 0, 0, 0 }
4020 static const bitmask_transtbl lflag_tbl[] = {
4021 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4022 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4023 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4024 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4025 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4026 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4027 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4028 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4029 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4030 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4031 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4032 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4033 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4034 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4035 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4036 { 0, 0, 0, 0 }
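/* Convert a struct termios between target and host layouts: the four flag
   words are byteswapped and remapped through the tables above, and the
   control characters are copied one index at a time because the TARGET_V*
   positions need not match the host V* positions. */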
4039 static void target_to_host_termios (void *dst, const void *src)
4041 struct host_termios *host = dst;
4042 const struct target_termios *target = src;
4044 host->c_iflag =
4045 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4046 host->c_oflag =
4047 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4048 host->c_cflag =
4049 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4050 host->c_lflag =
4051 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4052 host->c_line = target->c_line;
4054 memset(host->c_cc, 0, sizeof(host->c_cc));
4055 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4056 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4057 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4058 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4059 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4060 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4061 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4062 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4063 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4064 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4065 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4066 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4067 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4068 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4069 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4070 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4071 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
4074 static void host_to_target_termios (void *dst, const void *src)
4076 struct target_termios *target = dst;
4077 const struct host_termios *host = src;
4079 target->c_iflag =
4080 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4081 target->c_oflag =
4082 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4083 target->c_cflag =
4084 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4085 target->c_lflag =
4086 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4087 target->c_line = host->c_line;
4089 memset(target->c_cc, 0, sizeof(target->c_cc));
4090 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4091 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4092 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4093 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4094 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4095 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4096 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4097 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4098 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4099 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4100 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4101 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4102 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4103 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4104 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4105 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4106 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
4109 static const StructEntry struct_termios_def = {
4110 .convert = { host_to_target_termios, target_to_host_termios },
4111 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4112 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
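/* Translation table for mmap() flag bits between target and host
   definitions. */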
4115 static bitmask_transtbl mmap_flags_tbl[] = {
4116 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4117 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4118 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4119 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4120 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4121 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4122 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4123 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4124 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4125 MAP_NORESERVE },
4126 { 0, 0, 0, 0 }
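/* i386-specific segment descriptor emulation follows: modify_ldt(),
   set_thread_area()/get_thread_area() and arch_prctl(). */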
4129 #if defined(TARGET_I386)
4131 /* NOTE: there is really one LDT for all the threads */
4132 static uint8_t *ldt_table;
4134 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4136 int size;
4137 void *p;
4139 if (!ldt_table)
4140 return 0;
4141 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4142 if (size > bytecount)
4143 size = bytecount;
4144 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4145 if (!p)
4146 return -TARGET_EFAULT;
4147 /* ??? Should this be byteswapped? */
4148 memcpy(p, ldt_table, size);
4149 unlock_user(p, ptr, size);
4150 return size;
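/* write_ldt() builds the two 32-bit halves of an x86 descriptor from the
   guest's modify_ldt() arguments and stores them into a software LDT
   table that is allocated in guest memory on first use. */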
4153 /* XXX: add locking support */
4154 static abi_long write_ldt(CPUX86State *env,
4155 abi_ulong ptr, unsigned long bytecount, int oldmode)
4157 struct target_modify_ldt_ldt_s ldt_info;
4158 struct target_modify_ldt_ldt_s *target_ldt_info;
4159 int seg_32bit, contents, read_exec_only, limit_in_pages;
4160 int seg_not_present, useable, lm;
4161 uint32_t *lp, entry_1, entry_2;
4163 if (bytecount != sizeof(ldt_info))
4164 return -TARGET_EINVAL;
4165 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4166 return -TARGET_EFAULT;
4167 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4168 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4169 ldt_info.limit = tswap32(target_ldt_info->limit);
4170 ldt_info.flags = tswap32(target_ldt_info->flags);
4171 unlock_user_struct(target_ldt_info, ptr, 0);
4173 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4174 return -TARGET_EINVAL;
4175 seg_32bit = ldt_info.flags & 1;
4176 contents = (ldt_info.flags >> 1) & 3;
4177 read_exec_only = (ldt_info.flags >> 3) & 1;
4178 limit_in_pages = (ldt_info.flags >> 4) & 1;
4179 seg_not_present = (ldt_info.flags >> 5) & 1;
4180 useable = (ldt_info.flags >> 6) & 1;
4181 #ifdef TARGET_ABI32
4182 lm = 0;
4183 #else
4184 lm = (ldt_info.flags >> 7) & 1;
4185 #endif
4186 if (contents == 3) {
4187 if (oldmode)
4188 return -TARGET_EINVAL;
4189 if (seg_not_present == 0)
4190 return -TARGET_EINVAL;
4192 /* allocate the LDT */
4193 if (!ldt_table) {
4194 env->ldt.base = target_mmap(0,
4195 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4196 PROT_READ|PROT_WRITE,
4197 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4198 if (env->ldt.base == -1)
4199 return -TARGET_ENOMEM;
4200 memset(g2h(env->ldt.base), 0,
4201 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4202 env->ldt.limit = 0xffff;
4203 ldt_table = g2h(env->ldt.base);
4206 /* NOTE: same code as Linux kernel */
4207 /* Allow LDTs to be cleared by the user. */
4208 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4209 if (oldmode ||
4210 (contents == 0 &&
4211 read_exec_only == 1 &&
4212 seg_32bit == 0 &&
4213 limit_in_pages == 0 &&
4214 seg_not_present == 1 &&
4215 useable == 0 )) {
4216 entry_1 = 0;
4217 entry_2 = 0;
4218 goto install;
4222 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4223 (ldt_info.limit & 0x0ffff);
4224 entry_2 = (ldt_info.base_addr & 0xff000000) |
4225 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4226 (ldt_info.limit & 0xf0000) |
4227 ((read_exec_only ^ 1) << 9) |
4228 (contents << 10) |
4229 ((seg_not_present ^ 1) << 15) |
4230 (seg_32bit << 22) |
4231 (limit_in_pages << 23) |
4232 (lm << 21) |
4233 0x7000;
4234 if (!oldmode)
4235 entry_2 |= (useable << 20);
4237 /* Install the new entry ... */
4238 install:
4239 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4240 lp[0] = tswap32(entry_1);
4241 lp[1] = tswap32(entry_2);
4242 return 0;
4245 /* specific and weird i386 syscalls */
4246 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4247 unsigned long bytecount)
4249 abi_long ret;
4251 switch (func) {
4252 case 0:
4253 ret = read_ldt(ptr, bytecount);
4254 break;
4255 case 1:
4256 ret = write_ldt(env, ptr, bytecount, 1);
4257 break;
4258 case 0x11:
4259 ret = write_ldt(env, ptr, bytecount, 0);
4260 break;
4261 default:
4262 ret = -TARGET_ENOSYS;
4263 break;
4265 return ret;
4268 #if defined(TARGET_I386) && defined(TARGET_ABI32)
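/* set_thread_area(): when entry_number is -1, pick a free GDT TLS slot in
   the range TARGET_GDT_ENTRY_TLS_MIN..TARGET_GDT_ENTRY_TLS_MAX, then
   install a descriptor encoded the same way as in write_ldt(). */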
4269 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4271 uint64_t *gdt_table = g2h(env->gdt.base);
4272 struct target_modify_ldt_ldt_s ldt_info;
4273 struct target_modify_ldt_ldt_s *target_ldt_info;
4274 int seg_32bit, contents, read_exec_only, limit_in_pages;
4275 int seg_not_present, useable, lm;
4276 uint32_t *lp, entry_1, entry_2;
4277 int i;
4279 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4280 if (!target_ldt_info)
4281 return -TARGET_EFAULT;
4282 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4283 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4284 ldt_info.limit = tswap32(target_ldt_info->limit);
4285 ldt_info.flags = tswap32(target_ldt_info->flags);
4286 if (ldt_info.entry_number == -1) {
4287 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4288 if (gdt_table[i] == 0) {
4289 ldt_info.entry_number = i;
4290 target_ldt_info->entry_number = tswap32(i);
4291 break;
4295 unlock_user_struct(target_ldt_info, ptr, 1);
4297 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4298 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4299 return -TARGET_EINVAL;
4300 seg_32bit = ldt_info.flags & 1;
4301 contents = (ldt_info.flags >> 1) & 3;
4302 read_exec_only = (ldt_info.flags >> 3) & 1;
4303 limit_in_pages = (ldt_info.flags >> 4) & 1;
4304 seg_not_present = (ldt_info.flags >> 5) & 1;
4305 useable = (ldt_info.flags >> 6) & 1;
4306 #ifdef TARGET_ABI32
4307 lm = 0;
4308 #else
4309 lm = (ldt_info.flags >> 7) & 1;
4310 #endif
4312 if (contents == 3) {
4313 if (seg_not_present == 0)
4314 return -TARGET_EINVAL;
4317 /* NOTE: same code as Linux kernel */
4318 /* Allow LDTs to be cleared by the user. */
4319 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4320 if ((contents == 0 &&
4321 read_exec_only == 1 &&
4322 seg_32bit == 0 &&
4323 limit_in_pages == 0 &&
4324 seg_not_present == 1 &&
4325 useable == 0 )) {
4326 entry_1 = 0;
4327 entry_2 = 0;
4328 goto install;
4332 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4333 (ldt_info.limit & 0x0ffff);
4334 entry_2 = (ldt_info.base_addr & 0xff000000) |
4335 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4336 (ldt_info.limit & 0xf0000) |
4337 ((read_exec_only ^ 1) << 9) |
4338 (contents << 10) |
4339 ((seg_not_present ^ 1) << 15) |
4340 (seg_32bit << 22) |
4341 (limit_in_pages << 23) |
4342 (useable << 20) |
4343 (lm << 21) |
4344 0x7000;
4346 /* Install the new entry ... */
4347 install:
4348 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4349 lp[0] = tswap32(entry_1);
4350 lp[1] = tswap32(entry_2);
4351 return 0;
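/* get_thread_area(): decode the selected GDT descriptor back into the
   target's user_desc-style fields. */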
4354 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4356 struct target_modify_ldt_ldt_s *target_ldt_info;
4357 uint64_t *gdt_table = g2h(env->gdt.base);
4358 uint32_t base_addr, limit, flags;
4359 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4360 int seg_not_present, useable, lm;
4361 uint32_t *lp, entry_1, entry_2;
4363 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4364 if (!target_ldt_info)
4365 return -TARGET_EFAULT;
4366 idx = tswap32(target_ldt_info->entry_number);
4367 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4368 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4369 unlock_user_struct(target_ldt_info, ptr, 1);
4370 return -TARGET_EINVAL;
4372 lp = (uint32_t *)(gdt_table + idx);
4373 entry_1 = tswap32(lp[0]);
4374 entry_2 = tswap32(lp[1]);
4376 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4377 contents = (entry_2 >> 10) & 3;
4378 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4379 seg_32bit = (entry_2 >> 22) & 1;
4380 limit_in_pages = (entry_2 >> 23) & 1;
4381 useable = (entry_2 >> 20) & 1;
4382 #ifdef TARGET_ABI32
4383 lm = 0;
4384 #else
4385 lm = (entry_2 >> 21) & 1;
4386 #endif
4387 flags = (seg_32bit << 0) | (contents << 1) |
4388 (read_exec_only << 3) | (limit_in_pages << 4) |
4389 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4390 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4391 base_addr = (entry_1 >> 16) |
4392 (entry_2 & 0xff000000) |
4393 ((entry_2 & 0xff) << 16);
4394 target_ldt_info->base_addr = tswapal(base_addr);
4395 target_ldt_info->limit = tswap32(limit);
4396 target_ldt_info->flags = tswap32(flags);
4397 unlock_user_struct(target_ldt_info, ptr, 1);
4398 return 0;
4400 #endif /* TARGET_I386 && TARGET_ABI32 */
4402 #ifndef TARGET_ABI32
4403 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4405 abi_long ret = 0;
4406 abi_ulong val;
4407 int idx;
4409 switch(code) {
4410 case TARGET_ARCH_SET_GS:
4411 case TARGET_ARCH_SET_FS:
4412 if (code == TARGET_ARCH_SET_GS)
4413 idx = R_GS;
4414 else
4415 idx = R_FS;
4416 cpu_x86_load_seg(env, idx, 0);
4417 env->segs[idx].base = addr;
4418 break;
4419 case TARGET_ARCH_GET_GS:
4420 case TARGET_ARCH_GET_FS:
4421 if (code == TARGET_ARCH_GET_GS)
4422 idx = R_GS;
4423 else
4424 idx = R_FS;
4425 val = env->segs[idx].base;
4426 if (put_user(val, addr, abi_ulong))
4427 ret = -TARGET_EFAULT;
4428 break;
4429 default:
4430 ret = -TARGET_EINVAL;
4431 break;
4433 return ret;
4435 #endif
4437 #endif /* defined(TARGET_I386) */
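/* Thread and process creation. Clones with CLONE_VM are emulated with
   host pthreads: new_thread_info carries the new CPU state and tid
   pointers to the child, and clone_lock keeps thread setup atomic with
   respect to the parent. Without CLONE_VM a plain fork() is used. */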
4439 #define NEW_STACK_SIZE 0x40000
4442 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4443 typedef struct {
4444 CPUArchState *env;
4445 pthread_mutex_t mutex;
4446 pthread_cond_t cond;
4447 pthread_t thread;
4448 uint32_t tid;
4449 abi_ulong child_tidptr;
4450 abi_ulong parent_tidptr;
4451 sigset_t sigmask;
4452 } new_thread_info;
4454 static void *clone_func(void *arg)
4456 new_thread_info *info = arg;
4457 CPUArchState *env;
4458 CPUState *cpu;
4459 TaskState *ts;
4461 env = info->env;
4462 cpu = ENV_GET_CPU(env);
4463 thread_cpu = cpu;
4464 ts = (TaskState *)cpu->opaque;
4465 info->tid = gettid();
4466 cpu->host_tid = info->tid;
4467 task_settid(ts);
4468 if (info->child_tidptr)
4469 put_user_u32(info->tid, info->child_tidptr);
4470 if (info->parent_tidptr)
4471 put_user_u32(info->tid, info->parent_tidptr);
4472 /* Enable signals. */
4473 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4474 /* Signal to the parent that we're ready. */
4475 pthread_mutex_lock(&info->mutex);
4476 pthread_cond_broadcast(&info->cond);
4477 pthread_mutex_unlock(&info->mutex);
4478 /* Wait until the parent has finished initializing the TLS state. */
4479 pthread_mutex_lock(&clone_lock);
4480 pthread_mutex_unlock(&clone_lock);
4481 cpu_loop(env);
4482 /* never exits */
4483 return NULL;
4486 /* do_fork() must return host values and target errnos (unlike most
4487 do_*() functions). */
4488 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4489 abi_ulong parent_tidptr, target_ulong newtls,
4490 abi_ulong child_tidptr)
4492 CPUState *cpu = ENV_GET_CPU(env);
4493 int ret;
4494 TaskState *ts;
4495 CPUState *new_cpu;
4496 CPUArchState *new_env;
4497 unsigned int nptl_flags;
4498 sigset_t sigmask;
4500 /* Emulate vfork() with fork() */
4501 if (flags & CLONE_VFORK)
4502 flags &= ~(CLONE_VFORK | CLONE_VM);
4504 if (flags & CLONE_VM) {
4505 TaskState *parent_ts = (TaskState *)cpu->opaque;
4506 new_thread_info info;
4507 pthread_attr_t attr;
4509 ts = g_malloc0(sizeof(TaskState));
4510 init_task_state(ts);
4511 /* we create a new CPU instance. */
4512 new_env = cpu_copy(env);
4513 /* Init regs that differ from the parent. */
4514 cpu_clone_regs(new_env, newsp);
4515 new_cpu = ENV_GET_CPU(new_env);
4516 new_cpu->opaque = ts;
4517 ts->bprm = parent_ts->bprm;
4518 ts->info = parent_ts->info;
4519 nptl_flags = flags;
4520 flags &= ~CLONE_NPTL_FLAGS2;
4522 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4523 ts->child_tidptr = child_tidptr;
4526 if (nptl_flags & CLONE_SETTLS)
4527 cpu_set_tls (new_env, newtls);
4529 /* Grab a mutex so that thread setup appears atomic. */
4530 pthread_mutex_lock(&clone_lock);
4532 memset(&info, 0, sizeof(info));
4533 pthread_mutex_init(&info.mutex, NULL);
4534 pthread_mutex_lock(&info.mutex);
4535 pthread_cond_init(&info.cond, NULL);
4536 info.env = new_env;
4537 if (nptl_flags & CLONE_CHILD_SETTID)
4538 info.child_tidptr = child_tidptr;
4539 if (nptl_flags & CLONE_PARENT_SETTID)
4540 info.parent_tidptr = parent_tidptr;
4542 ret = pthread_attr_init(&attr);
4543 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4544 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4545 /* It is not safe to deliver signals until the child has finished
4546 initializing, so temporarily block all signals. */
4547 sigfillset(&sigmask);
4548 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4550 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4551 /* TODO: Free new CPU state if thread creation failed. */
4553 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4554 pthread_attr_destroy(&attr);
4555 if (ret == 0) {
4556 /* Wait for the child to initialize. */
4557 pthread_cond_wait(&info.cond, &info.mutex);
4558 ret = info.tid;
4559 if (flags & CLONE_PARENT_SETTID)
4560 put_user_u32(ret, parent_tidptr);
4561 } else {
4562 ret = -1;
4564 pthread_mutex_unlock(&info.mutex);
4565 pthread_cond_destroy(&info.cond);
4566 pthread_mutex_destroy(&info.mutex);
4567 pthread_mutex_unlock(&clone_lock);
4568 } else {
4569 /* if CLONE_VM is not set, we consider it a fork */
4570 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4571 return -EINVAL;
4572 fork_start();
4573 ret = fork();
4574 if (ret == 0) {
4575 /* Child Process. */
4576 rcu_after_fork();
4577 cpu_clone_regs(env, newsp);
4578 fork_end(1);
4579 /* There is a race condition here. The parent process could
4580 theoretically read the TID in the child process before the child
4581 tid is set. Avoiding this would require either using ptrace
4582 (not implemented) or having *_tidptr point at a shared memory
4583 mapping. We can't repeat the spinlock hack used above because
4584 the child process gets its own copy of the lock. */
4585 if (flags & CLONE_CHILD_SETTID)
4586 put_user_u32(gettid(), child_tidptr);
4587 if (flags & CLONE_PARENT_SETTID)
4588 put_user_u32(gettid(), parent_tidptr);
4589 ts = (TaskState *)cpu->opaque;
4590 if (flags & CLONE_SETTLS)
4591 cpu_set_tls (env, newtls);
4592 if (flags & CLONE_CHILD_CLEARTID)
4593 ts->child_tidptr = child_tidptr;
4594 } else {
4595 fork_end(0);
4598 return ret;
4601 /* warning: doesn't handle Linux-specific flags... */
4602 static int target_to_host_fcntl_cmd(int cmd)
4604 switch(cmd) {
4605 case TARGET_F_DUPFD:
4606 case TARGET_F_GETFD:
4607 case TARGET_F_SETFD:
4608 case TARGET_F_GETFL:
4609 case TARGET_F_SETFL:
4610 return cmd;
4611 case TARGET_F_GETLK:
4612 return F_GETLK;
4613 case TARGET_F_SETLK:
4614 return F_SETLK;
4615 case TARGET_F_SETLKW:
4616 return F_SETLKW;
4617 case TARGET_F_GETOWN:
4618 return F_GETOWN;
4619 case TARGET_F_SETOWN:
4620 return F_SETOWN;
4621 case TARGET_F_GETSIG:
4622 return F_GETSIG;
4623 case TARGET_F_SETSIG:
4624 return F_SETSIG;
4625 #if TARGET_ABI_BITS == 32
4626 case TARGET_F_GETLK64:
4627 return F_GETLK64;
4628 case TARGET_F_SETLK64:
4629 return F_SETLK64;
4630 case TARGET_F_SETLKW64:
4631 return F_SETLKW64;
4632 #endif
4633 case TARGET_F_SETLEASE:
4634 return F_SETLEASE;
4635 case TARGET_F_GETLEASE:
4636 return F_GETLEASE;
4637 #ifdef F_DUPFD_CLOEXEC
4638 case TARGET_F_DUPFD_CLOEXEC:
4639 return F_DUPFD_CLOEXEC;
4640 #endif
4641 case TARGET_F_NOTIFY:
4642 return F_NOTIFY;
4643 #ifdef F_GETOWN_EX
4644 case TARGET_F_GETOWN_EX:
4645 return F_GETOWN_EX;
4646 #endif
4647 #ifdef F_SETOWN_EX
4648 case TARGET_F_SETOWN_EX:
4649 return F_SETOWN_EX;
4650 #endif
4651 default:
4652 return -TARGET_EINVAL;
4654 return -TARGET_EINVAL;
4657 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4658 static const bitmask_transtbl flock_tbl[] = {
4659 TRANSTBL_CONVERT(F_RDLCK),
4660 TRANSTBL_CONVERT(F_WRLCK),
4661 TRANSTBL_CONVERT(F_UNLCK),
4662 TRANSTBL_CONVERT(F_EXLCK),
4663 TRANSTBL_CONVERT(F_SHLCK),
4664 { 0, 0, 0, 0 }
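/* do_fcntl(): translate the command with target_to_host_fcntl_cmd() and
   convert struct flock/flock64 (and f_owner_ex where available) field by
   field between target and host byte order. */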
4667 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4669 struct flock fl;
4670 struct target_flock *target_fl;
4671 struct flock64 fl64;
4672 struct target_flock64 *target_fl64;
4673 #ifdef F_GETOWN_EX
4674 struct f_owner_ex fox;
4675 struct target_f_owner_ex *target_fox;
4676 #endif
4677 abi_long ret;
4678 int host_cmd = target_to_host_fcntl_cmd(cmd);
4680 if (host_cmd == -TARGET_EINVAL)
4681 return host_cmd;
4683 switch(cmd) {
4684 case TARGET_F_GETLK:
4685 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4686 return -TARGET_EFAULT;
4687 fl.l_type =
4688 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4689 fl.l_whence = tswap16(target_fl->l_whence);
4690 fl.l_start = tswapal(target_fl->l_start);
4691 fl.l_len = tswapal(target_fl->l_len);
4692 fl.l_pid = tswap32(target_fl->l_pid);
4693 unlock_user_struct(target_fl, arg, 0);
4694 ret = get_errno(fcntl(fd, host_cmd, &fl));
4695 if (ret == 0) {
4696 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4697 return -TARGET_EFAULT;
4698 target_fl->l_type =
4699 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4700 target_fl->l_whence = tswap16(fl.l_whence);
4701 target_fl->l_start = tswapal(fl.l_start);
4702 target_fl->l_len = tswapal(fl.l_len);
4703 target_fl->l_pid = tswap32(fl.l_pid);
4704 unlock_user_struct(target_fl, arg, 1);
4706 break;
4708 case TARGET_F_SETLK:
4709 case TARGET_F_SETLKW:
4710 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4711 return -TARGET_EFAULT;
4712 fl.l_type =
4713 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4714 fl.l_whence = tswap16(target_fl->l_whence);
4715 fl.l_start = tswapal(target_fl->l_start);
4716 fl.l_len = tswapal(target_fl->l_len);
4717 fl.l_pid = tswap32(target_fl->l_pid);
4718 unlock_user_struct(target_fl, arg, 0);
4719 ret = get_errno(fcntl(fd, host_cmd, &fl));
4720 break;
4722 case TARGET_F_GETLK64:
4723 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4724 return -TARGET_EFAULT;
4725 fl64.l_type =
4726 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4727 fl64.l_whence = tswap16(target_fl64->l_whence);
4728 fl64.l_start = tswap64(target_fl64->l_start);
4729 fl64.l_len = tswap64(target_fl64->l_len);
4730 fl64.l_pid = tswap32(target_fl64->l_pid);
4731 unlock_user_struct(target_fl64, arg, 0);
4732 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4733 if (ret == 0) {
4734 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4735 return -TARGET_EFAULT;
4736 target_fl64->l_type =
4737 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4738 target_fl64->l_whence = tswap16(fl64.l_whence);
4739 target_fl64->l_start = tswap64(fl64.l_start);
4740 target_fl64->l_len = tswap64(fl64.l_len);
4741 target_fl64->l_pid = tswap32(fl64.l_pid);
4742 unlock_user_struct(target_fl64, arg, 1);
4744 break;
4745 case TARGET_F_SETLK64:
4746 case TARGET_F_SETLKW64:
4747 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4748 return -TARGET_EFAULT;
4749 fl64.l_type =
4750 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4751 fl64.l_whence = tswap16(target_fl64->l_whence);
4752 fl64.l_start = tswap64(target_fl64->l_start);
4753 fl64.l_len = tswap64(target_fl64->l_len);
4754 fl64.l_pid = tswap32(target_fl64->l_pid);
4755 unlock_user_struct(target_fl64, arg, 0);
4756 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4757 break;
4759 case TARGET_F_GETFL:
4760 ret = get_errno(fcntl(fd, host_cmd, arg));
4761 if (ret >= 0) {
4762 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4764 break;
4766 case TARGET_F_SETFL:
4767 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4768 break;
4770 #ifdef F_GETOWN_EX
4771 case TARGET_F_GETOWN_EX:
4772 ret = get_errno(fcntl(fd, host_cmd, &fox));
4773 if (ret >= 0) {
4774 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
4775 return -TARGET_EFAULT;
4776 target_fox->type = tswap32(fox.type);
4777 target_fox->pid = tswap32(fox.pid);
4778 unlock_user_struct(target_fox, arg, 1);
4780 break;
4781 #endif
4783 #ifdef F_SETOWN_EX
4784 case TARGET_F_SETOWN_EX:
4785 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
4786 return -TARGET_EFAULT;
4787 fox.type = tswap32(target_fox->type);
4788 fox.pid = tswap32(target_fox->pid);
4789 unlock_user_struct(target_fox, arg, 0);
4790 ret = get_errno(fcntl(fd, host_cmd, &fox));
4791 break;
4792 #endif
4794 case TARGET_F_SETOWN:
4795 case TARGET_F_GETOWN:
4796 case TARGET_F_SETSIG:
4797 case TARGET_F_GETSIG:
4798 case TARGET_F_SETLEASE:
4799 case TARGET_F_GETLEASE:
4800 ret = get_errno(fcntl(fd, host_cmd, arg));
4801 break;
4803 default:
4804 ret = get_errno(fcntl(fd, cmd, arg));
4805 break;
4807 return ret;
4810 #ifdef USE_UID16
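/* Helpers for the legacy 16-bit uid/gid syscalls: ids above 65535 are
   clamped to 65534 on the way to the target, and a 16-bit -1 is
   sign-extended back to -1 on the way to the host. */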
4812 static inline int high2lowuid(int uid)
4814 if (uid > 65535)
4815 return 65534;
4816 else
4817 return uid;
4820 static inline int high2lowgid(int gid)
4822 if (gid > 65535)
4823 return 65534;
4824 else
4825 return gid;
4828 static inline int low2highuid(int uid)
4830 if ((int16_t)uid == -1)
4831 return -1;
4832 else
4833 return uid;
4836 static inline int low2highgid(int gid)
4838 if ((int16_t)gid == -1)
4839 return -1;
4840 else
4841 return gid;
4843 static inline int tswapid(int id)
4845 return tswap16(id);
4848 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4850 #else /* !USE_UID16 */
4851 static inline int high2lowuid(int uid)
4853 return uid;
4855 static inline int high2lowgid(int gid)
4857 return gid;
4859 static inline int low2highuid(int uid)
4861 return uid;
4863 static inline int low2highgid(int gid)
4865 return gid;
4867 static inline int tswapid(int id)
4869 return tswap32(id);
4872 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4874 #endif /* USE_UID16 */
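/* syscall_init(): register the thunk descriptors for all converted
   structures, build the target->host errno table, and rewrite the size
   field of any ioctl number whose target definition left it as all-ones
   with the real thunk_type_size() of its argument. */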
4876 void syscall_init(void)
4878 IOCTLEntry *ie;
4879 const argtype *arg_type;
4880 int size;
4881 int i;
4883 thunk_init(STRUCT_MAX);
4885 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4886 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4887 #include "syscall_types.h"
4888 #undef STRUCT
4889 #undef STRUCT_SPECIAL
4891 /* Build target_to_host_errno_table[] table from
4892 * host_to_target_errno_table[]. */
4893 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4894 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4897 /* we patch the ioctl size if necessary. We rely on the fact that
4898 no ioctl has all bits of the size field set to '1' */
4899 ie = ioctl_entries;
4900 while (ie->target_cmd != 0) {
4901 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4902 TARGET_IOC_SIZEMASK) {
4903 arg_type = ie->arg_type;
4904 if (arg_type[0] != TYPE_PTR) {
4905 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4906 ie->target_cmd);
4907 exit(1);
4909 arg_type++;
4910 size = thunk_type_size(arg_type, 0);
4911 ie->target_cmd = (ie->target_cmd &
4912 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4913 (size << TARGET_IOC_SIZESHIFT);
4916 /* automatic consistency check if same arch */
4917 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4918 (defined(__x86_64__) && defined(TARGET_X86_64))
4919 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4920 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4921 ie->name, ie->target_cmd, ie->host_cmd);
4923 #endif
4924 ie++;
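/* On 32-bit ABIs a 64-bit file offset arrives as two registers whose
   order depends on target endianness; target_offset64() reassembles it.
   On 64-bit ABIs the first word already holds the whole value. */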
4928 #if TARGET_ABI_BITS == 32
4929 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4931 #ifdef TARGET_WORDS_BIGENDIAN
4932 return ((uint64_t)word0 << 32) | word1;
4933 #else
4934 return ((uint64_t)word1 << 32) | word0;
4935 #endif
4937 #else /* TARGET_ABI_BITS == 32 */
4938 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4940 return word0;
4942 #endif /* TARGET_ABI_BITS != 32 */
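/* Some ABIs require 64-bit register pairs to start on an even register
   (regpairs_aligned()); in that case the first offset slot is padding and
   the value is shifted up by one argument. */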
4944 #ifdef TARGET_NR_truncate64
4945 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4946 abi_long arg2,
4947 abi_long arg3,
4948 abi_long arg4)
4950 if (regpairs_aligned(cpu_env)) {
4951 arg2 = arg3;
4952 arg3 = arg4;
4954 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4956 #endif
4958 #ifdef TARGET_NR_ftruncate64
4959 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4960 abi_long arg2,
4961 abi_long arg3,
4962 abi_long arg4)
4964 if (regpairs_aligned(cpu_env)) {
4965 arg2 = arg3;
4966 arg3 = arg4;
4968 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4970 #endif
4972 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4973 abi_ulong target_addr)
4975 struct target_timespec *target_ts;
4977 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4978 return -TARGET_EFAULT;
4979 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4980 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4981 unlock_user_struct(target_ts, target_addr, 0);
4982 return 0;
4985 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4986 struct timespec *host_ts)
4988 struct target_timespec *target_ts;
4990 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4991 return -TARGET_EFAULT;
4992 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4993 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4994 unlock_user_struct(target_ts, target_addr, 1);
4995 return 0;
4998 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
4999 abi_ulong target_addr)
5001 struct target_itimerspec *target_itspec;
5003 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5004 return -TARGET_EFAULT;
5007 host_itspec->it_interval.tv_sec =
5008 tswapal(target_itspec->it_interval.tv_sec);
5009 host_itspec->it_interval.tv_nsec =
5010 tswapal(target_itspec->it_interval.tv_nsec);
5011 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5012 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5014 unlock_user_struct(target_itspec, target_addr, 1);
5015 return 0;
5018 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5019 struct itimerspec *host_its)
5021 struct target_itimerspec *target_itspec;
5023 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5024 return -TARGET_EFAULT;
5027 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5028 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5030 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5031 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5033 unlock_user_struct(target_itspec, target_addr, 0);
5034 return 0;
5037 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5038 abi_ulong target_addr)
5040 struct target_sigevent *target_sevp;
5042 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5043 return -TARGET_EFAULT;
5046 /* This union is awkward on 64 bit systems because it has a 32 bit
5047 * integer and a pointer in it; we follow the conversion approach
5048 * used for handling sigval types in signal.c so the guest should get
5049 * the correct value back even if we did a 64 bit byteswap and it's
5050 * using the 32 bit integer.
5052 host_sevp->sigev_value.sival_ptr =
5053 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5054 host_sevp->sigev_signo =
5055 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5056 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5057 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5059 unlock_user_struct(target_sevp, target_addr, 1);
5060 return 0;
5063 #if defined(TARGET_NR_mlockall)
5064 static inline int target_to_host_mlockall_arg(int arg)
5066 int result = 0;
5068 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
5069 result |= MCL_CURRENT;
5071 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
5072 result |= MCL_FUTURE;
5074 return result;
5076 #endif
5078 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
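/* Fill a target stat64 structure from the host struct stat; ARM EABI
   guests use their own target_eabi_stat64 layout. */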
5079 static inline abi_long host_to_target_stat64(void *cpu_env,
5080 abi_ulong target_addr,
5081 struct stat *host_st)
5083 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5084 if (((CPUARMState *)cpu_env)->eabi) {
5085 struct target_eabi_stat64 *target_st;
5087 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5088 return -TARGET_EFAULT;
5089 memset(target_st, 0, sizeof(struct target_eabi_stat64));
5090 __put_user(host_st->st_dev, &target_st->st_dev);
5091 __put_user(host_st->st_ino, &target_st->st_ino);
5092 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5093 __put_user(host_st->st_ino, &target_st->__st_ino);
5094 #endif
5095 __put_user(host_st->st_mode, &target_st->st_mode);
5096 __put_user(host_st->st_nlink, &target_st->st_nlink);
5097 __put_user(host_st->st_uid, &target_st->st_uid);
5098 __put_user(host_st->st_gid, &target_st->st_gid);
5099 __put_user(host_st->st_rdev, &target_st->st_rdev);
5100 __put_user(host_st->st_size, &target_st->st_size);
5101 __put_user(host_st->st_blksize, &target_st->st_blksize);
5102 __put_user(host_st->st_blocks, &target_st->st_blocks);
5103 __put_user(host_st->st_atime, &target_st->target_st_atime);
5104 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5105 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5106 unlock_user_struct(target_st, target_addr, 1);
5107 } else
5108 #endif
5110 #if defined(TARGET_HAS_STRUCT_STAT64)
5111 struct target_stat64 *target_st;
5112 #else
5113 struct target_stat *target_st;
5114 #endif
5116 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5117 return -TARGET_EFAULT;
5118 memset(target_st, 0, sizeof(*target_st));
5119 __put_user(host_st->st_dev, &target_st->st_dev);
5120 __put_user(host_st->st_ino, &target_st->st_ino);
5121 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5122 __put_user(host_st->st_ino, &target_st->__st_ino);
5123 #endif
5124 __put_user(host_st->st_mode, &target_st->st_mode);
5125 __put_user(host_st->st_nlink, &target_st->st_nlink);
5126 __put_user(host_st->st_uid, &target_st->st_uid);
5127 __put_user(host_st->st_gid, &target_st->st_gid);
5128 __put_user(host_st->st_rdev, &target_st->st_rdev);
5129 /* XXX: better use of kernel struct */
5130 __put_user(host_st->st_size, &target_st->st_size);
5131 __put_user(host_st->st_blksize, &target_st->st_blksize);
5132 __put_user(host_st->st_blocks, &target_st->st_blocks);
5133 __put_user(host_st->st_atime, &target_st->target_st_atime);
5134 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5135 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5136 unlock_user_struct(target_st, target_addr, 1);
5139 return 0;
5141 #endif
5143 /* ??? Using host futex calls even when target atomic operations
5144 are not really atomic probably breaks things. However implementing
5145 futexes locally would make futexes shared between multiple processes
5146 tricky. In any case, they're probably useless because guest atomic
5147 operations won't work either. */
5148 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5149 target_ulong uaddr2, int val3)
5151 struct timespec ts, *pts;
5152 int base_op;
5154 /* ??? We assume FUTEX_* constants are the same on both host
5155 and target. */
5156 #ifdef FUTEX_CMD_MASK
5157 base_op = op & FUTEX_CMD_MASK;
5158 #else
5159 base_op = op;
5160 #endif
5161 switch (base_op) {
5162 case FUTEX_WAIT:
5163 case FUTEX_WAIT_BITSET:
5164 if (timeout) {
5165 pts = &ts;
5166 target_to_host_timespec(pts, timeout);
5167 } else {
5168 pts = NULL;
5170 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
5171 pts, NULL, val3));
5172 case FUTEX_WAKE:
5173 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5174 case FUTEX_FD:
5175 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5176 case FUTEX_REQUEUE:
5177 case FUTEX_CMP_REQUEUE:
5178 case FUTEX_WAKE_OP:
5179 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5180 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5181 But the prototype takes a `struct timespec *'; insert casts
5182 to satisfy the compiler. We do not need to tswap TIMEOUT
5183 since it's not compared to guest memory. */
5184 pts = (struct timespec *)(uintptr_t) timeout;
5185 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
5186 g2h(uaddr2),
5187 (base_op == FUTEX_CMP_REQUEUE
5188 ? tswap32(val3)
5189 : val3)));
5190 default:
5191 return -TARGET_ENOSYS;
5195 /* Map host to target signal numbers for the wait family of syscalls.
5196 Assume all other status bits are the same. */
5197 int host_to_target_waitstatus(int status)
5199 if (WIFSIGNALED(status)) {
5200 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
5202 if (WIFSTOPPED(status)) {
5203 return (host_to_target_signal(WSTOPSIG(status)) << 8)
5204 | (status & 0xff);
5206 return status;
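/* The open_self_*() helpers below synthesize the contents of
   /proc/self/{cmdline,maps,stat,auxv} so that the guest sees its own
   emulated state rather than that of the QEMU process. */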
5209 static int open_self_cmdline(void *cpu_env, int fd)
5211 int fd_orig = -1;
5212 bool word_skipped = false;
5214 fd_orig = open("/proc/self/cmdline", O_RDONLY);
5215 if (fd_orig < 0) {
5216 return fd_orig;
5219 while (true) {
5220 ssize_t nb_read;
5221 char buf[128];
5222 char *cp_buf = buf;
5224 nb_read = read(fd_orig, buf, sizeof(buf));
5225 if (nb_read < 0) {
5226 fd_orig = close(fd_orig);
5227 return -1;
5228 } else if (nb_read == 0) {
5229 break;
5232 if (!word_skipped) {
5233 /* Skip the first string, which is the path to qemu-*-static
5234 instead of the actual command. */
5235 cp_buf = memchr(buf, 0, sizeof(buf));
5236 if (cp_buf) {
5237 /* Null byte found, skip one string */
5238 cp_buf++;
5239 nb_read -= cp_buf - buf;
5240 word_skipped = true;
5244 if (word_skipped) {
5245 if (write(fd, cp_buf, nb_read) != nb_read) {
5246 close(fd_orig);
5247 return -1;
5252 return close(fd_orig);
5255 static int open_self_maps(void *cpu_env, int fd)
5257 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5258 TaskState *ts = cpu->opaque;
5259 FILE *fp;
5260 char *line = NULL;
5261 size_t len = 0;
5262 ssize_t read;
5264 fp = fopen("/proc/self/maps", "r");
5265 if (fp == NULL) {
5266 return -EACCES;
5269 while ((read = getline(&line, &len, fp)) != -1) {
5270 int fields, dev_maj, dev_min, inode;
5271 uint64_t min, max, offset;
5272 char flag_r, flag_w, flag_x, flag_p;
5273 char path[512] = "";
5274 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5275 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5276 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5278 if ((fields < 10) || (fields > 11)) {
5279 continue;
5281 if (h2g_valid(min)) {
5282 int flags = page_get_flags(h2g(min));
5283 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5284 if (page_check_range(h2g(min), max - min, flags) == -1) {
5285 continue;
5287 if (h2g(min) == ts->info->stack_limit) {
5288 pstrcpy(path, sizeof(path), " [stack]");
5290 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5291 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5292 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5293 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5294 path[0] ? " " : "", path);
5298 free(line);
5299 fclose(fp);
5301 return 0;
5304 static int open_self_stat(void *cpu_env, int fd)
5306 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5307 TaskState *ts = cpu->opaque;
5308 abi_ulong start_stack = ts->info->start_stack;
5309 int i;
5311 for (i = 0; i < 44; i++) {
5312 char buf[128];
5313 int len;
5314 uint64_t val = 0;
5316 if (i == 0) {
5317 /* pid */
5318 val = getpid();
5319 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5320 } else if (i == 1) {
5321 /* app name */
5322 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5323 } else if (i == 27) {
5324 /* stack bottom */
5325 val = start_stack;
5326 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5327 } else {
5328 /* for the rest, there is MasterCard */
5329 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5332 len = strlen(buf);
5333 if (write(fd, buf, len) != len) {
5334 return -1;
5338 return 0;
5341 static int open_self_auxv(void *cpu_env, int fd)
5343 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5344 TaskState *ts = cpu->opaque;
5345 abi_ulong auxv = ts->info->saved_auxv;
5346 abi_ulong len = ts->info->auxv_len;
5347 char *ptr;
5350 * The auxiliary vector is stored on the target process stack.
5351 * Read in the whole auxv vector and copy it to the file.
5353 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5354 if (ptr != NULL) {
5355 while (len > 0) {
5356 ssize_t r;
5357 r = write(fd, ptr, len);
5358 if (r <= 0) {
5359 break;
5361 len -= r;
5362 ptr += r;
5364 lseek(fd, 0, SEEK_SET);
5365 unlock_user(ptr, auxv, len);
5368 return 0;
5371 static int is_proc_myself(const char *filename, const char *entry)
5373 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5374 filename += strlen("/proc/");
5375 if (!strncmp(filename, "self/", strlen("self/"))) {
5376 filename += strlen("self/");
5377 } else if (*filename >= '1' && *filename <= '9') {
5378 char myself[80];
5379 snprintf(myself, sizeof(myself), "%d/", getpid());
5380 if (!strncmp(filename, myself, strlen(myself))) {
5381 filename += strlen(myself);
5382 } else {
5383 return 0;
5385 } else {
5386 return 0;
5388 if (!strcmp(filename, entry)) {
5389 return 1;
5392 return 0;
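/* When host and target endianness differ, /proc/net/route must be
   re-emitted with the addresses byteswapped to target order. */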
5395 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5396 static int is_proc(const char *filename, const char *entry)
5398 return strcmp(filename, entry) == 0;
5401 static int open_net_route(void *cpu_env, int fd)
5403 FILE *fp;
5404 char *line = NULL;
5405 size_t len = 0;
5406 ssize_t read;
5408 fp = fopen("/proc/net/route", "r");
5409 if (fp == NULL) {
5410 return -EACCES;
5413 /* read header */
5415 read = getline(&line, &len, fp);
5416 dprintf(fd, "%s", line);
5418 /* read routes */
5420 while ((read = getline(&line, &len, fp)) != -1) {
5421 char iface[16];
5422 uint32_t dest, gw, mask;
5423 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5424 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5425 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5426 &mask, &mtu, &window, &irtt);
5427 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5428 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5429 metric, tswap32(mask), mtu, window, irtt);
5432 free(line);
5433 fclose(fp);
5435 return 0;
5437 #endif
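/* do_openat(): paths matching the fake_open table are materialized by
   writing their generated contents into an unlinked mkstemp() file and
   returning that descriptor; /proc/self/exe is redirected to the guest
   executable, and everything else goes to the host openat(). */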
5439 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
5441 struct fake_open {
5442 const char *filename;
5443 int (*fill)(void *cpu_env, int fd);
5444 int (*cmp)(const char *s1, const char *s2);
5446 const struct fake_open *fake_open;
5447 static const struct fake_open fakes[] = {
5448 { "maps", open_self_maps, is_proc_myself },
5449 { "stat", open_self_stat, is_proc_myself },
5450 { "auxv", open_self_auxv, is_proc_myself },
5451 { "cmdline", open_self_cmdline, is_proc_myself },
5452 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5453 { "/proc/net/route", open_net_route, is_proc },
5454 #endif
5455 { NULL, NULL, NULL }
5458 if (is_proc_myself(pathname, "exe")) {
5459 int execfd = qemu_getauxval(AT_EXECFD);
5460 return execfd ? execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode));
5463 for (fake_open = fakes; fake_open->filename; fake_open++) {
5464 if (fake_open->cmp(pathname, fake_open->filename)) {
5465 break;
5469 if (fake_open->filename) {
5470 const char *tmpdir;
5471 char filename[PATH_MAX];
5472 int fd, r;
5474 /* create a temporary file to hold the generated contents */
5475 tmpdir = getenv("TMPDIR");
5476 if (!tmpdir)
5477 tmpdir = "/tmp";
5478 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5479 fd = mkstemp(filename);
5480 if (fd < 0) {
5481 return fd;
5483 unlink(filename);
5485 if ((r = fake_open->fill(cpu_env, fd))) {
5486 close(fd);
5487 return r;
5489 lseek(fd, 0, SEEK_SET);
5491 return fd;
5494 return get_errno(sys_openat(dirfd, path(pathname), flags, mode));
5497 #define TIMER_MAGIC 0x0caf0000
5498 #define TIMER_MAGIC_MASK 0xffff0000
5500 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
5501 static target_timer_t get_timer_id(abi_long arg)
5503 target_timer_t timerid = arg;
5505 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
5506 return -TARGET_EINVAL;
5509 timerid &= 0xffff;
5511 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
5512 return -TARGET_EINVAL;
5515 return timerid;
5518 /* do_syscall() should always have a single exit point at the end so
5519 that actions, such as logging of syscall results, can be performed.
5520 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5521 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5522 abi_long arg2, abi_long arg3, abi_long arg4,
5523 abi_long arg5, abi_long arg6, abi_long arg7,
5524 abi_long arg8)
5526 CPUState *cpu = ENV_GET_CPU(cpu_env);
5527 abi_long ret;
5528 struct stat st;
5529 struct statfs stfs;
5530 void *p;
5532 #ifdef DEBUG
5533 gemu_log("syscall %d", num);
5534 #endif
5535 if(do_strace)
5536 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5538 switch(num) {
5539 case TARGET_NR_exit:
5540 /* In old applications this may be used to implement _exit(2).
5541 However in threaded applications it is used for thread termination,
5542 and _exit_group is used for application termination.
5543 Do thread termination if we have more than one thread. */
5544 /* FIXME: This probably breaks if a signal arrives. We should probably
5545 be disabling signals. */
5546 if (CPU_NEXT(first_cpu)) {
5547 TaskState *ts;
5549 cpu_list_lock();
5550 /* Remove the CPU from the list. */
5551 QTAILQ_REMOVE(&cpus, cpu, node);
5552 cpu_list_unlock();
5553 ts = cpu->opaque;
5554 if (ts->child_tidptr) {
5555 put_user_u32(0, ts->child_tidptr);
5556 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5557 NULL, NULL, 0);
5559 thread_cpu = NULL;
5560 object_unref(OBJECT(cpu));
5561 g_free(ts);
5562 pthread_exit(NULL);
5564 #ifdef TARGET_GPROF
5565 _mcleanup();
5566 #endif
5567 gdb_exit(cpu_env, arg1);
5568 _exit(arg1);
5569 ret = 0; /* avoid warning */
5570 break;
5571 case TARGET_NR_read:
5572 if (arg3 == 0)
5573 ret = 0;
5574 else {
5575 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5576 goto efault;
5577 ret = get_errno(read(arg1, p, arg3));
5578 unlock_user(p, arg2, ret);
5580 break;
5581 case TARGET_NR_write:
5582 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5583 goto efault;
5584 ret = get_errno(write(arg1, p, arg3));
5585 unlock_user(p, arg2, 0);
5586 break;
5587 case TARGET_NR_open:
5588 if (!(p = lock_user_string(arg1)))
5589 goto efault;
5590 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
5591 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5592 arg3));
5593 unlock_user(p, arg1, 0);
5594 break;
5595 case TARGET_NR_openat:
5596 if (!(p = lock_user_string(arg2)))
5597 goto efault;
5598 ret = get_errno(do_openat(cpu_env, arg1, p,
5599 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5600 arg4));
5601 unlock_user(p, arg2, 0);
5602 break;
5603 case TARGET_NR_close:
5604 ret = get_errno(close(arg1));
5605 break;
5606 case TARGET_NR_brk:
5607 ret = do_brk(arg1);
5608 break;
5609 case TARGET_NR_fork:
5610 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5611 break;
5612 #ifdef TARGET_NR_waitpid
5613 case TARGET_NR_waitpid:
5615 int status;
5616 ret = get_errno(waitpid(arg1, &status, arg3));
5617 if (!is_error(ret) && arg2 && ret
5618 && put_user_s32(host_to_target_waitstatus(status), arg2))
5619 goto efault;
5621 break;
5622 #endif
5623 #ifdef TARGET_NR_waitid
5624 case TARGET_NR_waitid:
5626 siginfo_t info;
5627 info.si_pid = 0;
5628 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5629 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5630 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5631 goto efault;
5632 host_to_target_siginfo(p, &info);
5633 unlock_user(p, arg3, sizeof(target_siginfo_t));
5636 break;
5637 #endif
5638 #ifdef TARGET_NR_creat /* not on alpha */
5639 case TARGET_NR_creat:
5640 if (!(p = lock_user_string(arg1)))
5641 goto efault;
5642 ret = get_errno(creat(p, arg2));
5643 unlock_user(p, arg1, 0);
5644 break;
5645 #endif
5646 case TARGET_NR_link:
5648 void * p2;
5649 p = lock_user_string(arg1);
5650 p2 = lock_user_string(arg2);
5651 if (!p || !p2)
5652 ret = -TARGET_EFAULT;
5653 else
5654 ret = get_errno(link(p, p2));
5655 unlock_user(p2, arg2, 0);
5656 unlock_user(p, arg1, 0);
5658 break;
5659 #if defined(TARGET_NR_linkat)
5660 case TARGET_NR_linkat:
5662 void * p2 = NULL;
5663 if (!arg2 || !arg4)
5664 goto efault;
5665 p = lock_user_string(arg2);
5666 p2 = lock_user_string(arg4);
5667 if (!p || !p2)
5668 ret = -TARGET_EFAULT;
5669 else
5670 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5671 unlock_user(p, arg2, 0);
5672 unlock_user(p2, arg4, 0);
5674 break;
5675 #endif
5676 case TARGET_NR_unlink:
5677 if (!(p = lock_user_string(arg1)))
5678 goto efault;
5679 ret = get_errno(unlink(p));
5680 unlock_user(p, arg1, 0);
5681 break;
5682 #if defined(TARGET_NR_unlinkat)
5683 case TARGET_NR_unlinkat:
5684 if (!(p = lock_user_string(arg2)))
5685 goto efault;
5686 ret = get_errno(unlinkat(arg1, p, arg3));
5687 unlock_user(p, arg2, 0);
5688 break;
5689 #endif
5690 case TARGET_NR_execve:
5692 char **argp, **envp;
5693 int argc, envc;
5694 abi_ulong gp;
5695 abi_ulong guest_argp;
5696 abi_ulong guest_envp;
5697 abi_ulong addr;
5698 char **q;
5699 int total_size = 0;
5701 argc = 0;
5702 guest_argp = arg2;
5703 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5704 if (get_user_ual(addr, gp))
5705 goto efault;
5706 if (!addr)
5707 break;
5708 argc++;
5710 envc = 0;
5711 guest_envp = arg3;
5712 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5713 if (get_user_ual(addr, gp))
5714 goto efault;
5715 if (!addr)
5716 break;
5717 envc++;
5720 argp = alloca((argc + 1) * sizeof(void *));
5721 envp = alloca((envc + 1) * sizeof(void *));
5723 for (gp = guest_argp, q = argp; gp;
5724 gp += sizeof(abi_ulong), q++) {
5725 if (get_user_ual(addr, gp))
5726 goto execve_efault;
5727 if (!addr)
5728 break;
5729 if (!(*q = lock_user_string(addr)))
5730 goto execve_efault;
5731 total_size += strlen(*q) + 1;
5733 *q = NULL;
5735 for (gp = guest_envp, q = envp; gp;
5736 gp += sizeof(abi_ulong), q++) {
5737 if (get_user_ual(addr, gp))
5738 goto execve_efault;
5739 if (!addr)
5740 break;
5741 if (!(*q = lock_user_string(addr)))
5742 goto execve_efault;
5743 total_size += strlen(*q) + 1;
5745 *q = NULL;
5747 /* This case will not be caught by the host's execve() if its
5748 page size is bigger than the target's. */
5749 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5750 ret = -TARGET_E2BIG;
5751 goto execve_end;
5753 if (!(p = lock_user_string(arg1)))
5754 goto execve_efault;
5755 ret = get_errno(execve(p, argp, envp));
5756 unlock_user(p, arg1, 0);
5758 goto execve_end;
5760 execve_efault:
5761 ret = -TARGET_EFAULT;
5763 execve_end:
5764 for (gp = guest_argp, q = argp; *q;
5765 gp += sizeof(abi_ulong), q++) {
5766 if (get_user_ual(addr, gp)
5767 || !addr)
5768 break;
5769 unlock_user(*q, addr, 0);
5771 for (gp = guest_envp, q = envp; *q;
5772 gp += sizeof(abi_ulong), q++) {
5773 if (get_user_ual(addr, gp)
5774 || !addr)
5775 break;
5776 unlock_user(*q, addr, 0);
5779 break;
5780 case TARGET_NR_chdir:
5781 if (!(p = lock_user_string(arg1)))
5782 goto efault;
5783 ret = get_errno(chdir(p));
5784 unlock_user(p, arg1, 0);
5785 break;
5786 #ifdef TARGET_NR_time
5787 case TARGET_NR_time:
5789 time_t host_time;
5790 ret = get_errno(time(&host_time));
5791 if (!is_error(ret)
5792 && arg1
5793 && put_user_sal(host_time, arg1))
5794 goto efault;
5796 break;
5797 #endif
5798 case TARGET_NR_mknod:
5799 if (!(p = lock_user_string(arg1)))
5800 goto efault;
5801 ret = get_errno(mknod(p, arg2, arg3));
5802 unlock_user(p, arg1, 0);
5803 break;
5804 #if defined(TARGET_NR_mknodat)
5805 case TARGET_NR_mknodat:
5806 if (!(p = lock_user_string(arg2)))
5807 goto efault;
5808 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5809 unlock_user(p, arg2, 0);
5810 break;
5811 #endif
5812 case TARGET_NR_chmod:
5813 if (!(p = lock_user_string(arg1)))
5814 goto efault;
5815 ret = get_errno(chmod(p, arg2));
5816 unlock_user(p, arg1, 0);
5817 break;
5818 #ifdef TARGET_NR_break
5819 case TARGET_NR_break:
5820 goto unimplemented;
5821 #endif
5822 #ifdef TARGET_NR_oldstat
5823 case TARGET_NR_oldstat:
5824 goto unimplemented;
5825 #endif
5826 case TARGET_NR_lseek:
5827 ret = get_errno(lseek(arg1, arg2, arg3));
5828 break;
5829 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5830 /* Alpha specific */
5831 case TARGET_NR_getxpid:
5832 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5833 ret = get_errno(getpid());
5834 break;
5835 #endif
5836 #ifdef TARGET_NR_getpid
5837 case TARGET_NR_getpid:
5838 ret = get_errno(getpid());
5839 break;
5840 #endif
5841 case TARGET_NR_mount:
5843 /* need to look at the data field */
5844 void *p2, *p3;
5846 if (arg1) {
5847 p = lock_user_string(arg1);
5848 if (!p) {
5849 goto efault;
5851 } else {
5852 p = NULL;
5855 p2 = lock_user_string(arg2);
5856 if (!p2) {
5857 if (arg1) {
5858 unlock_user(p, arg1, 0);
5860 goto efault;
5863 if (arg3) {
5864 p3 = lock_user_string(arg3);
5865 if (!p3) {
5866 if (arg1) {
5867 unlock_user(p, arg1, 0);
5869 unlock_user(p2, arg2, 0);
5870 goto efault;
5872 } else {
5873 p3 = NULL;
5876 /* FIXME - arg5 should be locked, but it isn't clear how to
5877 * do that since it's not guaranteed to be a NULL-terminated
5878 * string.
5880 if (!arg5) {
5881 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
5882 } else {
5883 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
5885 ret = get_errno(ret);
5887 if (arg1) {
5888 unlock_user(p, arg1, 0);
5890 unlock_user(p2, arg2, 0);
5891 if (arg3) {
5892 unlock_user(p3, arg3, 0);
5895 break;
5896 #ifdef TARGET_NR_umount
5897 case TARGET_NR_umount:
5898 if (!(p = lock_user_string(arg1)))
5899 goto efault;
5900 ret = get_errno(umount(p));
5901 unlock_user(p, arg1, 0);
5902 break;
5903 #endif
5904 #ifdef TARGET_NR_stime /* not on alpha */
5905 case TARGET_NR_stime:
5907 time_t host_time;
5908 if (get_user_sal(host_time, arg1))
5909 goto efault;
5910 ret = get_errno(stime(&host_time));
5912 break;
5913 #endif
5914 case TARGET_NR_ptrace:
5915 goto unimplemented;
5916 #ifdef TARGET_NR_alarm /* not on alpha */
5917 case TARGET_NR_alarm:
5918 ret = alarm(arg1);
5919 break;
5920 #endif
5921 #ifdef TARGET_NR_oldfstat
5922 case TARGET_NR_oldfstat:
5923 goto unimplemented;
5924 #endif
5925 #ifdef TARGET_NR_pause /* not on alpha */
5926 case TARGET_NR_pause:
5927 ret = get_errno(pause());
5928 break;
5929 #endif
5930 #ifdef TARGET_NR_utime
5931 case TARGET_NR_utime:
5933 struct utimbuf tbuf, *host_tbuf;
5934 struct target_utimbuf *target_tbuf;
5935 if (arg2) {
5936 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5937 goto efault;
5938 tbuf.actime = tswapal(target_tbuf->actime);
5939 tbuf.modtime = tswapal(target_tbuf->modtime);
5940 unlock_user_struct(target_tbuf, arg2, 0);
5941 host_tbuf = &tbuf;
5942 } else {
5943 host_tbuf = NULL;
5945 if (!(p = lock_user_string(arg1)))
5946 goto efault;
5947 ret = get_errno(utime(p, host_tbuf));
5948 unlock_user(p, arg1, 0);
5950 break;
5951 #endif
5952 case TARGET_NR_utimes:
5954 struct timeval *tvp, tv[2];
5955 if (arg2) {
5956 if (copy_from_user_timeval(&tv[0], arg2)
5957 || copy_from_user_timeval(&tv[1],
5958 arg2 + sizeof(struct target_timeval)))
5959 goto efault;
5960 tvp = tv;
5961 } else {
5962 tvp = NULL;
5964 if (!(p = lock_user_string(arg1)))
5965 goto efault;
5966 ret = get_errno(utimes(p, tvp));
5967 unlock_user(p, arg1, 0);
5969 break;
5970 #if defined(TARGET_NR_futimesat)
5971 case TARGET_NR_futimesat:
5973 struct timeval *tvp, tv[2];
5974 if (arg3) {
5975 if (copy_from_user_timeval(&tv[0], arg3)
5976 || copy_from_user_timeval(&tv[1],
5977 arg3 + sizeof(struct target_timeval)))
5978 goto efault;
5979 tvp = tv;
5980 } else {
5981 tvp = NULL;
5983 if (!(p = lock_user_string(arg2)))
5984 goto efault;
5985 ret = get_errno(futimesat(arg1, path(p), tvp));
5986 unlock_user(p, arg2, 0);
5988 break;
5989 #endif
5990 #ifdef TARGET_NR_stty
5991 case TARGET_NR_stty:
5992 goto unimplemented;
5993 #endif
5994 #ifdef TARGET_NR_gtty
5995 case TARGET_NR_gtty:
5996 goto unimplemented;
5997 #endif
5998 case TARGET_NR_access:
5999 if (!(p = lock_user_string(arg1)))
6000 goto efault;
6001 ret = get_errno(access(path(p), arg2));
6002 unlock_user(p, arg1, 0);
6003 break;
6004 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6005 case TARGET_NR_faccessat:
6006 if (!(p = lock_user_string(arg2)))
6007 goto efault;
6008 ret = get_errno(faccessat(arg1, p, arg3, 0));
6009 unlock_user(p, arg2, 0);
6010 break;
6011 #endif
6012 #ifdef TARGET_NR_nice /* not on alpha */
6013 case TARGET_NR_nice:
6014 ret = get_errno(nice(arg1));
6015 break;
6016 #endif
6017 #ifdef TARGET_NR_ftime
6018 case TARGET_NR_ftime:
6019 goto unimplemented;
6020 #endif
6021 case TARGET_NR_sync:
6022 sync();
6023 ret = 0;
6024 break;
6025 case TARGET_NR_kill:
6026 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
6027 break;
6028 case TARGET_NR_rename:
6030 void *p2;
6031 p = lock_user_string(arg1);
6032 p2 = lock_user_string(arg2);
6033 if (!p || !p2)
6034 ret = -TARGET_EFAULT;
6035 else
6036 ret = get_errno(rename(p, p2));
6037 unlock_user(p2, arg2, 0);
6038 unlock_user(p, arg1, 0);
6040 break;
6041 #if defined(TARGET_NR_renameat)
6042 case TARGET_NR_renameat:
6044 void *p2;
6045 p = lock_user_string(arg2);
6046 p2 = lock_user_string(arg4);
6047 if (!p || !p2)
6048 ret = -TARGET_EFAULT;
6049 else
6050 ret = get_errno(renameat(arg1, p, arg3, p2));
6051 unlock_user(p2, arg4, 0);
6052 unlock_user(p, arg2, 0);
6054 break;
6055 #endif
6056 case TARGET_NR_mkdir:
6057 if (!(p = lock_user_string(arg1)))
6058 goto efault;
6059 ret = get_errno(mkdir(p, arg2));
6060 unlock_user(p, arg1, 0);
6061 break;
6062 #if defined(TARGET_NR_mkdirat)
6063 case TARGET_NR_mkdirat:
6064 if (!(p = lock_user_string(arg2)))
6065 goto efault;
6066 ret = get_errno(mkdirat(arg1, p, arg3));
6067 unlock_user(p, arg2, 0);
6068 break;
6069 #endif
6070 case TARGET_NR_rmdir:
6071 if (!(p = lock_user_string(arg1)))
6072 goto efault;
6073 ret = get_errno(rmdir(p));
6074 unlock_user(p, arg1, 0);
6075 break;
6076 case TARGET_NR_dup:
6077 ret = get_errno(dup(arg1));
6078 break;
6079 case TARGET_NR_pipe:
6080 ret = do_pipe(cpu_env, arg1, 0, 0);
6081 break;
6082 #ifdef TARGET_NR_pipe2
6083 case TARGET_NR_pipe2:
6084 ret = do_pipe(cpu_env, arg1,
6085 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6086 break;
6087 #endif
6088 case TARGET_NR_times:
6090 struct target_tms *tmsp;
6091 struct tms tms;
6092 ret = get_errno(times(&tms));
6093 if (arg1) {
6094 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6095 if (!tmsp)
6096 goto efault;
6097 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6098 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6099 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6100 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6102 if (!is_error(ret))
6103 ret = host_to_target_clock_t(ret);
6105 break;
6106 #ifdef TARGET_NR_prof
6107 case TARGET_NR_prof:
6108 goto unimplemented;
6109 #endif
6110 #ifdef TARGET_NR_signal
6111 case TARGET_NR_signal:
6112 goto unimplemented;
6113 #endif
6114 case TARGET_NR_acct:
6115 if (arg1 == 0) {
6116 ret = get_errno(acct(NULL));
6117 } else {
6118 if (!(p = lock_user_string(arg1)))
6119 goto efault;
6120 ret = get_errno(acct(path(p)));
6121 unlock_user(p, arg1, 0);
6123 break;
6124 #ifdef TARGET_NR_umount2
6125 case TARGET_NR_umount2:
6126 if (!(p = lock_user_string(arg1)))
6127 goto efault;
6128 ret = get_errno(umount2(p, arg2));
6129 unlock_user(p, arg1, 0);
6130 break;
6131 #endif
6132 #ifdef TARGET_NR_lock
6133 case TARGET_NR_lock:
6134 goto unimplemented;
6135 #endif
6136 case TARGET_NR_ioctl:
6137 ret = do_ioctl(arg1, arg2, arg3);
6138 break;
6139 case TARGET_NR_fcntl:
6140 ret = do_fcntl(arg1, arg2, arg3);
6141 break;
6142 #ifdef TARGET_NR_mpx
6143 case TARGET_NR_mpx:
6144 goto unimplemented;
6145 #endif
6146 case TARGET_NR_setpgid:
6147 ret = get_errno(setpgid(arg1, arg2));
6148 break;
6149 #ifdef TARGET_NR_ulimit
6150 case TARGET_NR_ulimit:
6151 goto unimplemented;
6152 #endif
6153 #ifdef TARGET_NR_oldolduname
6154 case TARGET_NR_oldolduname:
6155 goto unimplemented;
6156 #endif
6157 case TARGET_NR_umask:
6158 ret = get_errno(umask(arg1));
6159 break;
6160 case TARGET_NR_chroot:
6161 if (!(p = lock_user_string(arg1)))
6162 goto efault;
6163 ret = get_errno(chroot(p));
6164 unlock_user(p, arg1, 0);
6165 break;
6166 case TARGET_NR_ustat:
6167 goto unimplemented;
6168 case TARGET_NR_dup2:
6169 ret = get_errno(dup2(arg1, arg2));
6170 break;
6171 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6172 case TARGET_NR_dup3:
6173 ret = get_errno(dup3(arg1, arg2, arg3));
6174 break;
6175 #endif
6176 #ifdef TARGET_NR_getppid /* not on alpha */
6177 case TARGET_NR_getppid:
6178 ret = get_errno(getppid());
6179 break;
6180 #endif
6181 case TARGET_NR_getpgrp:
6182 ret = get_errno(getpgrp());
6183 break;
6184 case TARGET_NR_setsid:
6185 ret = get_errno(setsid());
6186 break;
6187 #ifdef TARGET_NR_sigaction
6188 case TARGET_NR_sigaction:
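/* The old-style sigaction structure layout differs per architecture, so each
 * variant below converts it to a struct target_sigaction for do_sigaction(). */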
6190 #if defined(TARGET_ALPHA)
6191 struct target_sigaction act, oact, *pact = 0;
6192 struct target_old_sigaction *old_act;
6193 if (arg2) {
6194 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6195 goto efault;
6196 act._sa_handler = old_act->_sa_handler;
6197 target_siginitset(&act.sa_mask, old_act->sa_mask);
6198 act.sa_flags = old_act->sa_flags;
6199 act.sa_restorer = 0;
6200 unlock_user_struct(old_act, arg2, 0);
6201 pact = &act;
6203 ret = get_errno(do_sigaction(arg1, pact, &oact));
6204 if (!is_error(ret) && arg3) {
6205 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6206 goto efault;
6207 old_act->_sa_handler = oact._sa_handler;
6208 old_act->sa_mask = oact.sa_mask.sig[0];
6209 old_act->sa_flags = oact.sa_flags;
6210 unlock_user_struct(old_act, arg3, 1);
6212 #elif defined(TARGET_MIPS)
6213 struct target_sigaction act, oact, *pact, *old_act;
6215 if (arg2) {
6216 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6217 goto efault;
6218 act._sa_handler = old_act->_sa_handler;
6219 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6220 act.sa_flags = old_act->sa_flags;
6221 unlock_user_struct(old_act, arg2, 0);
6222 pact = &act;
6223 } else {
6224 pact = NULL;
6227 ret = get_errno(do_sigaction(arg1, pact, &oact));
6229 if (!is_error(ret) && arg3) {
6230 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6231 goto efault;
6232 old_act->_sa_handler = oact._sa_handler;
6233 old_act->sa_flags = oact.sa_flags;
6234 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6235 old_act->sa_mask.sig[1] = 0;
6236 old_act->sa_mask.sig[2] = 0;
6237 old_act->sa_mask.sig[3] = 0;
6238 unlock_user_struct(old_act, arg3, 1);
6240 #else
6241 struct target_old_sigaction *old_act;
6242 struct target_sigaction act, oact, *pact;
6243 if (arg2) {
6244 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6245 goto efault;
6246 act._sa_handler = old_act->_sa_handler;
6247 target_siginitset(&act.sa_mask, old_act->sa_mask);
6248 act.sa_flags = old_act->sa_flags;
6249 act.sa_restorer = old_act->sa_restorer;
6250 unlock_user_struct(old_act, arg2, 0);
6251 pact = &act;
6252 } else {
6253 pact = NULL;
6255 ret = get_errno(do_sigaction(arg1, pact, &oact));
6256 if (!is_error(ret) && arg3) {
6257 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6258 goto efault;
6259 old_act->_sa_handler = oact._sa_handler;
6260 old_act->sa_mask = oact.sa_mask.sig[0];
6261 old_act->sa_flags = oact.sa_flags;
6262 old_act->sa_restorer = oact.sa_restorer;
6263 unlock_user_struct(old_act, arg3, 1);
6265 #endif
6267 break;
6268 #endif
6269 case TARGET_NR_rt_sigaction:
6271 #if defined(TARGET_ALPHA)
6272 struct target_sigaction act, oact, *pact = 0;
6273 struct target_rt_sigaction *rt_act;
6274 /* ??? arg4 == sizeof(sigset_t). */
6275 if (arg2) {
6276 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6277 goto efault;
6278 act._sa_handler = rt_act->_sa_handler;
6279 act.sa_mask = rt_act->sa_mask;
6280 act.sa_flags = rt_act->sa_flags;
6281 act.sa_restorer = arg5;
6282 unlock_user_struct(rt_act, arg2, 0);
6283 pact = &act;
6285 ret = get_errno(do_sigaction(arg1, pact, &oact));
6286 if (!is_error(ret) && arg3) {
6287 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6288 goto efault;
6289 rt_act->_sa_handler = oact._sa_handler;
6290 rt_act->sa_mask = oact.sa_mask;
6291 rt_act->sa_flags = oact.sa_flags;
6292 unlock_user_struct(rt_act, arg3, 1);
6294 #else
6295 struct target_sigaction *act;
6296 struct target_sigaction *oact;
6298 if (arg2) {
6299 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6300 goto efault;
6301 } else
6302 act = NULL;
6303 if (arg3) {
6304 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6305 ret = -TARGET_EFAULT;
6306 goto rt_sigaction_fail;
6308 } else
6309 oact = NULL;
6310 ret = get_errno(do_sigaction(arg1, act, oact));
6311 rt_sigaction_fail:
6312 if (act)
6313 unlock_user_struct(act, arg2, 0);
6314 if (oact)
6315 unlock_user_struct(oact, arg3, 1);
6316 #endif
6318 break;
6319 #ifdef TARGET_NR_sgetmask /* not on alpha */
6320 case TARGET_NR_sgetmask:
6322 sigset_t cur_set;
6323 abi_ulong target_set;
6324 do_sigprocmask(0, NULL, &cur_set);
6325 host_to_target_old_sigset(&target_set, &cur_set);
6326 ret = target_set;
6328 break;
6329 #endif
6330 #ifdef TARGET_NR_ssetmask /* not on alpha */
6331 case TARGET_NR_ssetmask:
6333 sigset_t set, oset, cur_set;
6334 abi_ulong target_set = arg1;
6335 do_sigprocmask(0, NULL, &cur_set);
6336 target_to_host_old_sigset(&set, &target_set);
6337 sigorset(&set, &set, &cur_set);
6338 do_sigprocmask(SIG_SETMASK, &set, &oset);
6339 host_to_target_old_sigset(&target_set, &oset);
6340 ret = target_set;
6342 break;
6343 #endif
6344 #ifdef TARGET_NR_sigprocmask
6345 case TARGET_NR_sigprocmask:
6347 #if defined(TARGET_ALPHA)
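/* On Alpha the mask is passed by value in arg2 and the previous mask is
 * returned in the result register, so no guest memory is accessed here. */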
6348 sigset_t set, oldset;
6349 abi_ulong mask;
6350 int how;
6352 switch (arg1) {
6353 case TARGET_SIG_BLOCK:
6354 how = SIG_BLOCK;
6355 break;
6356 case TARGET_SIG_UNBLOCK:
6357 how = SIG_UNBLOCK;
6358 break;
6359 case TARGET_SIG_SETMASK:
6360 how = SIG_SETMASK;
6361 break;
6362 default:
6363 ret = -TARGET_EINVAL;
6364 goto fail;
6366 mask = arg2;
6367 target_to_host_old_sigset(&set, &mask);
6369 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6370 if (!is_error(ret)) {
6371 host_to_target_old_sigset(&mask, &oldset);
6372 ret = mask;
6373 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6375 #else
6376 sigset_t set, oldset, *set_ptr;
6377 int how;
6379 if (arg2) {
6380 switch (arg1) {
6381 case TARGET_SIG_BLOCK:
6382 how = SIG_BLOCK;
6383 break;
6384 case TARGET_SIG_UNBLOCK:
6385 how = SIG_UNBLOCK;
6386 break;
6387 case TARGET_SIG_SETMASK:
6388 how = SIG_SETMASK;
6389 break;
6390 default:
6391 ret = -TARGET_EINVAL;
6392 goto fail;
6394 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6395 goto efault;
6396 target_to_host_old_sigset(&set, p);
6397 unlock_user(p, arg2, 0);
6398 set_ptr = &set;
6399 } else {
6400 how = 0;
6401 set_ptr = NULL;
6403 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6404 if (!is_error(ret) && arg3) {
6405 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6406 goto efault;
6407 host_to_target_old_sigset(p, &oldset);
6408 unlock_user(p, arg3, sizeof(target_sigset_t));
6410 #endif
6412 break;
6413 #endif
6414 case TARGET_NR_rt_sigprocmask:
6416 int how = arg1;
6417 sigset_t set, oldset, *set_ptr;
6419 if (arg2) {
6420 switch(how) {
6421 case TARGET_SIG_BLOCK:
6422 how = SIG_BLOCK;
6423 break;
6424 case TARGET_SIG_UNBLOCK:
6425 how = SIG_UNBLOCK;
6426 break;
6427 case TARGET_SIG_SETMASK:
6428 how = SIG_SETMASK;
6429 break;
6430 default:
6431 ret = -TARGET_EINVAL;
6432 goto fail;
6434 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6435 goto efault;
6436 target_to_host_sigset(&set, p);
6437 unlock_user(p, arg2, 0);
6438 set_ptr = &set;
6439 } else {
6440 how = 0;
6441 set_ptr = NULL;
6443 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6444 if (!is_error(ret) && arg3) {
6445 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6446 goto efault;
6447 host_to_target_sigset(p, &oldset);
6448 unlock_user(p, arg3, sizeof(target_sigset_t));
6451 break;
6452 #ifdef TARGET_NR_sigpending
6453 case TARGET_NR_sigpending:
6455 sigset_t set;
6456 ret = get_errno(sigpending(&set));
6457 if (!is_error(ret)) {
6458 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6459 goto efault;
6460 host_to_target_old_sigset(p, &set);
6461 unlock_user(p, arg1, sizeof(target_sigset_t));
6464 break;
6465 #endif
6466 case TARGET_NR_rt_sigpending:
6468 sigset_t set;
6469 ret = get_errno(sigpending(&set));
6470 if (!is_error(ret)) {
6471 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6472 goto efault;
6473 host_to_target_sigset(p, &set);
6474 unlock_user(p, arg1, sizeof(target_sigset_t));
6477 break;
6478 #ifdef TARGET_NR_sigsuspend
6479 case TARGET_NR_sigsuspend:
6481 sigset_t set;
6482 #if defined(TARGET_ALPHA)
6483 abi_ulong mask = arg1;
6484 target_to_host_old_sigset(&set, &mask);
6485 #else
6486 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6487 goto efault;
6488 target_to_host_old_sigset(&set, p);
6489 unlock_user(p, arg1, 0);
6490 #endif
6491 ret = get_errno(sigsuspend(&set));
6493 break;
6494 #endif
6495 case TARGET_NR_rt_sigsuspend:
6497 sigset_t set;
6498 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6499 goto efault;
6500 target_to_host_sigset(&set, p);
6501 unlock_user(p, arg1, 0);
6502 ret = get_errno(sigsuspend(&set));
6504 break;
6505 case TARGET_NR_rt_sigtimedwait:
6507 sigset_t set;
6508 struct timespec uts, *puts;
6509 siginfo_t uinfo;
6511 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6512 goto efault;
6513 target_to_host_sigset(&set, p);
6514 unlock_user(p, arg1, 0);
6515 if (arg3) {
6516 puts = &uts;
6517 target_to_host_timespec(puts, arg3);
6518 } else {
6519 puts = NULL;
6521 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6522 if (!is_error(ret)) {
6523 if (arg2) {
6524 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6526 if (!p) {
6527 goto efault;
6529 host_to_target_siginfo(p, &uinfo);
6530 unlock_user(p, arg2, sizeof(target_siginfo_t));
6532 ret = host_to_target_signal(ret);
6535 break;
6536 case TARGET_NR_rt_sigqueueinfo:
6538 siginfo_t uinfo;
6539 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6540 goto efault;
6541 target_to_host_siginfo(&uinfo, p);
6542 unlock_user(p, arg3, 0);
6543 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6545 break;
6546 #ifdef TARGET_NR_sigreturn
6547 case TARGET_NR_sigreturn:
6548 /* NOTE: ret is eax, so no transcoding is needed */
6549 ret = do_sigreturn(cpu_env);
6550 break;
6551 #endif
6552 case TARGET_NR_rt_sigreturn:
6553 /* NOTE: ret is eax, so no transcoding is needed */
6554 ret = do_rt_sigreturn(cpu_env);
6555 break;
6556 case TARGET_NR_sethostname:
6557 if (!(p = lock_user_string(arg1)))
6558 goto efault;
6559 ret = get_errno(sethostname(p, arg2));
6560 unlock_user(p, arg1, 0);
6561 break;
6562 case TARGET_NR_setrlimit:
6564 int resource = target_to_host_resource(arg1);
6565 struct target_rlimit *target_rlim;
6566 struct rlimit rlim;
6567 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6568 goto efault;
6569 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6570 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6571 unlock_user_struct(target_rlim, arg2, 0);
6572 ret = get_errno(setrlimit(resource, &rlim));
6574 break;
6575 case TARGET_NR_getrlimit:
6577 int resource = target_to_host_resource(arg1);
6578 struct target_rlimit *target_rlim;
6579 struct rlimit rlim;
6581 ret = get_errno(getrlimit(resource, &rlim));
6582 if (!is_error(ret)) {
6583 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6584 goto efault;
6585 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6586 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6587 unlock_user_struct(target_rlim, arg2, 1);
6590 break;
6591 case TARGET_NR_getrusage:
6593 struct rusage rusage;
6594 ret = get_errno(getrusage(arg1, &rusage));
6595 if (!is_error(ret)) {
6596 ret = host_to_target_rusage(arg2, &rusage);
6599 break;
6600 case TARGET_NR_gettimeofday:
6602 struct timeval tv;
6603 ret = get_errno(gettimeofday(&tv, NULL));
6604 if (!is_error(ret)) {
6605 if (copy_to_user_timeval(arg1, &tv))
6606 goto efault;
6609 break;
6610 case TARGET_NR_settimeofday:
6612 struct timeval tv, *ptv = NULL;
6613 struct timezone tz, *ptz = NULL;
6615 if (arg1) {
6616 if (copy_from_user_timeval(&tv, arg1)) {
6617 goto efault;
6619 ptv = &tv;
6622 if (arg2) {
6623 if (copy_from_user_timezone(&tz, arg2)) {
6624 goto efault;
6626 ptz = &tz;
6629 ret = get_errno(settimeofday(ptv, ptz));
6631 break;
6632 #if defined(TARGET_NR_select)
6633 case TARGET_NR_select:
6634 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6635 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6636 #else
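/* Old-style select: arg1 points to a target_sel_arg_struct that packs all
 * five arguments, so unpack it before calling do_select(). */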
6638 struct target_sel_arg_struct *sel;
6639 abi_ulong inp, outp, exp, tvp;
6640 long nsel;
6642 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6643 goto efault;
6644 nsel = tswapal(sel->n);
6645 inp = tswapal(sel->inp);
6646 outp = tswapal(sel->outp);
6647 exp = tswapal(sel->exp);
6648 tvp = tswapal(sel->tvp);
6649 unlock_user_struct(sel, arg1, 0);
6650 ret = do_select(nsel, inp, outp, exp, tvp);
6652 #endif
6653 break;
6654 #endif
6655 #ifdef TARGET_NR_pselect6
6656 case TARGET_NR_pselect6:
6658 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6659 fd_set rfds, wfds, efds;
6660 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6661 struct timespec ts, *ts_ptr;
6664 * The 6th arg is actually two args smashed together,
6665 * so we cannot use the C library.
6667 sigset_t set;
6668 struct {
6669 sigset_t *set;
6670 size_t size;
6671 } sig, *sig_ptr;
6673 abi_ulong arg_sigset, arg_sigsize, *arg7;
6674 target_sigset_t *target_sigset;
6676 n = arg1;
6677 rfd_addr = arg2;
6678 wfd_addr = arg3;
6679 efd_addr = arg4;
6680 ts_addr = arg5;
6682 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6683 if (ret) {
6684 goto fail;
6686 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6687 if (ret) {
6688 goto fail;
6690 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6691 if (ret) {
6692 goto fail;
6696 * This takes a timespec, and not a timeval, so we cannot
6697 * use the do_select() helper ...
6699 if (ts_addr) {
6700 if (target_to_host_timespec(&ts, ts_addr)) {
6701 goto efault;
6703 ts_ptr = &ts;
6704 } else {
6705 ts_ptr = NULL;
6708 /* Extract the two packed args for the sigset */
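/* arg6 points to a pair of abi_ulongs: the guest address of the sigset and
 * its size, which must equal sizeof(target_sigset_t). */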
6709 if (arg6) {
6710 sig_ptr = &sig;
6711 sig.size = _NSIG / 8;
6713 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6714 if (!arg7) {
6715 goto efault;
6717 arg_sigset = tswapal(arg7[0]);
6718 arg_sigsize = tswapal(arg7[1]);
6719 unlock_user(arg7, arg6, 0);
6721 if (arg_sigset) {
6722 sig.set = &set;
6723 if (arg_sigsize != sizeof(*target_sigset)) {
6724 /* Like the kernel, we enforce correct size sigsets */
6725 ret = -TARGET_EINVAL;
6726 goto fail;
6728 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6729 sizeof(*target_sigset), 1);
6730 if (!target_sigset) {
6731 goto efault;
6733 target_to_host_sigset(&set, target_sigset);
6734 unlock_user(target_sigset, arg_sigset, 0);
6735 } else {
6736 sig.set = NULL;
6738 } else {
6739 sig_ptr = NULL;
6742 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6743 ts_ptr, sig_ptr));
6745 if (!is_error(ret)) {
6746 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6747 goto efault;
6748 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6749 goto efault;
6750 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6751 goto efault;
6753 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6754 goto efault;
6757 break;
6758 #endif
6759 case TARGET_NR_symlink:
6761 void *p2;
6762 p = lock_user_string(arg1);
6763 p2 = lock_user_string(arg2);
6764 if (!p || !p2)
6765 ret = -TARGET_EFAULT;
6766 else
6767 ret = get_errno(symlink(p, p2));
6768 unlock_user(p2, arg2, 0);
6769 unlock_user(p, arg1, 0);
6771 break;
6772 #if defined(TARGET_NR_symlinkat)
6773 case TARGET_NR_symlinkat:
6775 void *p2;
6776 p = lock_user_string(arg1);
6777 p2 = lock_user_string(arg3);
6778 if (!p || !p2)
6779 ret = -TARGET_EFAULT;
6780 else
6781 ret = get_errno(symlinkat(p, arg2, p2));
6782 unlock_user(p2, arg3, 0);
6783 unlock_user(p, arg1, 0);
6785 break;
6786 #endif
6787 #ifdef TARGET_NR_oldlstat
6788 case TARGET_NR_oldlstat:
6789 goto unimplemented;
6790 #endif
6791 case TARGET_NR_readlink:
6793 void *p2;
6794 p = lock_user_string(arg1);
6795 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6796 if (!p || !p2) {
6797 ret = -TARGET_EFAULT;
6798 } else if (!arg3) {
6799 /* Short circuit this for the magic exe check. */
6800 ret = -TARGET_EINVAL;
6801 } else if (is_proc_myself((const char *)p, "exe")) {
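/* A guest reading /proc/self/exe must see the emulated binary rather than
 * the QEMU executable, so resolve exec_path instead of the real link. */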
6802 char real[PATH_MAX], *temp;
6803 temp = realpath(exec_path, real);
6804 /* Return value is # of bytes that we wrote to the buffer. */
6805 if (temp == NULL) {
6806 ret = get_errno(-1);
6807 } else {
6808 /* Don't worry about sign mismatch as earlier mapping
6809 * logic would have thrown a bad address error. */
6810 ret = MIN(strlen(real), arg3);
6811 /* We cannot NUL terminate the string. */
6812 memcpy(p2, real, ret);
6814 } else {
6815 ret = get_errno(readlink(path(p), p2, arg3));
6817 unlock_user(p2, arg2, ret);
6818 unlock_user(p, arg1, 0);
6820 break;
6821 #if defined(TARGET_NR_readlinkat)
6822 case TARGET_NR_readlinkat:
6824 void *p2;
6825 p = lock_user_string(arg2);
6826 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6827 if (!p || !p2) {
6828 ret = -TARGET_EFAULT;
6829 } else if (is_proc_myself((const char *)p, "exe")) {
6830 char real[PATH_MAX], *temp;
6831 temp = realpath(exec_path, real);
6832 ret = temp == NULL ? get_errno(-1) : strlen(real);
6833 snprintf((char *)p2, arg4, "%s", real);
6834 } else {
6835 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6837 unlock_user(p2, arg3, ret);
6838 unlock_user(p, arg2, 0);
6840 break;
6841 #endif
6842 #ifdef TARGET_NR_uselib
6843 case TARGET_NR_uselib:
6844 goto unimplemented;
6845 #endif
6846 #ifdef TARGET_NR_swapon
6847 case TARGET_NR_swapon:
6848 if (!(p = lock_user_string(arg1)))
6849 goto efault;
6850 ret = get_errno(swapon(p, arg2));
6851 unlock_user(p, arg1, 0);
6852 break;
6853 #endif
6854 case TARGET_NR_reboot:
6855 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6856 /* arg4 is used only with LINUX_REBOOT_CMD_RESTART2; it must be ignored in all other cases */
6857 p = lock_user_string(arg4);
6858 if (!p) {
6859 goto efault;
6861 ret = get_errno(reboot(arg1, arg2, arg3, p));
6862 unlock_user(p, arg4, 0);
6863 } else {
6864 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6866 break;
6867 #ifdef TARGET_NR_readdir
6868 case TARGET_NR_readdir:
6869 goto unimplemented;
6870 #endif
6871 #ifdef TARGET_NR_mmap
6872 case TARGET_NR_mmap:
6873 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6874 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6875 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6876 || defined(TARGET_S390X)
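/* On these targets the old mmap syscall passes a pointer to a block of six
 * arguments instead of passing them in registers, so unpack the block. */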
6878 abi_ulong *v;
6879 abi_ulong v1, v2, v3, v4, v5, v6;
6880 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6881 goto efault;
6882 v1 = tswapal(v[0]);
6883 v2 = tswapal(v[1]);
6884 v3 = tswapal(v[2]);
6885 v4 = tswapal(v[3]);
6886 v5 = tswapal(v[4]);
6887 v6 = tswapal(v[5]);
6888 unlock_user(v, arg1, 0);
6889 ret = get_errno(target_mmap(v1, v2, v3,
6890 target_to_host_bitmask(v4, mmap_flags_tbl),
6891 v5, v6));
6893 #else
6894 ret = get_errno(target_mmap(arg1, arg2, arg3,
6895 target_to_host_bitmask(arg4, mmap_flags_tbl),
6896 arg5,
6897 arg6));
6898 #endif
6899 break;
6900 #endif
6901 #ifdef TARGET_NR_mmap2
6902 case TARGET_NR_mmap2:
6903 #ifndef MMAP_SHIFT
6904 #define MMAP_SHIFT 12
6905 #endif
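/* mmap2 passes the file offset in units of (1 << MMAP_SHIFT)-byte pages, so
 * scale it back to a byte offset for target_mmap(). */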
6906 ret = get_errno(target_mmap(arg1, arg2, arg3,
6907 target_to_host_bitmask(arg4, mmap_flags_tbl),
6908 arg5,
6909 arg6 << MMAP_SHIFT));
6910 break;
6911 #endif
6912 case TARGET_NR_munmap:
6913 ret = get_errno(target_munmap(arg1, arg2));
6914 break;
6915 case TARGET_NR_mprotect:
6917 TaskState *ts = cpu->opaque;
6918 /* Special hack to detect libc making the stack executable. */
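/* If libc requests PROT_GROWSDOWN on an address inside the guest stack,
 * widen the range down to the recorded stack limit and clear the flag
 * before calling target_mprotect(). */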
6919 if ((arg3 & PROT_GROWSDOWN)
6920 && arg1 >= ts->info->stack_limit
6921 && arg1 <= ts->info->start_stack) {
6922 arg3 &= ~PROT_GROWSDOWN;
6923 arg2 = arg2 + arg1 - ts->info->stack_limit;
6924 arg1 = ts->info->stack_limit;
6927 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6928 break;
6929 #ifdef TARGET_NR_mremap
6930 case TARGET_NR_mremap:
6931 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6932 break;
6933 #endif
6934 /* ??? msync/mlock/munlock are broken for softmmu. */
6935 #ifdef TARGET_NR_msync
6936 case TARGET_NR_msync:
6937 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6938 break;
6939 #endif
6940 #ifdef TARGET_NR_mlock
6941 case TARGET_NR_mlock:
6942 ret = get_errno(mlock(g2h(arg1), arg2));
6943 break;
6944 #endif
6945 #ifdef TARGET_NR_munlock
6946 case TARGET_NR_munlock:
6947 ret = get_errno(munlock(g2h(arg1), arg2));
6948 break;
6949 #endif
6950 #ifdef TARGET_NR_mlockall
6951 case TARGET_NR_mlockall:
6952 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
6953 break;
6954 #endif
6955 #ifdef TARGET_NR_munlockall
6956 case TARGET_NR_munlockall:
6957 ret = get_errno(munlockall());
6958 break;
6959 #endif
6960 case TARGET_NR_truncate:
6961 if (!(p = lock_user_string(arg1)))
6962 goto efault;
6963 ret = get_errno(truncate(p, arg2));
6964 unlock_user(p, arg1, 0);
6965 break;
6966 case TARGET_NR_ftruncate:
6967 ret = get_errno(ftruncate(arg1, arg2));
6968 break;
6969 case TARGET_NR_fchmod:
6970 ret = get_errno(fchmod(arg1, arg2));
6971 break;
6972 #if defined(TARGET_NR_fchmodat)
6973 case TARGET_NR_fchmodat:
6974 if (!(p = lock_user_string(arg2)))
6975 goto efault;
6976 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6977 unlock_user(p, arg2, 0);
6978 break;
6979 #endif
6980 case TARGET_NR_getpriority:
6981 /* Note that negative values are valid for getpriority, so we must
6982 differentiate based on errno settings. */
6983 errno = 0;
6984 ret = getpriority(arg1, arg2);
6985 if (ret == -1 && errno != 0) {
6986 ret = -host_to_target_errno(errno);
6987 break;
6989 #ifdef TARGET_ALPHA
6990 /* Return value is the unbiased priority. Signal no error. */
6991 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6992 #else
6993 /* Return value is a biased priority to avoid negative numbers. */
6994 ret = 20 - ret;
6995 #endif
6996 break;
6997 case TARGET_NR_setpriority:
6998 ret = get_errno(setpriority(arg1, arg2, arg3));
6999 break;
7000 #ifdef TARGET_NR_profil
7001 case TARGET_NR_profil:
7002 goto unimplemented;
7003 #endif
7004 case TARGET_NR_statfs:
7005 if (!(p = lock_user_string(arg1)))
7006 goto efault;
7007 ret = get_errno(statfs(path(p), &stfs));
7008 unlock_user(p, arg1, 0);
7009 convert_statfs:
7010 if (!is_error(ret)) {
7011 struct target_statfs *target_stfs;
7013 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
7014 goto efault;
7015 __put_user(stfs.f_type, &target_stfs->f_type);
7016 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7017 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7018 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7019 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7020 __put_user(stfs.f_files, &target_stfs->f_files);
7021 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7022 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7023 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7024 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7025 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7026 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7027 unlock_user_struct(target_stfs, arg2, 1);
7029 break;
7030 case TARGET_NR_fstatfs:
7031 ret = get_errno(fstatfs(arg1, &stfs));
7032 goto convert_statfs;
7033 #ifdef TARGET_NR_statfs64
7034 case TARGET_NR_statfs64:
7035 if (!(p = lock_user_string(arg1)))
7036 goto efault;
7037 ret = get_errno(statfs(path(p), &stfs));
7038 unlock_user(p, arg1, 0);
7039 convert_statfs64:
7040 if (!is_error(ret)) {
7041 struct target_statfs64 *target_stfs;
7043 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7044 goto efault;
7045 __put_user(stfs.f_type, &target_stfs->f_type);
7046 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7047 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7048 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7049 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7050 __put_user(stfs.f_files, &target_stfs->f_files);
7051 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7052 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7053 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7054 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7055 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7056 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7057 unlock_user_struct(target_stfs, arg3, 1);
7059 break;
7060 case TARGET_NR_fstatfs64:
7061 ret = get_errno(fstatfs(arg1, &stfs));
7062 goto convert_statfs64;
7063 #endif
7064 #ifdef TARGET_NR_ioperm
7065 case TARGET_NR_ioperm:
7066 goto unimplemented;
7067 #endif
7068 #ifdef TARGET_NR_socketcall
7069 case TARGET_NR_socketcall:
7070 ret = do_socketcall(arg1, arg2);
7071 break;
7072 #endif
7073 #ifdef TARGET_NR_accept
7074 case TARGET_NR_accept:
7075 ret = do_accept4(arg1, arg2, arg3, 0);
7076 break;
7077 #endif
7078 #ifdef TARGET_NR_accept4
7079 case TARGET_NR_accept4:
7080 #ifdef CONFIG_ACCEPT4
7081 ret = do_accept4(arg1, arg2, arg3, arg4);
7082 #else
7083 goto unimplemented;
7084 #endif
7085 break;
7086 #endif
7087 #ifdef TARGET_NR_bind
7088 case TARGET_NR_bind:
7089 ret = do_bind(arg1, arg2, arg3);
7090 break;
7091 #endif
7092 #ifdef TARGET_NR_connect
7093 case TARGET_NR_connect:
7094 ret = do_connect(arg1, arg2, arg3);
7095 break;
7096 #endif
7097 #ifdef TARGET_NR_getpeername
7098 case TARGET_NR_getpeername:
7099 ret = do_getpeername(arg1, arg2, arg3);
7100 break;
7101 #endif
7102 #ifdef TARGET_NR_getsockname
7103 case TARGET_NR_getsockname:
7104 ret = do_getsockname(arg1, arg2, arg3);
7105 break;
7106 #endif
7107 #ifdef TARGET_NR_getsockopt
7108 case TARGET_NR_getsockopt:
7109 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
7110 break;
7111 #endif
7112 #ifdef TARGET_NR_listen
7113 case TARGET_NR_listen:
7114 ret = get_errno(listen(arg1, arg2));
7115 break;
7116 #endif
7117 #ifdef TARGET_NR_recv
7118 case TARGET_NR_recv:
7119 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
7120 break;
7121 #endif
7122 #ifdef TARGET_NR_recvfrom
7123 case TARGET_NR_recvfrom:
7124 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
7125 break;
7126 #endif
7127 #ifdef TARGET_NR_recvmsg
7128 case TARGET_NR_recvmsg:
7129 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
7130 break;
7131 #endif
7132 #ifdef TARGET_NR_send
7133 case TARGET_NR_send:
7134 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
7135 break;
7136 #endif
7137 #ifdef TARGET_NR_sendmsg
7138 case TARGET_NR_sendmsg:
7139 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
7140 break;
7141 #endif
7142 #ifdef TARGET_NR_sendmmsg
7143 case TARGET_NR_sendmmsg:
7144 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
7145 break;
7146 case TARGET_NR_recvmmsg:
7147 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
7148 break;
7149 #endif
7150 #ifdef TARGET_NR_sendto
7151 case TARGET_NR_sendto:
7152 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
7153 break;
7154 #endif
7155 #ifdef TARGET_NR_shutdown
7156 case TARGET_NR_shutdown:
7157 ret = get_errno(shutdown(arg1, arg2));
7158 break;
7159 #endif
7160 #ifdef TARGET_NR_socket
7161 case TARGET_NR_socket:
7162 ret = do_socket(arg1, arg2, arg3);
7163 break;
7164 #endif
7165 #ifdef TARGET_NR_socketpair
7166 case TARGET_NR_socketpair:
7167 ret = do_socketpair(arg1, arg2, arg3, arg4);
7168 break;
7169 #endif
7170 #ifdef TARGET_NR_setsockopt
7171 case TARGET_NR_setsockopt:
7172 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7173 break;
7174 #endif
7176 case TARGET_NR_syslog:
7177 if (!(p = lock_user_string(arg2)))
7178 goto efault;
7179 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7180 unlock_user(p, arg2, 0);
7181 break;
7183 case TARGET_NR_setitimer:
7185 struct itimerval value, ovalue, *pvalue;
7187 if (arg2) {
7188 pvalue = &value;
7189 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7190 || copy_from_user_timeval(&pvalue->it_value,
7191 arg2 + sizeof(struct target_timeval)))
7192 goto efault;
7193 } else {
7194 pvalue = NULL;
7196 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7197 if (!is_error(ret) && arg3) {
7198 if (copy_to_user_timeval(arg3,
7199 &ovalue.it_interval)
7200 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7201 &ovalue.it_value))
7202 goto efault;
7205 break;
7206 case TARGET_NR_getitimer:
7208 struct itimerval value;
7210 ret = get_errno(getitimer(arg1, &value));
7211 if (!is_error(ret) && arg2) {
7212 if (copy_to_user_timeval(arg2,
7213 &value.it_interval)
7214 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7215 &value.it_value))
7216 goto efault;
7219 break;
7220 case TARGET_NR_stat:
7221 if (!(p = lock_user_string(arg1)))
7222 goto efault;
7223 ret = get_errno(stat(path(p), &st));
7224 unlock_user(p, arg1, 0);
7225 goto do_stat;
7226 case TARGET_NR_lstat:
7227 if (!(p = lock_user_string(arg1)))
7228 goto efault;
7229 ret = get_errno(lstat(path(p), &st));
7230 unlock_user(p, arg1, 0);
7231 goto do_stat;
7232 case TARGET_NR_fstat:
7234 ret = get_errno(fstat(arg1, &st));
7235 do_stat:
7236 if (!is_error(ret)) {
7237 struct target_stat *target_st;
7239 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7240 goto efault;
7241 memset(target_st, 0, sizeof(*target_st));
7242 __put_user(st.st_dev, &target_st->st_dev);
7243 __put_user(st.st_ino, &target_st->st_ino);
7244 __put_user(st.st_mode, &target_st->st_mode);
7245 __put_user(st.st_uid, &target_st->st_uid);
7246 __put_user(st.st_gid, &target_st->st_gid);
7247 __put_user(st.st_nlink, &target_st->st_nlink);
7248 __put_user(st.st_rdev, &target_st->st_rdev);
7249 __put_user(st.st_size, &target_st->st_size);
7250 __put_user(st.st_blksize, &target_st->st_blksize);
7251 __put_user(st.st_blocks, &target_st->st_blocks);
7252 __put_user(st.st_atime, &target_st->target_st_atime);
7253 __put_user(st.st_mtime, &target_st->target_st_mtime);
7254 __put_user(st.st_ctime, &target_st->target_st_ctime);
7255 unlock_user_struct(target_st, arg2, 1);
7258 break;
7259 #ifdef TARGET_NR_olduname
7260 case TARGET_NR_olduname:
7261 goto unimplemented;
7262 #endif
7263 #ifdef TARGET_NR_iopl
7264 case TARGET_NR_iopl:
7265 goto unimplemented;
7266 #endif
7267 case TARGET_NR_vhangup:
7268 ret = get_errno(vhangup());
7269 break;
7270 #ifdef TARGET_NR_idle
7271 case TARGET_NR_idle:
7272 goto unimplemented;
7273 #endif
7274 #ifdef TARGET_NR_syscall
7275 case TARGET_NR_syscall:
7276 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7277 arg6, arg7, arg8, 0);
7278 break;
7279 #endif
7280 case TARGET_NR_wait4:
7282 int status;
7283 abi_long status_ptr = arg2;
7284 struct rusage rusage, *rusage_ptr;
7285 abi_ulong target_rusage = arg4;
7286 abi_long rusage_err;
7287 if (target_rusage)
7288 rusage_ptr = &rusage;
7289 else
7290 rusage_ptr = NULL;
7291 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7292 if (!is_error(ret)) {
7293 if (status_ptr && ret) {
7294 status = host_to_target_waitstatus(status);
7295 if (put_user_s32(status, status_ptr))
7296 goto efault;
7298 if (target_rusage) {
7299 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7300 if (rusage_err) {
7301 ret = rusage_err;
7306 break;
7307 #ifdef TARGET_NR_swapoff
7308 case TARGET_NR_swapoff:
7309 if (!(p = lock_user_string(arg1)))
7310 goto efault;
7311 ret = get_errno(swapoff(p));
7312 unlock_user(p, arg1, 0);
7313 break;
7314 #endif
7315 case TARGET_NR_sysinfo:
7317 struct target_sysinfo *target_value;
7318 struct sysinfo value;
7319 ret = get_errno(sysinfo(&value));
7320 if (!is_error(ret) && arg1)
7322 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7323 goto efault;
7324 __put_user(value.uptime, &target_value->uptime);
7325 __put_user(value.loads[0], &target_value->loads[0]);
7326 __put_user(value.loads[1], &target_value->loads[1]);
7327 __put_user(value.loads[2], &target_value->loads[2]);
7328 __put_user(value.totalram, &target_value->totalram);
7329 __put_user(value.freeram, &target_value->freeram);
7330 __put_user(value.sharedram, &target_value->sharedram);
7331 __put_user(value.bufferram, &target_value->bufferram);
7332 __put_user(value.totalswap, &target_value->totalswap);
7333 __put_user(value.freeswap, &target_value->freeswap);
7334 __put_user(value.procs, &target_value->procs);
7335 __put_user(value.totalhigh, &target_value->totalhigh);
7336 __put_user(value.freehigh, &target_value->freehigh);
7337 __put_user(value.mem_unit, &target_value->mem_unit);
7338 unlock_user_struct(target_value, arg1, 1);
7341 break;
7342 #ifdef TARGET_NR_ipc
7343 case TARGET_NR_ipc:
7344 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7345 break;
7346 #endif
7347 #ifdef TARGET_NR_semget
7348 case TARGET_NR_semget:
7349 ret = get_errno(semget(arg1, arg2, arg3));
7350 break;
7351 #endif
7352 #ifdef TARGET_NR_semop
7353 case TARGET_NR_semop:
7354 ret = do_semop(arg1, arg2, arg3);
7355 break;
7356 #endif
7357 #ifdef TARGET_NR_semctl
7358 case TARGET_NR_semctl:
7359 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
7360 break;
7361 #endif
7362 #ifdef TARGET_NR_msgctl
7363 case TARGET_NR_msgctl:
7364 ret = do_msgctl(arg1, arg2, arg3);
7365 break;
7366 #endif
7367 #ifdef TARGET_NR_msgget
7368 case TARGET_NR_msgget:
7369 ret = get_errno(msgget(arg1, arg2));
7370 break;
7371 #endif
7372 #ifdef TARGET_NR_msgrcv
7373 case TARGET_NR_msgrcv:
7374 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7375 break;
7376 #endif
7377 #ifdef TARGET_NR_msgsnd
7378 case TARGET_NR_msgsnd:
7379 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7380 break;
7381 #endif
7382 #ifdef TARGET_NR_shmget
7383 case TARGET_NR_shmget:
7384 ret = get_errno(shmget(arg1, arg2, arg3));
7385 break;
7386 #endif
7387 #ifdef TARGET_NR_shmctl
7388 case TARGET_NR_shmctl:
7389 ret = do_shmctl(arg1, arg2, arg3);
7390 break;
7391 #endif
7392 #ifdef TARGET_NR_shmat
7393 case TARGET_NR_shmat:
7394 ret = do_shmat(arg1, arg2, arg3);
7395 break;
7396 #endif
7397 #ifdef TARGET_NR_shmdt
7398 case TARGET_NR_shmdt:
7399 ret = do_shmdt(arg1);
7400 break;
7401 #endif
7402 case TARGET_NR_fsync:
7403 ret = get_errno(fsync(arg1));
7404 break;
7405 case TARGET_NR_clone:
7406 /* Linux manages to have three different orderings for its
7407 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7408 * match the kernel's CONFIG_CLONE_* settings.
7409 * Microblaze is further special in that it uses a sixth
7410 * implicit argument to clone for the TLS pointer.
7412 #if defined(TARGET_MICROBLAZE)
7413 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7414 #elif defined(TARGET_CLONE_BACKWARDS)
7415 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7416 #elif defined(TARGET_CLONE_BACKWARDS2)
7417 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7418 #else
7419 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7420 #endif
7421 break;
7422 #ifdef __NR_exit_group
7423 /* new thread calls */
7424 case TARGET_NR_exit_group:
7425 #ifdef TARGET_GPROF
7426 _mcleanup();
7427 #endif
7428 gdb_exit(cpu_env, arg1);
7429 ret = get_errno(exit_group(arg1));
7430 break;
7431 #endif
7432 case TARGET_NR_setdomainname:
7433 if (!(p = lock_user_string(arg1)))
7434 goto efault;
7435 ret = get_errno(setdomainname(p, arg2));
7436 unlock_user(p, arg1, 0);
7437 break;
7438 case TARGET_NR_uname:
7439 /* no need to transcode because we use the linux syscall */
7441 struct new_utsname * buf;
7443 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7444 goto efault;
7445 ret = get_errno(sys_uname(buf));
7446 if (!is_error(ret)) {
7447 /* Overwrite the native machine name with whatever is being
7448 emulated. */
7449 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7450 /* Allow the user to override the reported release. */
7451 if (qemu_uname_release && *qemu_uname_release)
7452 strcpy (buf->release, qemu_uname_release);
7454 unlock_user_struct(buf, arg1, 1);
7456 break;
7457 #ifdef TARGET_I386
7458 case TARGET_NR_modify_ldt:
7459 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7460 break;
7461 #if !defined(TARGET_X86_64)
7462 case TARGET_NR_vm86old:
7463 goto unimplemented;
7464 case TARGET_NR_vm86:
7465 ret = do_vm86(cpu_env, arg1, arg2);
7466 break;
7467 #endif
7468 #endif
7469 case TARGET_NR_adjtimex:
7470 goto unimplemented;
7471 #ifdef TARGET_NR_create_module
7472 case TARGET_NR_create_module:
7473 #endif
7474 case TARGET_NR_init_module:
7475 case TARGET_NR_delete_module:
7476 #ifdef TARGET_NR_get_kernel_syms
7477 case TARGET_NR_get_kernel_syms:
7478 #endif
7479 goto unimplemented;
7480 case TARGET_NR_quotactl:
7481 goto unimplemented;
7482 case TARGET_NR_getpgid:
7483 ret = get_errno(getpgid(arg1));
7484 break;
7485 case TARGET_NR_fchdir:
7486 ret = get_errno(fchdir(arg1));
7487 break;
7488 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7489 case TARGET_NR_bdflush:
7490 goto unimplemented;
7491 #endif
7492 #ifdef TARGET_NR_sysfs
7493 case TARGET_NR_sysfs:
7494 goto unimplemented;
7495 #endif
7496 case TARGET_NR_personality:
7497 ret = get_errno(personality(arg1));
7498 break;
7499 #ifdef TARGET_NR_afs_syscall
7500 case TARGET_NR_afs_syscall:
7501 goto unimplemented;
7502 #endif
7503 #ifdef TARGET_NR__llseek /* Not on alpha */
7504 case TARGET_NR__llseek:
7506 int64_t res;
7507 #if !defined(__NR_llseek)
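/* A host without the llseek syscall is 64-bit, so the combined offset fits
 * in off_t and can be passed directly to lseek(). */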
7508 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7509 if (res == -1) {
7510 ret = get_errno(res);
7511 } else {
7512 ret = 0;
7514 #else
7515 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7516 #endif
7517 if ((ret == 0) && put_user_s64(res, arg4)) {
7518 goto efault;
7521 break;
7522 #endif
7523 case TARGET_NR_getdents:
7524 #ifdef __NR_getdents
7525 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
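/* The host linux_dirent layout is wider than the 32-bit target layout, so
 * read into a temporary host buffer and repack each record into the guest
 * buffer. */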
7527 struct target_dirent *target_dirp;
7528 struct linux_dirent *dirp;
7529 abi_long count = arg3;
7531 dirp = malloc(count);
7532 if (!dirp) {
7533 ret = -TARGET_ENOMEM;
7534 goto fail;
7537 ret = get_errno(sys_getdents(arg1, dirp, count));
7538 if (!is_error(ret)) {
7539 struct linux_dirent *de;
7540 struct target_dirent *tde;
7541 int len = ret;
7542 int reclen, treclen;
7543 int count1, tnamelen;
7545 count1 = 0;
7546 de = dirp;
7547 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7548 goto efault;
7549 tde = target_dirp;
7550 while (len > 0) {
7551 reclen = de->d_reclen;
7552 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7553 assert(tnamelen >= 0);
7554 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7555 assert(count1 + treclen <= count);
7556 tde->d_reclen = tswap16(treclen);
7557 tde->d_ino = tswapal(de->d_ino);
7558 tde->d_off = tswapal(de->d_off);
7559 memcpy(tde->d_name, de->d_name, tnamelen);
7560 de = (struct linux_dirent *)((char *)de + reclen);
7561 len -= reclen;
7562 tde = (struct target_dirent *)((char *)tde + treclen);
7563 count1 += treclen;
7565 ret = count1;
7566 unlock_user(target_dirp, arg2, ret);
7568 free(dirp);
7570 #else
7572 struct linux_dirent *dirp;
7573 abi_long count = arg3;
7575 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7576 goto efault;
7577 ret = get_errno(sys_getdents(arg1, dirp, count));
7578 if (!is_error(ret)) {
7579 struct linux_dirent *de;
7580 int len = ret;
7581 int reclen;
7582 de = dirp;
7583 while (len > 0) {
7584 reclen = de->d_reclen;
7585 if (reclen > len)
7586 break;
7587 de->d_reclen = tswap16(reclen);
7588 tswapls(&de->d_ino);
7589 tswapls(&de->d_off);
7590 de = (struct linux_dirent *)((char *)de + reclen);
7591 len -= reclen;
7594 unlock_user(dirp, arg2, ret);
7596 #endif
7597 #else
7598 /* Implement getdents in terms of getdents64 */
7600 struct linux_dirent64 *dirp;
7601 abi_long count = arg3;
7603 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7604 if (!dirp) {
7605 goto efault;
7607 ret = get_errno(sys_getdents64(arg1, dirp, count));
7608 if (!is_error(ret)) {
7609 /* Convert the dirent64 structs to target dirent. We do this
7610 * in-place, since we can guarantee that a target_dirent is no
7611 * larger than a dirent64; however this means we have to be
7612 * careful to read everything before writing in the new format.
7614 struct linux_dirent64 *de;
7615 struct target_dirent *tde;
7616 int len = ret;
7617 int tlen = 0;
7619 de = dirp;
7620 tde = (struct target_dirent *)dirp;
7621 while (len > 0) {
7622 int namelen, treclen;
7623 int reclen = de->d_reclen;
7624 uint64_t ino = de->d_ino;
7625 int64_t off = de->d_off;
7626 uint8_t type = de->d_type;
7628 namelen = strlen(de->d_name);
7629 treclen = offsetof(struct target_dirent, d_name)
7630 + namelen + 2;
7631 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7633 memmove(tde->d_name, de->d_name, namelen + 1);
7634 tde->d_ino = tswapal(ino);
7635 tde->d_off = tswapal(off);
7636 tde->d_reclen = tswap16(treclen);
7637 /* The target_dirent type is in what was formerly a padding
7638 * byte at the end of the structure:
7640 *(((char *)tde) + treclen - 1) = type;
7642 de = (struct linux_dirent64 *)((char *)de + reclen);
7643 tde = (struct target_dirent *)((char *)tde + treclen);
7644 len -= reclen;
7645 tlen += treclen;
7647 ret = tlen;
7649 unlock_user(dirp, arg2, ret);
7651 #endif
7652 break;
7653 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7654 case TARGET_NR_getdents64:
7656 struct linux_dirent64 *dirp;
7657 abi_long count = arg3;
7658 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7659 goto efault;
7660 ret = get_errno(sys_getdents64(arg1, dirp, count));
7661 if (!is_error(ret)) {
7662 struct linux_dirent64 *de;
7663 int len = ret;
7664 int reclen;
7665 de = dirp;
7666 while (len > 0) {
7667 reclen = de->d_reclen;
7668 if (reclen > len)
7669 break;
7670 de->d_reclen = tswap16(reclen);
7671 tswap64s((uint64_t *)&de->d_ino);
7672 tswap64s((uint64_t *)&de->d_off);
7673 de = (struct linux_dirent64 *)((char *)de + reclen);
7674 len -= reclen;
7677 unlock_user(dirp, arg2, ret);
7679 break;
7680 #endif /* TARGET_NR_getdents64 */
7681 #if defined(TARGET_NR__newselect)
7682 case TARGET_NR__newselect:
7683 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7684 break;
7685 #endif
7686 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7687 # ifdef TARGET_NR_poll
7688 case TARGET_NR_poll:
7689 # endif
7690 # ifdef TARGET_NR_ppoll
7691 case TARGET_NR_ppoll:
7692 # endif
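/* poll and ppoll share the pollfd conversion; ppoll additionally converts
 * its timespec and sigset arguments below. */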
7694 struct target_pollfd *target_pfd;
7695 unsigned int nfds = arg2;
7696 int timeout = arg3;
7697 struct pollfd *pfd;
7698 unsigned int i;
7700 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7701 if (!target_pfd)
7702 goto efault;
7704 pfd = alloca(sizeof(struct pollfd) * nfds);
7705 for(i = 0; i < nfds; i++) {
7706 pfd[i].fd = tswap32(target_pfd[i].fd);
7707 pfd[i].events = tswap16(target_pfd[i].events);
7710 # ifdef TARGET_NR_ppoll
7711 if (num == TARGET_NR_ppoll) {
7712 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7713 target_sigset_t *target_set;
7714 sigset_t _set, *set = &_set;
7716 if (arg3) {
7717 if (target_to_host_timespec(timeout_ts, arg3)) {
7718 unlock_user(target_pfd, arg1, 0);
7719 goto efault;
7721 } else {
7722 timeout_ts = NULL;
7725 if (arg4) {
7726 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7727 if (!target_set) {
7728 unlock_user(target_pfd, arg1, 0);
7729 goto efault;
7731 target_to_host_sigset(set, target_set);
7732 } else {
7733 set = NULL;
7736 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7738 if (!is_error(ret) && arg3) {
7739 host_to_target_timespec(arg3, timeout_ts);
7741 if (arg4) {
7742 unlock_user(target_set, arg4, 0);
7744 } else
7745 # endif
7746 ret = get_errno(poll(pfd, nfds, timeout));
7748 if (!is_error(ret)) {
7749 for(i = 0; i < nfds; i++) {
7750 target_pfd[i].revents = tswap16(pfd[i].revents);
7753 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7755 break;
7756 #endif
7757 case TARGET_NR_flock:
7758 /* NOTE: the flock constant seems to be the same for every
7759 Linux platform */
7760 ret = get_errno(flock(arg1, arg2));
7761 break;
7762 case TARGET_NR_readv:
7764 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7765 if (vec != NULL) {
7766 ret = get_errno(readv(arg1, vec, arg3));
7767 unlock_iovec(vec, arg2, arg3, 1);
7768 } else {
7769 ret = -host_to_target_errno(errno);
7772 break;
7773 case TARGET_NR_writev:
7775 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7776 if (vec != NULL) {
7777 ret = get_errno(writev(arg1, vec, arg3));
7778 unlock_iovec(vec, arg2, arg3, 0);
7779 } else {
7780 ret = -host_to_target_errno(errno);
7783 break;
7784 case TARGET_NR_getsid:
7785 ret = get_errno(getsid(arg1));
7786 break;
7787 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7788 case TARGET_NR_fdatasync:
7789 ret = get_errno(fdatasync(arg1));
7790 break;
7791 #endif
7792 case TARGET_NR__sysctl:
7793 /* We don't implement this, but ENOTDIR is always a safe
7794 return value. */
7795 ret = -TARGET_ENOTDIR;
7796 break;
7797 case TARGET_NR_sched_getaffinity:
7799 unsigned int mask_size;
7800 unsigned long *mask;
7803 * sched_getaffinity needs multiples of ulong, so we need to take
7804 * care of mismatches between target ulong and host ulong sizes.
7806 if (arg2 & (sizeof(abi_ulong) - 1)) {
7807 ret = -TARGET_EINVAL;
7808 break;
7810 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7812 mask = alloca(mask_size);
7813 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7815 if (!is_error(ret)) {
7816 if (ret > arg2) {
7817 /* More data returned than the caller's buffer will fit.
7818 * This only happens if sizeof(abi_long) < sizeof(long)
7819 * and the caller passed us a buffer holding an odd number
7820 * of abi_longs. If the host kernel is actually using the
7821 * extra 4 bytes then fail EINVAL; otherwise we can just
7822 * ignore them and only copy the interesting part.
7824 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
7825 if (numcpus > arg2 * 8) {
7826 ret = -TARGET_EINVAL;
7827 break;
7829 ret = arg2;
7832 if (copy_to_user(arg3, mask, ret)) {
7833 goto efault;
7837 break;
7838 case TARGET_NR_sched_setaffinity:
7840 unsigned int mask_size;
7841 unsigned long *mask;
7844 * sched_setaffinity needs multiples of ulong, so we need to take
7845 * care of mismatches between target ulong and host ulong sizes.
7847 if (arg2 & (sizeof(abi_ulong) - 1)) {
7848 ret = -TARGET_EINVAL;
7849 break;
7851 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7853 mask = alloca(mask_size);
7854 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7855 goto efault;
7857 memcpy(mask, p, arg2);
7858 unlock_user_struct(p, arg3, 0);
7860 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7862 break;
7863 case TARGET_NR_sched_setparam:
7865 struct sched_param *target_schp;
7866 struct sched_param schp;
7868 if (arg2 == 0) {
7869 return -TARGET_EINVAL;
7871 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7872 goto efault;
7873 schp.sched_priority = tswap32(target_schp->sched_priority);
7874 unlock_user_struct(target_schp, arg2, 0);
7875 ret = get_errno(sched_setparam(arg1, &schp));
7877 break;
7878 case TARGET_NR_sched_getparam:
7880 struct sched_param *target_schp;
7881 struct sched_param schp;
7883 if (arg2 == 0) {
7884 return -TARGET_EINVAL;
7886 ret = get_errno(sched_getparam(arg1, &schp));
7887 if (!is_error(ret)) {
7888 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7889 goto efault;
7890 target_schp->sched_priority = tswap32(schp.sched_priority);
7891 unlock_user_struct(target_schp, arg2, 1);
7894 break;
7895 case TARGET_NR_sched_setscheduler:
7897 struct sched_param *target_schp;
7898 struct sched_param schp;
7899 if (arg3 == 0) {
7900 return -TARGET_EINVAL;
7902 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7903 goto efault;
7904 schp.sched_priority = tswap32(target_schp->sched_priority);
7905 unlock_user_struct(target_schp, arg3, 0);
7906 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7908 break;
7909 case TARGET_NR_sched_getscheduler:
7910 ret = get_errno(sched_getscheduler(arg1));
7911 break;
7912 case TARGET_NR_sched_yield:
7913 ret = get_errno(sched_yield());
7914 break;
7915 case TARGET_NR_sched_get_priority_max:
7916 ret = get_errno(sched_get_priority_max(arg1));
7917 break;
7918 case TARGET_NR_sched_get_priority_min:
7919 ret = get_errno(sched_get_priority_min(arg1));
7920 break;
7921 case TARGET_NR_sched_rr_get_interval:
7923 struct timespec ts;
7924 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7925 if (!is_error(ret)) {
7926 ret = host_to_target_timespec(arg2, &ts);
7929 break;
7930 case TARGET_NR_nanosleep:
7932 struct timespec req, rem;
7933 target_to_host_timespec(&req, arg1);
7934 ret = get_errno(nanosleep(&req, &rem));
7935 if (is_error(ret) && arg2) {
7936 host_to_target_timespec(arg2, &rem);
7939 break;
7940 #ifdef TARGET_NR_query_module
7941 case TARGET_NR_query_module:
7942 goto unimplemented;
7943 #endif
7944 #ifdef TARGET_NR_nfsservctl
7945 case TARGET_NR_nfsservctl:
7946 goto unimplemented;
7947 #endif
7948 case TARGET_NR_prctl:
7949 switch (arg1) {
7950 case PR_GET_PDEATHSIG:
7952 int deathsig;
7953 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7954 if (!is_error(ret) && arg2
7955 && put_user_ual(deathsig, arg2)) {
7956 goto efault;
7958 break;
7960 #ifdef PR_GET_NAME
7961 case PR_GET_NAME:
7963 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7964 if (!name) {
7965 goto efault;
7967 ret = get_errno(prctl(arg1, (unsigned long)name,
7968 arg3, arg4, arg5));
7969 unlock_user(name, arg2, 16);
7970 break;
7972 case PR_SET_NAME:
7974 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7975 if (!name) {
7976 goto efault;
7978 ret = get_errno(prctl(arg1, (unsigned long)name,
7979 arg3, arg4, arg5));
7980 unlock_user(name, arg2, 0);
7981 break;
7983 #endif
7984 default:
7985 /* Most prctl options have no pointer arguments */
7986 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7987 break;
7989 break;
7990 #ifdef TARGET_NR_arch_prctl
7991 case TARGET_NR_arch_prctl:
7992 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7993 ret = do_arch_prctl(cpu_env, arg1, arg2);
7994 break;
7995 #else
7996 goto unimplemented;
7997 #endif
7998 #endif
7999 #ifdef TARGET_NR_pread64
8000 case TARGET_NR_pread64:
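/* ABIs that align 64-bit syscall arguments to register pairs insert a pad
 * slot, so the two halves of the offset arrive one argument later. */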
8001 if (regpairs_aligned(cpu_env)) {
8002 arg4 = arg5;
8003 arg5 = arg6;
8005 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8006 goto efault;
8007 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
8008 unlock_user(p, arg2, ret);
8009 break;
8010 case TARGET_NR_pwrite64:
8011 if (regpairs_aligned(cpu_env)) {
8012 arg4 = arg5;
8013 arg5 = arg6;
8015 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8016 goto efault;
8017 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
8018 unlock_user(p, arg2, 0);
8019 break;
8020 #endif
8021 case TARGET_NR_getcwd:
8022 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
8023 goto efault;
8024 ret = get_errno(sys_getcwd1(p, arg2));
8025 unlock_user(p, arg1, ret);
8026 break;
8027 case TARGET_NR_capget:
8028 case TARGET_NR_capset:
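/* capget and capset share the marshalling of the capability header and
 * data structs; 'num' selects which host syscall is issued. */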
8030 struct target_user_cap_header *target_header;
8031 struct target_user_cap_data *target_data = NULL;
8032 struct __user_cap_header_struct header;
8033 struct __user_cap_data_struct data[2];
8034 struct __user_cap_data_struct *dataptr = NULL;
8035 int i, target_datalen;
8036 int data_items = 1;
8038 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
8039 goto efault;
8041 header.version = tswap32(target_header->version);
8042 header.pid = tswap32(target_header->pid);
8044 if (header.version != _LINUX_CAPABILITY_VERSION) {
 8045                 /* Versions 2 and up take a pointer to two user_data structs */
8046 data_items = 2;
8049 target_datalen = sizeof(*target_data) * data_items;
8051 if (arg2) {
8052 if (num == TARGET_NR_capget) {
8053 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8054 } else {
8055 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8057 if (!target_data) {
8058 unlock_user_struct(target_header, arg1, 0);
8059 goto efault;
8062 if (num == TARGET_NR_capset) {
8063 for (i = 0; i < data_items; i++) {
8064 data[i].effective = tswap32(target_data[i].effective);
8065 data[i].permitted = tswap32(target_data[i].permitted);
8066 data[i].inheritable = tswap32(target_data[i].inheritable);
8070 dataptr = data;
8073 if (num == TARGET_NR_capget) {
8074 ret = get_errno(capget(&header, dataptr));
8075 } else {
8076 ret = get_errno(capset(&header, dataptr));
8079 /* The kernel always updates version for both capget and capset */
8080 target_header->version = tswap32(header.version);
8081 unlock_user_struct(target_header, arg1, 1);
8083 if (arg2) {
8084 if (num == TARGET_NR_capget) {
8085 for (i = 0; i < data_items; i++) {
8086 target_data[i].effective = tswap32(data[i].effective);
8087 target_data[i].permitted = tswap32(data[i].permitted);
8088 target_data[i].inheritable = tswap32(data[i].inheritable);
8090 unlock_user(target_data, arg2, target_datalen);
8091 } else {
8092 unlock_user(target_data, arg2, 0);
8095 break;
8097 case TARGET_NR_sigaltstack:
8098 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
8099 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
8100 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
8101 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8102 break;
8103 #else
8104 goto unimplemented;
8105 #endif
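    /* sendfile/sendfile64: the optional offset is read from guest memory,
     * passed to the host syscall, and written back afterwards because the
     * kernel updates it in place. */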
8107 #ifdef CONFIG_SENDFILE
8108 case TARGET_NR_sendfile:
8110 off_t *offp = NULL;
8111 off_t off;
8112 if (arg3) {
8113 ret = get_user_sal(off, arg3);
8114 if (is_error(ret)) {
8115 break;
8117 offp = &off;
8119 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8120 if (!is_error(ret) && arg3) {
8121 abi_long ret2 = put_user_sal(off, arg3);
8122 if (is_error(ret2)) {
8123 ret = ret2;
8126 break;
8128 #ifdef TARGET_NR_sendfile64
8129 case TARGET_NR_sendfile64:
8131 off_t *offp = NULL;
8132 off_t off;
8133 if (arg3) {
8134 ret = get_user_s64(off, arg3);
8135 if (is_error(ret)) {
8136 break;
8138 offp = &off;
8140 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8141 if (!is_error(ret) && arg3) {
8142 abi_long ret2 = put_user_s64(off, arg3);
8143 if (is_error(ret2)) {
8144 ret = ret2;
8147 break;
8149 #endif
8150 #else
8151 case TARGET_NR_sendfile:
8152 #ifdef TARGET_NR_sendfile64
8153 case TARGET_NR_sendfile64:
8154 #endif
8155 goto unimplemented;
8156 #endif
8158 #ifdef TARGET_NR_getpmsg
8159 case TARGET_NR_getpmsg:
8160 goto unimplemented;
8161 #endif
8162 #ifdef TARGET_NR_putpmsg
8163 case TARGET_NR_putpmsg:
8164 goto unimplemented;
8165 #endif
8166 #ifdef TARGET_NR_vfork
8167 case TARGET_NR_vfork:
8168 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8169 0, 0, 0, 0));
8170 break;
8171 #endif
8172 #ifdef TARGET_NR_ugetrlimit
8173 case TARGET_NR_ugetrlimit:
8175 struct rlimit rlim;
8176 int resource = target_to_host_resource(arg1);
8177 ret = get_errno(getrlimit(resource, &rlim));
8178 if (!is_error(ret)) {
8179 struct target_rlimit *target_rlim;
8180 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8181 goto efault;
8182 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8183 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8184 unlock_user_struct(target_rlim, arg2, 1);
8186 break;
8188 #endif
8189 #ifdef TARGET_NR_truncate64
8190 case TARGET_NR_truncate64:
8191 if (!(p = lock_user_string(arg1)))
8192 goto efault;
8193 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8194 unlock_user(p, arg1, 0);
8195 break;
8196 #endif
8197 #ifdef TARGET_NR_ftruncate64
8198 case TARGET_NR_ftruncate64:
8199 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8200 break;
8201 #endif
8202 #ifdef TARGET_NR_stat64
8203 case TARGET_NR_stat64:
8204 if (!(p = lock_user_string(arg1)))
8205 goto efault;
8206 ret = get_errno(stat(path(p), &st));
8207 unlock_user(p, arg1, 0);
8208 if (!is_error(ret))
8209 ret = host_to_target_stat64(cpu_env, arg2, &st);
8210 break;
8211 #endif
8212 #ifdef TARGET_NR_lstat64
8213 case TARGET_NR_lstat64:
8214 if (!(p = lock_user_string(arg1)))
8215 goto efault;
8216 ret = get_errno(lstat(path(p), &st));
8217 unlock_user(p, arg1, 0);
8218 if (!is_error(ret))
8219 ret = host_to_target_stat64(cpu_env, arg2, &st);
8220 break;
8221 #endif
8222 #ifdef TARGET_NR_fstat64
8223 case TARGET_NR_fstat64:
8224 ret = get_errno(fstat(arg1, &st));
8225 if (!is_error(ret))
8226 ret = host_to_target_stat64(cpu_env, arg2, &st);
8227 break;
8228 #endif
8229 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8230 #ifdef TARGET_NR_fstatat64
8231 case TARGET_NR_fstatat64:
8232 #endif
8233 #ifdef TARGET_NR_newfstatat
8234 case TARGET_NR_newfstatat:
8235 #endif
8236 if (!(p = lock_user_string(arg2)))
8237 goto efault;
8238 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8239 if (!is_error(ret))
8240 ret = host_to_target_stat64(cpu_env, arg3, &st);
8241 break;
8242 #endif
8243 case TARGET_NR_lchown:
8244 if (!(p = lock_user_string(arg1)))
8245 goto efault;
8246 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8247 unlock_user(p, arg1, 0);
8248 break;
8249 #ifdef TARGET_NR_getuid
8250 case TARGET_NR_getuid:
8251 ret = get_errno(high2lowuid(getuid()));
8252 break;
8253 #endif
8254 #ifdef TARGET_NR_getgid
8255 case TARGET_NR_getgid:
8256 ret = get_errno(high2lowgid(getgid()));
8257 break;
8258 #endif
8259 #ifdef TARGET_NR_geteuid
8260 case TARGET_NR_geteuid:
8261 ret = get_errno(high2lowuid(geteuid()));
8262 break;
8263 #endif
8264 #ifdef TARGET_NR_getegid
8265 case TARGET_NR_getegid:
8266 ret = get_errno(high2lowgid(getegid()));
8267 break;
8268 #endif
8269 case TARGET_NR_setreuid:
8270 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8271 break;
8272 case TARGET_NR_setregid:
8273 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8274 break;
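    /* getgroups/setgroups: bounce the group list through a host gid_t
     * buffer, converting each entry with tswapid() and the high2low/low2high
     * gid helpers (the target_id type may be only 16 bits wide). */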
8275 case TARGET_NR_getgroups:
8277 int gidsetsize = arg1;
8278 target_id *target_grouplist;
8279 gid_t *grouplist;
8280 int i;
8282 grouplist = alloca(gidsetsize * sizeof(gid_t));
8283 ret = get_errno(getgroups(gidsetsize, grouplist));
8284 if (gidsetsize == 0)
8285 break;
8286 if (!is_error(ret)) {
8287 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8288 if (!target_grouplist)
8289 goto efault;
8290 for(i = 0;i < ret; i++)
8291 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8292 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8295 break;
8296 case TARGET_NR_setgroups:
8298 int gidsetsize = arg1;
8299 target_id *target_grouplist;
8300 gid_t *grouplist = NULL;
8301 int i;
8302 if (gidsetsize) {
8303 grouplist = alloca(gidsetsize * sizeof(gid_t));
8304 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8305 if (!target_grouplist) {
8306 ret = -TARGET_EFAULT;
8307 goto fail;
8309 for (i = 0; i < gidsetsize; i++) {
8310 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8312 unlock_user(target_grouplist, arg2, 0);
8314 ret = get_errno(setgroups(gidsetsize, grouplist));
8316 break;
8317 case TARGET_NR_fchown:
8318 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8319 break;
8320 #if defined(TARGET_NR_fchownat)
8321 case TARGET_NR_fchownat:
8322 if (!(p = lock_user_string(arg2)))
8323 goto efault;
8324 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8325 low2highgid(arg4), arg5));
8326 unlock_user(p, arg2, 0);
8327 break;
8328 #endif
8329 #ifdef TARGET_NR_setresuid
8330 case TARGET_NR_setresuid:
8331 ret = get_errno(setresuid(low2highuid(arg1),
8332 low2highuid(arg2),
8333 low2highuid(arg3)));
8334 break;
8335 #endif
8336 #ifdef TARGET_NR_getresuid
8337 case TARGET_NR_getresuid:
8339 uid_t ruid, euid, suid;
8340 ret = get_errno(getresuid(&ruid, &euid, &suid));
8341 if (!is_error(ret)) {
8342 if (put_user_id(high2lowuid(ruid), arg1)
8343 || put_user_id(high2lowuid(euid), arg2)
8344 || put_user_id(high2lowuid(suid), arg3))
8345 goto efault;
8348 break;
8349 #endif
8350 #ifdef TARGET_NR_getresgid
8351 case TARGET_NR_setresgid:
8352 ret = get_errno(setresgid(low2highgid(arg1),
8353 low2highgid(arg2),
8354 low2highgid(arg3)));
8355 break;
8356 #endif
8357 #ifdef TARGET_NR_getresgid
8358 case TARGET_NR_getresgid:
8360 gid_t rgid, egid, sgid;
8361 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8362 if (!is_error(ret)) {
8363 if (put_user_id(high2lowgid(rgid), arg1)
8364 || put_user_id(high2lowgid(egid), arg2)
8365 || put_user_id(high2lowgid(sgid), arg3))
8366 goto efault;
8369 break;
8370 #endif
8371 case TARGET_NR_chown:
8372 if (!(p = lock_user_string(arg1)))
8373 goto efault;
8374 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8375 unlock_user(p, arg1, 0);
8376 break;
8377 case TARGET_NR_setuid:
8378 ret = get_errno(setuid(low2highuid(arg1)));
8379 break;
8380 case TARGET_NR_setgid:
8381 ret = get_errno(setgid(low2highgid(arg1)));
8382 break;
8383 case TARGET_NR_setfsuid:
8384 ret = get_errno(setfsuid(arg1));
8385 break;
8386 case TARGET_NR_setfsgid:
8387 ret = get_errno(setfsgid(arg1));
8388 break;
8390 #ifdef TARGET_NR_lchown32
8391 case TARGET_NR_lchown32:
8392 if (!(p = lock_user_string(arg1)))
8393 goto efault;
8394 ret = get_errno(lchown(p, arg2, arg3));
8395 unlock_user(p, arg1, 0);
8396 break;
8397 #endif
8398 #ifdef TARGET_NR_getuid32
8399 case TARGET_NR_getuid32:
8400 ret = get_errno(getuid());
8401 break;
8402 #endif
8404 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8405 /* Alpha specific */
8406 case TARGET_NR_getxuid:
8408 uid_t euid;
8409 euid=geteuid();
8410 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8412 ret = get_errno(getuid());
8413 break;
8414 #endif
8415 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8416 /* Alpha specific */
8417 case TARGET_NR_getxgid:
8419 uid_t egid;
8420 egid=getegid();
8421 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8423 ret = get_errno(getgid());
8424 break;
8425 #endif
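    /* The Alpha-only osf_getsysinfo/osf_setsysinfo cases below expose the
     * IEEE floating point control word; the FPCR<->SWCR bit shuffling
     * mirrors the kernel's ieee_fpcr_to_swcr/ieee_swcr_to_fpcr helpers. */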
8426 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8427 /* Alpha specific */
8428 case TARGET_NR_osf_getsysinfo:
8429 ret = -TARGET_EOPNOTSUPP;
8430 switch (arg1) {
8431 case TARGET_GSI_IEEE_FP_CONTROL:
8433 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8435 /* Copied from linux ieee_fpcr_to_swcr. */
8436 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8437 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8438 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8439 | SWCR_TRAP_ENABLE_DZE
8440 | SWCR_TRAP_ENABLE_OVF);
8441 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8442 | SWCR_TRAP_ENABLE_INE);
8443 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8444 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8446 if (put_user_u64 (swcr, arg2))
8447 goto efault;
8448 ret = 0;
8450 break;
8452 /* case GSI_IEEE_STATE_AT_SIGNAL:
8453 -- Not implemented in linux kernel.
8454 case GSI_UACPROC:
8455 -- Retrieves current unaligned access state; not much used.
8456 case GSI_PROC_TYPE:
8457 -- Retrieves implver information; surely not used.
8458 case GSI_GET_HWRPB:
 8459              -- Grabs a copy of the HWRPB; surely not used. */
8462 break;
8463 #endif
8464 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8465 /* Alpha specific */
8466 case TARGET_NR_osf_setsysinfo:
8467 ret = -TARGET_EOPNOTSUPP;
8468 switch (arg1) {
8469 case TARGET_SSI_IEEE_FP_CONTROL:
8471 uint64_t swcr, fpcr, orig_fpcr;
8473 if (get_user_u64 (swcr, arg2)) {
8474 goto efault;
8476 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8477 fpcr = orig_fpcr & FPCR_DYN_MASK;
8479 /* Copied from linux ieee_swcr_to_fpcr. */
8480 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8481 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8482 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8483 | SWCR_TRAP_ENABLE_DZE
8484 | SWCR_TRAP_ENABLE_OVF)) << 48;
8485 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8486 | SWCR_TRAP_ENABLE_INE)) << 57;
8487 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8488 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8490 cpu_alpha_store_fpcr(cpu_env, fpcr);
8491 ret = 0;
8493 break;
8495 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8497 uint64_t exc, fpcr, orig_fpcr;
8498 int si_code;
8500 if (get_user_u64(exc, arg2)) {
8501 goto efault;
8504 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8506 /* We only add to the exception status here. */
8507 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8509 cpu_alpha_store_fpcr(cpu_env, fpcr);
8510 ret = 0;
8512 /* Old exceptions are not signaled. */
8513 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
 8515                 /* If any exceptions were set by this call and are
 8516                    unmasked, send a signal. */
8517 si_code = 0;
8518 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8519 si_code = TARGET_FPE_FLTRES;
8521 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8522 si_code = TARGET_FPE_FLTUND;
8524 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8525 si_code = TARGET_FPE_FLTOVF;
8527 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8528 si_code = TARGET_FPE_FLTDIV;
8530 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8531 si_code = TARGET_FPE_FLTINV;
8533 if (si_code != 0) {
8534 target_siginfo_t info;
8535 info.si_signo = SIGFPE;
8536 info.si_errno = 0;
8537 info.si_code = si_code;
8538 info._sifields._sigfault._addr
8539 = ((CPUArchState *)cpu_env)->pc;
8540 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8543 break;
8545 /* case SSI_NVPAIRS:
8546 -- Used with SSIN_UACPROC to enable unaligned accesses.
8547 case SSI_IEEE_STATE_AT_SIGNAL:
8548 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8549 -- Not implemented in linux kernel
8552 break;
8553 #endif
8554 #ifdef TARGET_NR_osf_sigprocmask
8555 /* Alpha specific. */
8556 case TARGET_NR_osf_sigprocmask:
8558 abi_ulong mask;
8559 int how;
8560 sigset_t set, oldset;
8562 switch(arg1) {
8563 case TARGET_SIG_BLOCK:
8564 how = SIG_BLOCK;
8565 break;
8566 case TARGET_SIG_UNBLOCK:
8567 how = SIG_UNBLOCK;
8568 break;
8569 case TARGET_SIG_SETMASK:
8570 how = SIG_SETMASK;
8571 break;
8572 default:
8573 ret = -TARGET_EINVAL;
8574 goto fail;
8576 mask = arg2;
8577 target_to_host_old_sigset(&set, &mask);
8578 do_sigprocmask(how, &set, &oldset);
8579 host_to_target_old_sigset(&mask, &oldset);
8580 ret = mask;
8582 break;
8583 #endif
8585 #ifdef TARGET_NR_getgid32
8586 case TARGET_NR_getgid32:
8587 ret = get_errno(getgid());
8588 break;
8589 #endif
8590 #ifdef TARGET_NR_geteuid32
8591 case TARGET_NR_geteuid32:
8592 ret = get_errno(geteuid());
8593 break;
8594 #endif
8595 #ifdef TARGET_NR_getegid32
8596 case TARGET_NR_getegid32:
8597 ret = get_errno(getegid());
8598 break;
8599 #endif
8600 #ifdef TARGET_NR_setreuid32
8601 case TARGET_NR_setreuid32:
8602 ret = get_errno(setreuid(arg1, arg2));
8603 break;
8604 #endif
8605 #ifdef TARGET_NR_setregid32
8606 case TARGET_NR_setregid32:
8607 ret = get_errno(setregid(arg1, arg2));
8608 break;
8609 #endif
8610 #ifdef TARGET_NR_getgroups32
8611 case TARGET_NR_getgroups32:
8613 int gidsetsize = arg1;
8614 uint32_t *target_grouplist;
8615 gid_t *grouplist;
8616 int i;
8618 grouplist = alloca(gidsetsize * sizeof(gid_t));
8619 ret = get_errno(getgroups(gidsetsize, grouplist));
8620 if (gidsetsize == 0)
8621 break;
8622 if (!is_error(ret)) {
8623 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8624 if (!target_grouplist) {
8625 ret = -TARGET_EFAULT;
8626 goto fail;
8628 for(i = 0;i < ret; i++)
8629 target_grouplist[i] = tswap32(grouplist[i]);
8630 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8633 break;
8634 #endif
8635 #ifdef TARGET_NR_setgroups32
8636 case TARGET_NR_setgroups32:
8638 int gidsetsize = arg1;
8639 uint32_t *target_grouplist;
8640 gid_t *grouplist;
8641 int i;
8643 grouplist = alloca(gidsetsize * sizeof(gid_t));
8644 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8645 if (!target_grouplist) {
8646 ret = -TARGET_EFAULT;
8647 goto fail;
8649 for(i = 0;i < gidsetsize; i++)
8650 grouplist[i] = tswap32(target_grouplist[i]);
8651 unlock_user(target_grouplist, arg2, 0);
8652 ret = get_errno(setgroups(gidsetsize, grouplist));
8654 break;
8655 #endif
8656 #ifdef TARGET_NR_fchown32
8657 case TARGET_NR_fchown32:
8658 ret = get_errno(fchown(arg1, arg2, arg3));
8659 break;
8660 #endif
8661 #ifdef TARGET_NR_setresuid32
8662 case TARGET_NR_setresuid32:
8663 ret = get_errno(setresuid(arg1, arg2, arg3));
8664 break;
8665 #endif
8666 #ifdef TARGET_NR_getresuid32
8667 case TARGET_NR_getresuid32:
8669 uid_t ruid, euid, suid;
8670 ret = get_errno(getresuid(&ruid, &euid, &suid));
8671 if (!is_error(ret)) {
8672 if (put_user_u32(ruid, arg1)
8673 || put_user_u32(euid, arg2)
8674 || put_user_u32(suid, arg3))
8675 goto efault;
8678 break;
8679 #endif
8680 #ifdef TARGET_NR_setresgid32
8681 case TARGET_NR_setresgid32:
8682 ret = get_errno(setresgid(arg1, arg2, arg3));
8683 break;
8684 #endif
8685 #ifdef TARGET_NR_getresgid32
8686 case TARGET_NR_getresgid32:
8688 gid_t rgid, egid, sgid;
8689 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8690 if (!is_error(ret)) {
8691 if (put_user_u32(rgid, arg1)
8692 || put_user_u32(egid, arg2)
8693 || put_user_u32(sgid, arg3))
8694 goto efault;
8697 break;
8698 #endif
8699 #ifdef TARGET_NR_chown32
8700 case TARGET_NR_chown32:
8701 if (!(p = lock_user_string(arg1)))
8702 goto efault;
8703 ret = get_errno(chown(p, arg2, arg3));
8704 unlock_user(p, arg1, 0);
8705 break;
8706 #endif
8707 #ifdef TARGET_NR_setuid32
8708 case TARGET_NR_setuid32:
8709 ret = get_errno(setuid(arg1));
8710 break;
8711 #endif
8712 #ifdef TARGET_NR_setgid32
8713 case TARGET_NR_setgid32:
8714 ret = get_errno(setgid(arg1));
8715 break;
8716 #endif
8717 #ifdef TARGET_NR_setfsuid32
8718 case TARGET_NR_setfsuid32:
8719 ret = get_errno(setfsuid(arg1));
8720 break;
8721 #endif
8722 #ifdef TARGET_NR_setfsgid32
8723 case TARGET_NR_setfsgid32:
8724 ret = get_errno(setfsgid(arg1));
8725 break;
8726 #endif
8728 case TARGET_NR_pivot_root:
8729 goto unimplemented;
8730 #ifdef TARGET_NR_mincore
8731 case TARGET_NR_mincore:
8733 void *a;
8734 ret = -TARGET_EFAULT;
8735 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8736 goto efault;
8737 if (!(p = lock_user_string(arg3)))
8738 goto mincore_fail;
8739 ret = get_errno(mincore(a, arg2, p));
8740 unlock_user(p, arg3, ret);
8741 mincore_fail:
8742 unlock_user(a, arg1, 0);
8744 break;
8745 #endif
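    /* fadvise64 family: arm_fadvise64_64 swaps two arguments relative to
     * fadvise64_64, and an s390x guest uses 6/7 for POSIX_FADV_DONTNEED/
     * POSIX_FADV_NOREUSE, so the advice value is remapped to the host
     * constants before calling posix_fadvise(). */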
8746 #ifdef TARGET_NR_arm_fadvise64_64
8747 case TARGET_NR_arm_fadvise64_64:
 8750            /* arm_fadvise64_64 looks like fadvise64_64 but
 8751             * with different argument order */
8753 abi_long temp;
8754 temp = arg3;
8755 arg3 = arg4;
8756 arg4 = temp;
8758 #endif
8759 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8760 #ifdef TARGET_NR_fadvise64_64
8761 case TARGET_NR_fadvise64_64:
8762 #endif
8763 #ifdef TARGET_NR_fadvise64
8764 case TARGET_NR_fadvise64:
8765 #endif
8766 #ifdef TARGET_S390X
8767 switch (arg4) {
8768 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8769 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8770 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8771 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8772 default: break;
8774 #endif
8775 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8776 break;
8777 #endif
8778 #ifdef TARGET_NR_madvise
8779 case TARGET_NR_madvise:
8780 /* A straight passthrough may not be safe because qemu sometimes
8781 turns private file-backed mappings into anonymous mappings.
8782 This will break MADV_DONTNEED.
8783 This is a hint, so ignoring and returning success is ok. */
8784 ret = get_errno(0);
8785 break;
8786 #endif
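    /* fcntl64 (32-bit ABIs): F_GETLK64/F_SETLK64/F_SETLKW64 convert the
     * 64-bit struct flock64 between target and host layouts, with a separate
     * target_eabi_flock64 layout for ARM EABI; other commands are forwarded
     * to do_fcntl(). */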
8787 #if TARGET_ABI_BITS == 32
8788 case TARGET_NR_fcntl64:
8790 int cmd;
8791 struct flock64 fl;
8792 struct target_flock64 *target_fl;
8793 #ifdef TARGET_ARM
8794 struct target_eabi_flock64 *target_efl;
8795 #endif
8797 cmd = target_to_host_fcntl_cmd(arg2);
8798 if (cmd == -TARGET_EINVAL) {
8799 ret = cmd;
8800 break;
8803 switch(arg2) {
8804 case TARGET_F_GETLK64:
8805 #ifdef TARGET_ARM
8806 if (((CPUARMState *)cpu_env)->eabi) {
8807 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8808 goto efault;
8809 fl.l_type = tswap16(target_efl->l_type);
8810 fl.l_whence = tswap16(target_efl->l_whence);
8811 fl.l_start = tswap64(target_efl->l_start);
8812 fl.l_len = tswap64(target_efl->l_len);
8813 fl.l_pid = tswap32(target_efl->l_pid);
8814 unlock_user_struct(target_efl, arg3, 0);
8815 } else
8816 #endif
8818 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8819 goto efault;
8820 fl.l_type = tswap16(target_fl->l_type);
8821 fl.l_whence = tswap16(target_fl->l_whence);
8822 fl.l_start = tswap64(target_fl->l_start);
8823 fl.l_len = tswap64(target_fl->l_len);
8824 fl.l_pid = tswap32(target_fl->l_pid);
8825 unlock_user_struct(target_fl, arg3, 0);
8827 ret = get_errno(fcntl(arg1, cmd, &fl));
8828 if (ret == 0) {
8829 #ifdef TARGET_ARM
8830 if (((CPUARMState *)cpu_env)->eabi) {
8831 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8832 goto efault;
8833 target_efl->l_type = tswap16(fl.l_type);
8834 target_efl->l_whence = tswap16(fl.l_whence);
8835 target_efl->l_start = tswap64(fl.l_start);
8836 target_efl->l_len = tswap64(fl.l_len);
8837 target_efl->l_pid = tswap32(fl.l_pid);
8838 unlock_user_struct(target_efl, arg3, 1);
8839 } else
8840 #endif
8842 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8843 goto efault;
8844 target_fl->l_type = tswap16(fl.l_type);
8845 target_fl->l_whence = tswap16(fl.l_whence);
8846 target_fl->l_start = tswap64(fl.l_start);
8847 target_fl->l_len = tswap64(fl.l_len);
8848 target_fl->l_pid = tswap32(fl.l_pid);
8849 unlock_user_struct(target_fl, arg3, 1);
8852 break;
8854 case TARGET_F_SETLK64:
8855 case TARGET_F_SETLKW64:
8856 #ifdef TARGET_ARM
8857 if (((CPUARMState *)cpu_env)->eabi) {
8858 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8859 goto efault;
8860 fl.l_type = tswap16(target_efl->l_type);
8861 fl.l_whence = tswap16(target_efl->l_whence);
8862 fl.l_start = tswap64(target_efl->l_start);
8863 fl.l_len = tswap64(target_efl->l_len);
8864 fl.l_pid = tswap32(target_efl->l_pid);
8865 unlock_user_struct(target_efl, arg3, 0);
8866 } else
8867 #endif
8869 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8870 goto efault;
8871 fl.l_type = tswap16(target_fl->l_type);
8872 fl.l_whence = tswap16(target_fl->l_whence);
8873 fl.l_start = tswap64(target_fl->l_start);
8874 fl.l_len = tswap64(target_fl->l_len);
8875 fl.l_pid = tswap32(target_fl->l_pid);
8876 unlock_user_struct(target_fl, arg3, 0);
8878 ret = get_errno(fcntl(arg1, cmd, &fl));
8879 break;
8880 default:
8881 ret = do_fcntl(arg1, arg2, arg3);
8882 break;
8884 break;
8886 #endif
8887 #ifdef TARGET_NR_cacheflush
8888 case TARGET_NR_cacheflush:
8889 /* self-modifying code is handled automatically, so nothing needed */
8890 ret = 0;
8891 break;
8892 #endif
8893 #ifdef TARGET_NR_security
8894 case TARGET_NR_security:
8895 goto unimplemented;
8896 #endif
8897 #ifdef TARGET_NR_getpagesize
8898 case TARGET_NR_getpagesize:
8899 ret = TARGET_PAGE_SIZE;
8900 break;
8901 #endif
8902 case TARGET_NR_gettid:
8903 ret = get_errno(gettid());
8904 break;
8905 #ifdef TARGET_NR_readahead
8906 case TARGET_NR_readahead:
8907 #if TARGET_ABI_BITS == 32
8908 if (regpairs_aligned(cpu_env)) {
8909 arg2 = arg3;
8910 arg3 = arg4;
8911 arg4 = arg5;
8913 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8914 #else
8915 ret = get_errno(readahead(arg1, arg2, arg3));
8916 #endif
8917 break;
8918 #endif
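    /* Extended attribute syscalls: lock the guest name/value buffers and
     * pass them straight to the host *xattr() calls; names and values are
     * plain byte strings, so nothing needs byte swapping. */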
8919 #ifdef CONFIG_ATTR
8920 #ifdef TARGET_NR_setxattr
8921 case TARGET_NR_listxattr:
8922 case TARGET_NR_llistxattr:
8924 void *p, *b = 0;
8925 if (arg2) {
8926 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8927 if (!b) {
8928 ret = -TARGET_EFAULT;
8929 break;
8932 p = lock_user_string(arg1);
8933 if (p) {
8934 if (num == TARGET_NR_listxattr) {
8935 ret = get_errno(listxattr(p, b, arg3));
8936 } else {
8937 ret = get_errno(llistxattr(p, b, arg3));
8939 } else {
8940 ret = -TARGET_EFAULT;
8942 unlock_user(p, arg1, 0);
8943 unlock_user(b, arg2, arg3);
8944 break;
8946 case TARGET_NR_flistxattr:
8948 void *b = 0;
8949 if (arg2) {
8950 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8951 if (!b) {
8952 ret = -TARGET_EFAULT;
8953 break;
8956 ret = get_errno(flistxattr(arg1, b, arg3));
8957 unlock_user(b, arg2, arg3);
8958 break;
8960 case TARGET_NR_setxattr:
8961 case TARGET_NR_lsetxattr:
8963 void *p, *n, *v = 0;
8964 if (arg3) {
8965 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8966 if (!v) {
8967 ret = -TARGET_EFAULT;
8968 break;
8971 p = lock_user_string(arg1);
8972 n = lock_user_string(arg2);
8973 if (p && n) {
8974 if (num == TARGET_NR_setxattr) {
8975 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8976 } else {
8977 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8979 } else {
8980 ret = -TARGET_EFAULT;
8982 unlock_user(p, arg1, 0);
8983 unlock_user(n, arg2, 0);
8984 unlock_user(v, arg3, 0);
8986 break;
8987 case TARGET_NR_fsetxattr:
8989 void *n, *v = 0;
8990 if (arg3) {
8991 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8992 if (!v) {
8993 ret = -TARGET_EFAULT;
8994 break;
8997 n = lock_user_string(arg2);
8998 if (n) {
8999 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
9000 } else {
9001 ret = -TARGET_EFAULT;
9003 unlock_user(n, arg2, 0);
9004 unlock_user(v, arg3, 0);
9006 break;
9007 case TARGET_NR_getxattr:
9008 case TARGET_NR_lgetxattr:
9010 void *p, *n, *v = 0;
9011 if (arg3) {
9012 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9013 if (!v) {
9014 ret = -TARGET_EFAULT;
9015 break;
9018 p = lock_user_string(arg1);
9019 n = lock_user_string(arg2);
9020 if (p && n) {
9021 if (num == TARGET_NR_getxattr) {
9022 ret = get_errno(getxattr(p, n, v, arg4));
9023 } else {
9024 ret = get_errno(lgetxattr(p, n, v, arg4));
9026 } else {
9027 ret = -TARGET_EFAULT;
9029 unlock_user(p, arg1, 0);
9030 unlock_user(n, arg2, 0);
9031 unlock_user(v, arg3, arg4);
9033 break;
9034 case TARGET_NR_fgetxattr:
9036 void *n, *v = 0;
9037 if (arg3) {
9038 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9039 if (!v) {
9040 ret = -TARGET_EFAULT;
9041 break;
9044 n = lock_user_string(arg2);
9045 if (n) {
9046 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9047 } else {
9048 ret = -TARGET_EFAULT;
9050 unlock_user(n, arg2, 0);
9051 unlock_user(v, arg3, arg4);
9053 break;
9054 case TARGET_NR_removexattr:
9055 case TARGET_NR_lremovexattr:
9057 void *p, *n;
9058 p = lock_user_string(arg1);
9059 n = lock_user_string(arg2);
9060 if (p && n) {
9061 if (num == TARGET_NR_removexattr) {
9062 ret = get_errno(removexattr(p, n));
9063 } else {
9064 ret = get_errno(lremovexattr(p, n));
9066 } else {
9067 ret = -TARGET_EFAULT;
9069 unlock_user(p, arg1, 0);
9070 unlock_user(n, arg2, 0);
9072 break;
9073 case TARGET_NR_fremovexattr:
9075 void *n;
9076 n = lock_user_string(arg2);
9077 if (n) {
9078 ret = get_errno(fremovexattr(arg1, n));
9079 } else {
9080 ret = -TARGET_EFAULT;
9082 unlock_user(n, arg2, 0);
9084 break;
9085 #endif
9086 #endif /* CONFIG_ATTR */
9087 #ifdef TARGET_NR_set_thread_area
9088 case TARGET_NR_set_thread_area:
9089 #if defined(TARGET_MIPS)
9090 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9091 ret = 0;
9092 break;
9093 #elif defined(TARGET_CRIS)
9094 if (arg1 & 0xff)
9095 ret = -TARGET_EINVAL;
9096 else {
9097 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9098 ret = 0;
9100 break;
9101 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9102 ret = do_set_thread_area(cpu_env, arg1);
9103 break;
9104 #elif defined(TARGET_M68K)
9106 TaskState *ts = cpu->opaque;
9107 ts->tp_value = arg1;
9108 ret = 0;
9109 break;
9111 #else
9112 goto unimplemented_nowarn;
9113 #endif
9114 #endif
9115 #ifdef TARGET_NR_get_thread_area
9116 case TARGET_NR_get_thread_area:
9117 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9118 ret = do_get_thread_area(cpu_env, arg1);
9119 break;
9120 #elif defined(TARGET_M68K)
9122 TaskState *ts = cpu->opaque;
9123 ret = ts->tp_value;
9124 break;
9126 #else
9127 goto unimplemented_nowarn;
9128 #endif
9129 #endif
9130 #ifdef TARGET_NR_getdomainname
9131 case TARGET_NR_getdomainname:
9132 goto unimplemented_nowarn;
9133 #endif
9135 #ifdef TARGET_NR_clock_gettime
9136 case TARGET_NR_clock_gettime:
9138 struct timespec ts;
9139 ret = get_errno(clock_gettime(arg1, &ts));
9140 if (!is_error(ret)) {
9141 host_to_target_timespec(arg2, &ts);
9143 break;
9145 #endif
9146 #ifdef TARGET_NR_clock_getres
9147 case TARGET_NR_clock_getres:
9149 struct timespec ts;
9150 ret = get_errno(clock_getres(arg1, &ts));
9151 if (!is_error(ret)) {
9152 host_to_target_timespec(arg2, &ts);
9154 break;
9156 #endif
9157 #ifdef TARGET_NR_clock_nanosleep
9158 case TARGET_NR_clock_nanosleep:
9160 struct timespec ts;
9161 target_to_host_timespec(&ts, arg3);
9162 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
9163 if (arg4)
9164 host_to_target_timespec(arg4, &ts);
9166 #if defined(TARGET_PPC)
9167 /* clock_nanosleep is odd in that it returns positive errno values.
9168 * On PPC, CR0 bit 3 should be set in such a situation. */
9169 if (ret) {
9170 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
9172 #endif
9173 break;
9175 #endif
9177 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9178 case TARGET_NR_set_tid_address:
9179 ret = get_errno(set_tid_address((int *)g2h(arg1)));
9180 break;
9181 #endif
9183 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9184 case TARGET_NR_tkill:
9185 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9186 break;
9187 #endif
9189 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9190 case TARGET_NR_tgkill:
9191 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9192 target_to_host_signal(arg3)));
9193 break;
9194 #endif
9196 #ifdef TARGET_NR_set_robust_list
9197 case TARGET_NR_set_robust_list:
9198 case TARGET_NR_get_robust_list:
9199 /* The ABI for supporting robust futexes has userspace pass
9200 * the kernel a pointer to a linked list which is updated by
9201 * userspace after the syscall; the list is walked by the kernel
9202 * when the thread exits. Since the linked list in QEMU guest
9203 * memory isn't a valid linked list for the host and we have
9204 * no way to reliably intercept the thread-death event, we can't
9205 * support these. Silently return ENOSYS so that guest userspace
9206 * falls back to a non-robust futex implementation (which should
9207 * be OK except in the corner case of the guest crashing while
9208 * holding a mutex that is shared with another process via
 9209              * shared memory). */
9211 goto unimplemented_nowarn;
9212 #endif
9214 #if defined(TARGET_NR_utimensat)
9215 case TARGET_NR_utimensat:
9217 struct timespec *tsp, ts[2];
9218 if (!arg3) {
9219 tsp = NULL;
9220 } else {
9221 target_to_host_timespec(ts, arg3);
9222 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9223 tsp = ts;
9225 if (!arg2)
9226 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9227 else {
9228 if (!(p = lock_user_string(arg2))) {
9229 ret = -TARGET_EFAULT;
9230 goto fail;
9232 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9233 unlock_user(p, arg2, 0);
9236 break;
9237 #endif
9238 case TARGET_NR_futex:
9239 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9240 break;
9241 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9242 case TARGET_NR_inotify_init:
9243 ret = get_errno(sys_inotify_init());
9244 break;
9245 #endif
9246 #ifdef CONFIG_INOTIFY1
9247 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9248 case TARGET_NR_inotify_init1:
9249 ret = get_errno(sys_inotify_init1(arg1));
9250 break;
9251 #endif
9252 #endif
9253 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9254 case TARGET_NR_inotify_add_watch:
9255 p = lock_user_string(arg2);
9256 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9257 unlock_user(p, arg2, 0);
9258 break;
9259 #endif
9260 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9261 case TARGET_NR_inotify_rm_watch:
9262 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9263 break;
9264 #endif
9266 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9267 case TARGET_NR_mq_open:
9269 struct mq_attr posix_mq_attr, *attrp;
9271 p = lock_user_string(arg1 - 1);
9272 if (arg4 != 0) {
9273 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9274 attrp = &posix_mq_attr;
9275 } else {
9276 attrp = 0;
9278 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9279 unlock_user (p, arg1, 0);
9281 break;
9283 case TARGET_NR_mq_unlink:
9284 p = lock_user_string(arg1 - 1);
9285 ret = get_errno(mq_unlink(p));
9286 unlock_user (p, arg1, 0);
9287 break;
9289 case TARGET_NR_mq_timedsend:
9291 struct timespec ts;
9293 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9294 if (arg5 != 0) {
9295 target_to_host_timespec(&ts, arg5);
9296 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9297 host_to_target_timespec(arg5, &ts);
9299 else
9300 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9301 unlock_user (p, arg2, arg3);
9303 break;
9305 case TARGET_NR_mq_timedreceive:
9307 struct timespec ts;
9308 unsigned int prio;
9310 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9311 if (arg5 != 0) {
9312 target_to_host_timespec(&ts, arg5);
9313 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9314 host_to_target_timespec(arg5, &ts);
9316 else
9317 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9318 unlock_user (p, arg2, arg3);
9319 if (arg4 != 0)
9320 put_user_u32(prio, arg4);
9322 break;
9324 /* Not implemented for now... */
9325 /* case TARGET_NR_mq_notify: */
9326 /* break; */
9328 case TARGET_NR_mq_getsetattr:
9330 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9331 ret = 0;
9332 if (arg3 != 0) {
9333 ret = mq_getattr(arg1, &posix_mq_attr_out);
9334 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9336 if (arg2 != 0) {
9337 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9338 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9342 break;
9343 #endif
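    /* tee/splice/vmsplice: splice copies the optional loff_t in/out offsets
     * between guest and host memory around the host call, and vmsplice
     * bounces the iovec array through lock_iovec(). */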
9345 #ifdef CONFIG_SPLICE
9346 #ifdef TARGET_NR_tee
9347 case TARGET_NR_tee:
9349 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9351 break;
9352 #endif
9353 #ifdef TARGET_NR_splice
9354 case TARGET_NR_splice:
9356 loff_t loff_in, loff_out;
9357 loff_t *ploff_in = NULL, *ploff_out = NULL;
9358 if (arg2) {
9359 if (get_user_u64(loff_in, arg2)) {
9360 goto efault;
9362 ploff_in = &loff_in;
9364 if (arg4) {
9365 if (get_user_u64(loff_out, arg4)) {
9366 goto efault;
9368 ploff_out = &loff_out;
9370 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9371 if (arg2) {
9372 if (put_user_u64(loff_in, arg2)) {
9373 goto efault;
9376 if (arg4) {
9377 if (put_user_u64(loff_out, arg4)) {
9378 goto efault;
9382 break;
9383 #endif
9384 #ifdef TARGET_NR_vmsplice
9385 case TARGET_NR_vmsplice:
9387 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9388 if (vec != NULL) {
9389 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9390 unlock_iovec(vec, arg2, arg3, 0);
9391 } else {
9392 ret = -host_to_target_errno(errno);
9395 break;
9396 #endif
9397 #endif /* CONFIG_SPLICE */
9398 #ifdef CONFIG_EVENTFD
9399 #if defined(TARGET_NR_eventfd)
9400 case TARGET_NR_eventfd:
9401 ret = get_errno(eventfd(arg1, 0));
9402 break;
9403 #endif
9404 #if defined(TARGET_NR_eventfd2)
9405 case TARGET_NR_eventfd2:
9407 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9408 if (arg2 & TARGET_O_NONBLOCK) {
9409 host_flags |= O_NONBLOCK;
9411 if (arg2 & TARGET_O_CLOEXEC) {
9412 host_flags |= O_CLOEXEC;
9414 ret = get_errno(eventfd(arg1, host_flags));
9415 break;
9417 #endif
9418 #endif /* CONFIG_EVENTFD */
9419 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9420 case TARGET_NR_fallocate:
9421 #if TARGET_ABI_BITS == 32
9422 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9423 target_offset64(arg5, arg6)));
9424 #else
9425 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9426 #endif
9427 break;
9428 #endif
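    /* sync_file_range: on 32-bit ABIs the two 64-bit offsets are split
     * across register pairs (MIPS uses its own argument layout), and
     * sync_file_range2 moves the flags argument up to arg2. */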
9429 #if defined(CONFIG_SYNC_FILE_RANGE)
9430 #if defined(TARGET_NR_sync_file_range)
9431 case TARGET_NR_sync_file_range:
9432 #if TARGET_ABI_BITS == 32
9433 #if defined(TARGET_MIPS)
9434 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9435 target_offset64(arg5, arg6), arg7));
9436 #else
9437 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9438 target_offset64(arg4, arg5), arg6));
9439 #endif /* !TARGET_MIPS */
9440 #else
9441 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9442 #endif
9443 break;
9444 #endif
9445 #if defined(TARGET_NR_sync_file_range2)
9446 case TARGET_NR_sync_file_range2:
9447 /* This is like sync_file_range but the arguments are reordered */
9448 #if TARGET_ABI_BITS == 32
9449 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9450 target_offset64(arg5, arg6), arg2));
9451 #else
9452 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9453 #endif
9454 break;
9455 #endif
9456 #endif
9457 #if defined(CONFIG_EPOLL)
9458 #if defined(TARGET_NR_epoll_create)
9459 case TARGET_NR_epoll_create:
9460 ret = get_errno(epoll_create(arg1));
9461 break;
9462 #endif
9463 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9464 case TARGET_NR_epoll_create1:
9465 ret = get_errno(epoll_create1(arg1));
9466 break;
9467 #endif
9468 #if defined(TARGET_NR_epoll_ctl)
9469 case TARGET_NR_epoll_ctl:
9471 struct epoll_event ep;
9472 struct epoll_event *epp = 0;
9473 if (arg4) {
9474 struct target_epoll_event *target_ep;
9475 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9476 goto efault;
9478 ep.events = tswap32(target_ep->events);
9479 /* The epoll_data_t union is just opaque data to the kernel,
9480 * so we transfer all 64 bits across and need not worry what
 9481              * actual data type it is. */
9483 ep.data.u64 = tswap64(target_ep->data.u64);
9484 unlock_user_struct(target_ep, arg4, 0);
9485 epp = &ep;
9487 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9488 break;
9490 #endif
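    /* epoll_wait/epoll_pwait: the ready events are collected in a host
     * struct epoll_event buffer and then byte-swapped back into the guest's
     * target_epoll_event array. */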
9492 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9493 #define IMPLEMENT_EPOLL_PWAIT
9494 #endif
9495 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9496 #if defined(TARGET_NR_epoll_wait)
9497 case TARGET_NR_epoll_wait:
9498 #endif
9499 #if defined(IMPLEMENT_EPOLL_PWAIT)
9500 case TARGET_NR_epoll_pwait:
9501 #endif
9503 struct target_epoll_event *target_ep;
9504 struct epoll_event *ep;
9505 int epfd = arg1;
9506 int maxevents = arg3;
9507 int timeout = arg4;
9509 target_ep = lock_user(VERIFY_WRITE, arg2,
9510 maxevents * sizeof(struct target_epoll_event), 1);
9511 if (!target_ep) {
9512 goto efault;
9515 ep = alloca(maxevents * sizeof(struct epoll_event));
9517 switch (num) {
9518 #if defined(IMPLEMENT_EPOLL_PWAIT)
9519 case TARGET_NR_epoll_pwait:
9521 target_sigset_t *target_set;
9522 sigset_t _set, *set = &_set;
9524 if (arg5) {
9525 target_set = lock_user(VERIFY_READ, arg5,
9526 sizeof(target_sigset_t), 1);
9527 if (!target_set) {
9528 unlock_user(target_ep, arg2, 0);
9529 goto efault;
9531 target_to_host_sigset(set, target_set);
9532 unlock_user(target_set, arg5, 0);
9533 } else {
9534 set = NULL;
9537 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9538 break;
9540 #endif
9541 #if defined(TARGET_NR_epoll_wait)
9542 case TARGET_NR_epoll_wait:
9543 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9544 break;
9545 #endif
9546 default:
9547 ret = -TARGET_ENOSYS;
9549 if (!is_error(ret)) {
9550 int i;
9551 for (i = 0; i < ret; i++) {
9552 target_ep[i].events = tswap32(ep[i].events);
9553 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9556 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9557 break;
9559 #endif
9560 #endif
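    /* prlimit64: both rlimit values are plain 64-bit quantities, so the
     * conversion is a tswap64 of rlim_cur/rlim_max in each direction. */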
9561 #ifdef TARGET_NR_prlimit64
9562 case TARGET_NR_prlimit64:
9564 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9565 struct target_rlimit64 *target_rnew, *target_rold;
9566 struct host_rlimit64 rnew, rold, *rnewp = 0;
9567 int resource = target_to_host_resource(arg2);
9568 if (arg3) {
9569 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9570 goto efault;
9572 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9573 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9574 unlock_user_struct(target_rnew, arg3, 0);
9575 rnewp = &rnew;
9578 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
9579 if (!is_error(ret) && arg4) {
9580 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9581 goto efault;
9583 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9584 target_rold->rlim_max = tswap64(rold.rlim_max);
9585 unlock_user_struct(target_rold, arg4, 1);
9587 break;
9589 #endif
9590 #ifdef TARGET_NR_gethostname
9591 case TARGET_NR_gethostname:
9593 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9594 if (name) {
9595 ret = get_errno(gethostname(name, arg2));
9596 unlock_user(name, arg1, arg2);
9597 } else {
9598 ret = -TARGET_EFAULT;
9600 break;
9602 #endif
9603 #ifdef TARGET_NR_atomic_cmpxchg_32
9604 case TARGET_NR_atomic_cmpxchg_32:
9606 /* should use start_exclusive from main.c */
9607 abi_ulong mem_value;
9608 if (get_user_u32(mem_value, arg6)) {
9609 target_siginfo_t info;
9610 info.si_signo = SIGSEGV;
9611 info.si_errno = 0;
9612 info.si_code = TARGET_SEGV_MAPERR;
9613 info._sifields._sigfault._addr = arg6;
9614 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9615 ret = 0xdeadbeef;
9618 if (mem_value == arg2)
9619 put_user_u32(arg1, arg6);
9620 ret = mem_value;
9621 break;
9623 #endif
9624 #ifdef TARGET_NR_atomic_barrier
9625 case TARGET_NR_atomic_barrier:
9627 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9628 ret = 0;
9629 break;
9631 #endif
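    /* POSIX timers: host timer_t handles live in the g_posix_timers table.
     * The id handed back to the guest is the table index tagged with
     * TIMER_MAGIC; get_timer_id() is expected to check and strip that tag
     * before the index is used in the cases below. */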
9633 #ifdef TARGET_NR_timer_create
9634 case TARGET_NR_timer_create:
9636 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9638 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
9640 int clkid = arg1;
9641 int timer_index = next_free_host_timer();
9643 if (timer_index < 0) {
9644 ret = -TARGET_EAGAIN;
9645 } else {
9646 timer_t *phtimer = g_posix_timers + timer_index;
9648 if (arg2) {
9649 phost_sevp = &host_sevp;
9650 ret = target_to_host_sigevent(phost_sevp, arg2);
9651 if (ret != 0) {
9652 break;
9656 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
9657 if (ret) {
9658 phtimer = NULL;
9659 } else {
9660 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
9661 goto efault;
9665 break;
9667 #endif
9669 #ifdef TARGET_NR_timer_settime
9670 case TARGET_NR_timer_settime:
9672 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9673 * struct itimerspec * old_value */
9674 target_timer_t timerid = get_timer_id(arg1);
9676 if (timerid < 0) {
9677 ret = timerid;
9678 } else if (arg3 == 0) {
9679 ret = -TARGET_EINVAL;
9680 } else {
9681 timer_t htimer = g_posix_timers[timerid];
9682 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
9684 target_to_host_itimerspec(&hspec_new, arg3);
9685 ret = get_errno(
9686 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
 9687                 /* The old timer value must be returned through arg4
                       * (the old_value pointer), not arg2 (the flags). */
                      if (arg4) {
                          host_to_target_itimerspec(arg4, &hspec_old);
                      }
9689 break;
9691 #endif
9693 #ifdef TARGET_NR_timer_gettime
9694 case TARGET_NR_timer_gettime:
9696 /* args: timer_t timerid, struct itimerspec *curr_value */
9697 target_timer_t timerid = get_timer_id(arg1);
9699 if (timerid < 0) {
9700 ret = timerid;
9701 } else if (!arg2) {
9702 ret = -TARGET_EFAULT;
9703 } else {
9704 timer_t htimer = g_posix_timers[timerid];
9705 struct itimerspec hspec;
9706 ret = get_errno(timer_gettime(htimer, &hspec));
9708 if (host_to_target_itimerspec(arg2, &hspec)) {
9709 ret = -TARGET_EFAULT;
9712 break;
9714 #endif
9716 #ifdef TARGET_NR_timer_getoverrun
9717 case TARGET_NR_timer_getoverrun:
9719 /* args: timer_t timerid */
9720 target_timer_t timerid = get_timer_id(arg1);
9722 if (timerid < 0) {
9723 ret = timerid;
9724 } else {
9725 timer_t htimer = g_posix_timers[timerid];
9726 ret = get_errno(timer_getoverrun(htimer));
9728 break;
9730 #endif
9732 #ifdef TARGET_NR_timer_delete
9733 case TARGET_NR_timer_delete:
9735 /* args: timer_t timerid */
9736 target_timer_t timerid = get_timer_id(arg1);
9738 if (timerid < 0) {
9739 ret = timerid;
9740 } else {
9741 timer_t htimer = g_posix_timers[timerid];
9742 ret = get_errno(timer_delete(htimer));
9743 g_posix_timers[timerid] = 0;
9745 break;
9747 #endif
9749 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
9750 case TARGET_NR_timerfd_create:
9751 ret = get_errno(timerfd_create(arg1,
9752 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
9753 break;
9754 #endif
9756 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
9757 case TARGET_NR_timerfd_gettime:
9759 struct itimerspec its_curr;
9761 ret = get_errno(timerfd_gettime(arg1, &its_curr));
9763 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
9764 goto efault;
9767 break;
9768 #endif
9770 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
9771 case TARGET_NR_timerfd_settime:
9773 struct itimerspec its_new, its_old, *p_new;
9775 if (arg3) {
9776 if (target_to_host_itimerspec(&its_new, arg3)) {
9777 goto efault;
9779 p_new = &its_new;
9780 } else {
9781 p_new = NULL;
9784 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
9786 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
9787 goto efault;
9790 break;
9791 #endif
9793 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
9794 case TARGET_NR_ioprio_get:
9795 ret = get_errno(ioprio_get(arg1, arg2));
9796 break;
9797 #endif
9799 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
9800 case TARGET_NR_ioprio_set:
9801 ret = get_errno(ioprio_set(arg1, arg2, arg3));
9802 break;
9803 #endif
9805 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
9806 case TARGET_NR_setns:
9807 ret = get_errno(setns(arg1, arg2));
9808 break;
9809 #endif
9810 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
9811 case TARGET_NR_unshare:
9812 ret = get_errno(unshare(arg1));
9813 break;
9814 #endif
9816 default:
9817 unimplemented:
9818 gemu_log("qemu: Unsupported syscall: %d\n", num);
9819 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9820 unimplemented_nowarn:
9821 #endif
9822 ret = -TARGET_ENOSYS;
9823 break;
9825 fail:
9826 #ifdef DEBUG
9827 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9828 #endif
9829 if(do_strace)
9830 print_syscall_ret(num, ret);
9831 return ret;
9832 efault:
9833 ret = -TARGET_EFAULT;
9834 goto fail;