[qemu/ar7.git] / linux-user / syscall.c
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <linux/capability.h>
47 #include <signal.h>
48 #include <sched.h>
49 #ifdef __ia64__
50 int __clone2(int (*fn)(void *), void *child_stack_base,
51 size_t stack_size, int flags, void *arg, ...);
52 #endif
53 #include <sys/socket.h>
54 #include <sys/un.h>
55 #include <sys/uio.h>
56 #include <sys/poll.h>
57 #include <sys/times.h>
58 #include <sys/shm.h>
59 #include <sys/sem.h>
60 #include <sys/statfs.h>
61 #include <utime.h>
62 #include <sys/sysinfo.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #include "qemu/sockets.h"
70 #ifdef CONFIG_TIMERFD
71 #include <sys/timerfd.h>
72 #endif
73 #ifdef TARGET_GPROF
74 #include <sys/gmon.h>
75 #endif
76 #ifdef CONFIG_EVENTFD
77 #include <sys/eventfd.h>
78 #endif
79 #ifdef CONFIG_EPOLL
80 #include <sys/epoll.h>
81 #endif
82 #ifdef CONFIG_ATTR
83 #include "qemu/xattr.h"
84 #endif
85 #ifdef CONFIG_SENDFILE
86 #include <sys/sendfile.h>
87 #endif
89 #define termios host_termios
90 #define winsize host_winsize
91 #define termio host_termio
92 #define sgttyb host_sgttyb /* same as target */
93 #define tchars host_tchars /* same as target */
94 #define ltchars host_ltchars /* same as target */
96 #include <linux/termios.h>
97 #include <linux/unistd.h>
98 #include <linux/cdrom.h>
99 #include <linux/hdreg.h>
100 #include <linux/soundcard.h>
101 #include <linux/kd.h>
102 #include <linux/mtio.h>
103 #include <linux/fs.h>
104 #if defined(CONFIG_FIEMAP)
105 #include <linux/fiemap.h>
106 #endif
107 #include <linux/fb.h>
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include "linux_loop.h"
115 #include "uname.h"
117 #include "qemu.h"
119 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
120 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
122 //#define DEBUG
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
129 #undef _syscall0
130 #undef _syscall1
131 #undef _syscall2
132 #undef _syscall3
133 #undef _syscall4
134 #undef _syscall5
135 #undef _syscall6
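/* Minimal wrapper generators: each _syscallN below expands to a small static
 * function that invokes the host system call directly via syscall(2),
 * bypassing any glibc wrapper. */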
137 #define _syscall0(type,name) \
138 static type name (void) \
140 return syscall(__NR_##name); \
143 #define _syscall1(type,name,type1,arg1) \
144 static type name (type1 arg1) \
146 return syscall(__NR_##name, arg1); \
149 #define _syscall2(type,name,type1,arg1,type2,arg2) \
150 static type name (type1 arg1,type2 arg2) \
152 return syscall(__NR_##name, arg1, arg2); \
155 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
156 static type name (type1 arg1,type2 arg2,type3 arg3) \
158 return syscall(__NR_##name, arg1, arg2, arg3); \
161 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
162 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
164 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
167 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
168 type5,arg5) \
169 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
171 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
175 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
176 type5,arg5,type6,arg6) \
177 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
178 type6 arg6) \
180 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
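/* Alias the sys_* names used below onto the real host syscall numbers, so the
 * _syscallN macros can generate sys_-prefixed wrappers that do not collide
 * with the libc declarations of the same calls. */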
184 #define __NR_sys_uname __NR_uname
185 #define __NR_sys_getcwd1 __NR_getcwd
186 #define __NR_sys_getdents __NR_getdents
187 #define __NR_sys_getdents64 __NR_getdents64
188 #define __NR_sys_getpriority __NR_getpriority
189 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_futex __NR_futex
194 #define __NR_sys_inotify_init __NR_inotify_init
195 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
196 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
198 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
199 defined(__s390x__)
200 #define __NR__llseek __NR_lseek
201 #endif
203 /* Newer kernel ports have llseek() instead of _llseek() */
204 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
205 #define TARGET_NR__llseek TARGET_NR_llseek
206 #endif
208 #ifdef __NR_gettid
209 _syscall0(int, gettid)
210 #else
211 /* This is a replacement for the host gettid() and must return a host
212 errno. */
213 static int gettid(void) {
214 return -ENOSYS;
216 #endif
217 #ifdef __NR_getdents
218 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
219 #endif
220 #if !defined(__NR_getdents) || \
221 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
222 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
223 #endif
224 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
225 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
226 loff_t *, res, uint, wh);
227 #endif
228 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
229 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
230 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
231 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
232 #endif
233 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
234 _syscall2(int,sys_tkill,int,tid,int,sig)
235 #endif
236 #ifdef __NR_exit_group
237 _syscall1(int,exit_group,int,error_code)
238 #endif
239 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
240 _syscall1(int,set_tid_address,int *,tidptr)
241 #endif
242 #if defined(TARGET_NR_futex) && defined(__NR_futex)
243 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
244 const struct timespec *,timeout,int *,uaddr2,int,val3)
245 #endif
246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
247 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
250 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
251 unsigned long *, user_mask_ptr);
252 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
253 void *, arg);
254 _syscall2(int, capget, struct __user_cap_header_struct *, header,
255 struct __user_cap_data_struct *, data);
256 _syscall2(int, capset, struct __user_cap_header_struct *, header,
257 struct __user_cap_data_struct *, data);
258 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
259 _syscall2(int, ioprio_get, int, which, int, who)
260 #endif
261 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
262 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
263 #endif
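/* Translation table for open(2)/fcntl(2) flags. Each entry is
 * { target_mask, target_bits, host_mask, host_bits }: when the masked target
 * flags equal target_bits, the corresponding host bits are set during
 * translation (and vice versa for host-to-target). */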
265 static bitmask_transtbl fcntl_flags_tbl[] = {
266 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
267 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
268 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
269 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
270 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
271 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
272 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
273 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
274 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
275 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
276 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
277 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
278 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
279 #if defined(O_DIRECT)
280 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
281 #endif
282 #if defined(O_NOATIME)
283 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
284 #endif
285 #if defined(O_CLOEXEC)
286 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
287 #endif
288 #if defined(O_PATH)
289 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
290 #endif
291 /* Don't terminate the list prematurely on 64-bit host+guest. */
292 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
293 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
294 #endif
295 { 0, 0, 0, 0 }
298 static int sys_getcwd1(char *buf, size_t size)
300 if (getcwd(buf, size) == NULL) {
301 /* getcwd() sets errno */
302 return (-1);
304 return strlen(buf)+1;
307 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
310  * open(2) has an extra parameter 'mode' when called with
311  * the O_CREAT flag.
313 if ((flags & O_CREAT) != 0) {
314 return (openat(dirfd, pathname, flags, mode));
316 return (openat(dirfd, pathname, flags));
319 #ifdef TARGET_NR_utimensat
320 #ifdef CONFIG_UTIMENSAT
321 static int sys_utimensat(int dirfd, const char *pathname,
322 const struct timespec times[2], int flags)
324 if (pathname == NULL)
325 return futimens(dirfd, times);
326 else
327 return utimensat(dirfd, pathname, times, flags);
329 #elif defined(__NR_utimensat)
330 #define __NR_sys_utimensat __NR_utimensat
331 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
332 const struct timespec *,tsp,int,flags)
333 #else
334 static int sys_utimensat(int dirfd, const char *pathname,
335 const struct timespec times[2], int flags)
337 errno = ENOSYS;
338 return -1;
340 #endif
341 #endif /* TARGET_NR_utimensat */
343 #ifdef CONFIG_INOTIFY
344 #include <sys/inotify.h>
346 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
347 static int sys_inotify_init(void)
349 return (inotify_init());
351 #endif
352 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
353 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
355 return (inotify_add_watch(fd, pathname, mask));
357 #endif
358 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
359 static int sys_inotify_rm_watch(int fd, int32_t wd)
361 return (inotify_rm_watch(fd, wd));
363 #endif
364 #ifdef CONFIG_INOTIFY1
365 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
366 static int sys_inotify_init1(int flags)
368 return (inotify_init1(flags));
370 #endif
371 #endif
372 #else
373 /* Userspace can usually survive runtime without inotify */
374 #undef TARGET_NR_inotify_init
375 #undef TARGET_NR_inotify_init1
376 #undef TARGET_NR_inotify_add_watch
377 #undef TARGET_NR_inotify_rm_watch
378 #endif /* CONFIG_INOTIFY */
380 #if defined(TARGET_NR_ppoll)
381 #ifndef __NR_ppoll
382 # define __NR_ppoll -1
383 #endif
384 #define __NR_sys_ppoll __NR_ppoll
385 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
386 struct timespec *, timeout, const sigset_t *, sigmask,
387 size_t, sigsetsize)
388 #endif
390 #if defined(TARGET_NR_pselect6)
391 #ifndef __NR_pselect6
392 # define __NR_pselect6 -1
393 #endif
394 #define __NR_sys_pselect6 __NR_pselect6
395 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
396 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
397 #endif
399 #if defined(TARGET_NR_prlimit64)
400 #ifndef __NR_prlimit64
401 # define __NR_prlimit64 -1
402 #endif
403 #define __NR_sys_prlimit64 __NR_prlimit64
404 /* The glibc rlimit structure may not match the one used by the underlying syscall */
405 struct host_rlimit64 {
406 uint64_t rlim_cur;
407 uint64_t rlim_max;
409 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
410 const struct host_rlimit64 *, new_limit,
411 struct host_rlimit64 *, old_limit)
412 #endif
415 #if defined(TARGET_NR_timer_create)
416 /* Maximum of 32 active POSIX timers allowed at any one time. */
417 static timer_t g_posix_timers[32] = { 0, } ;
419 static inline int next_free_host_timer(void)
421 int k ;
422 /* FIXME: Does finding the next free slot require a lock? */
423 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
424 if (g_posix_timers[k] == 0) {
425 g_posix_timers[k] = (timer_t) 1;
426 return k;
429 return -1;
431 #endif
433 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
434 #ifdef TARGET_ARM
435 static inline int regpairs_aligned(void *cpu_env) {
436 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
438 #elif defined(TARGET_MIPS)
439 static inline int regpairs_aligned(void *cpu_env) { return 1; }
440 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
441 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
442 * of registers which translates to the same as ARM/MIPS, because we start with
443 * r3 as arg1 */
444 static inline int regpairs_aligned(void *cpu_env) { return 1; }
445 #else
446 static inline int regpairs_aligned(void *cpu_env) { return 0; }
447 #endif
449 #define ERRNO_TABLE_SIZE 1200
451 /* target_to_host_errno_table[] is initialized from
452 * host_to_target_errno_table[] in syscall_init(). */
453 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
457 * This list is the union of errno values overridden in asm-<arch>/errno.h
458 * minus the errnos that are not actually generic to all archs.
460 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
461 [EIDRM] = TARGET_EIDRM,
462 [ECHRNG] = TARGET_ECHRNG,
463 [EL2NSYNC] = TARGET_EL2NSYNC,
464 [EL3HLT] = TARGET_EL3HLT,
465 [EL3RST] = TARGET_EL3RST,
466 [ELNRNG] = TARGET_ELNRNG,
467 [EUNATCH] = TARGET_EUNATCH,
468 [ENOCSI] = TARGET_ENOCSI,
469 [EL2HLT] = TARGET_EL2HLT,
470 [EDEADLK] = TARGET_EDEADLK,
471 [ENOLCK] = TARGET_ENOLCK,
472 [EBADE] = TARGET_EBADE,
473 [EBADR] = TARGET_EBADR,
474 [EXFULL] = TARGET_EXFULL,
475 [ENOANO] = TARGET_ENOANO,
476 [EBADRQC] = TARGET_EBADRQC,
477 [EBADSLT] = TARGET_EBADSLT,
478 [EBFONT] = TARGET_EBFONT,
479 [ENOSTR] = TARGET_ENOSTR,
480 [ENODATA] = TARGET_ENODATA,
481 [ETIME] = TARGET_ETIME,
482 [ENOSR] = TARGET_ENOSR,
483 [ENONET] = TARGET_ENONET,
484 [ENOPKG] = TARGET_ENOPKG,
485 [EREMOTE] = TARGET_EREMOTE,
486 [ENOLINK] = TARGET_ENOLINK,
487 [EADV] = TARGET_EADV,
488 [ESRMNT] = TARGET_ESRMNT,
489 [ECOMM] = TARGET_ECOMM,
490 [EPROTO] = TARGET_EPROTO,
491 [EDOTDOT] = TARGET_EDOTDOT,
492 [EMULTIHOP] = TARGET_EMULTIHOP,
493 [EBADMSG] = TARGET_EBADMSG,
494 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
495 [EOVERFLOW] = TARGET_EOVERFLOW,
496 [ENOTUNIQ] = TARGET_ENOTUNIQ,
497 [EBADFD] = TARGET_EBADFD,
498 [EREMCHG] = TARGET_EREMCHG,
499 [ELIBACC] = TARGET_ELIBACC,
500 [ELIBBAD] = TARGET_ELIBBAD,
501 [ELIBSCN] = TARGET_ELIBSCN,
502 [ELIBMAX] = TARGET_ELIBMAX,
503 [ELIBEXEC] = TARGET_ELIBEXEC,
504 [EILSEQ] = TARGET_EILSEQ,
505 [ENOSYS] = TARGET_ENOSYS,
506 [ELOOP] = TARGET_ELOOP,
507 [ERESTART] = TARGET_ERESTART,
508 [ESTRPIPE] = TARGET_ESTRPIPE,
509 [ENOTEMPTY] = TARGET_ENOTEMPTY,
510 [EUSERS] = TARGET_EUSERS,
511 [ENOTSOCK] = TARGET_ENOTSOCK,
512 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
513 [EMSGSIZE] = TARGET_EMSGSIZE,
514 [EPROTOTYPE] = TARGET_EPROTOTYPE,
515 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
516 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
517 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
518 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
519 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
520 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
521 [EADDRINUSE] = TARGET_EADDRINUSE,
522 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
523 [ENETDOWN] = TARGET_ENETDOWN,
524 [ENETUNREACH] = TARGET_ENETUNREACH,
525 [ENETRESET] = TARGET_ENETRESET,
526 [ECONNABORTED] = TARGET_ECONNABORTED,
527 [ECONNRESET] = TARGET_ECONNRESET,
528 [ENOBUFS] = TARGET_ENOBUFS,
529 [EISCONN] = TARGET_EISCONN,
530 [ENOTCONN] = TARGET_ENOTCONN,
531 [EUCLEAN] = TARGET_EUCLEAN,
532 [ENOTNAM] = TARGET_ENOTNAM,
533 [ENAVAIL] = TARGET_ENAVAIL,
534 [EISNAM] = TARGET_EISNAM,
535 [EREMOTEIO] = TARGET_EREMOTEIO,
536 [ESHUTDOWN] = TARGET_ESHUTDOWN,
537 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
538 [ETIMEDOUT] = TARGET_ETIMEDOUT,
539 [ECONNREFUSED] = TARGET_ECONNREFUSED,
540 [EHOSTDOWN] = TARGET_EHOSTDOWN,
541 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
542 [EALREADY] = TARGET_EALREADY,
543 [EINPROGRESS] = TARGET_EINPROGRESS,
544 [ESTALE] = TARGET_ESTALE,
545 [ECANCELED] = TARGET_ECANCELED,
546 [ENOMEDIUM] = TARGET_ENOMEDIUM,
547 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
548 #ifdef ENOKEY
549 [ENOKEY] = TARGET_ENOKEY,
550 #endif
551 #ifdef EKEYEXPIRED
552 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
553 #endif
554 #ifdef EKEYREVOKED
555 [EKEYREVOKED] = TARGET_EKEYREVOKED,
556 #endif
557 #ifdef EKEYREJECTED
558 [EKEYREJECTED] = TARGET_EKEYREJECTED,
559 #endif
560 #ifdef EOWNERDEAD
561 [EOWNERDEAD] = TARGET_EOWNERDEAD,
562 #endif
563 #ifdef ENOTRECOVERABLE
564 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
565 #endif
568 static inline int host_to_target_errno(int err)
570 if(host_to_target_errno_table[err])
571 return host_to_target_errno_table[err];
572 return err;
575 static inline int target_to_host_errno(int err)
577 if (target_to_host_errno_table[err])
578 return target_to_host_errno_table[err];
579 return err;
582 static inline abi_long get_errno(abi_long ret)
584 if (ret == -1)
585 return -host_to_target_errno(errno);
586 else
587 return ret;
590 static inline int is_error(abi_long ret)
592 return (abi_ulong)ret >= (abi_ulong)(-4096);
595 char *target_strerror(int err)
597 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
598 return NULL;
600 return strerror(target_to_host_errno(err));
603 static inline int host_to_target_sock_type(int host_type)
605 int target_type;
607 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
608 case SOCK_DGRAM:
609 target_type = TARGET_SOCK_DGRAM;
610 break;
611 case SOCK_STREAM:
612 target_type = TARGET_SOCK_STREAM;
613 break;
614 default:
615 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
616 break;
619 #if defined(SOCK_CLOEXEC)
620 if (host_type & SOCK_CLOEXEC) {
621 target_type |= TARGET_SOCK_CLOEXEC;
623 #endif
625 #if defined(SOCK_NONBLOCK)
626 if (host_type & SOCK_NONBLOCK) {
627 target_type |= TARGET_SOCK_NONBLOCK;
629 #endif
631 return target_type;
634 static abi_ulong target_brk;
635 static abi_ulong target_original_brk;
636 static abi_ulong brk_page;
638 void target_set_brk(abi_ulong new_brk)
640 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
641 brk_page = HOST_PAGE_ALIGN(target_brk);
644 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
645 #define DEBUGF_BRK(message, args...)
647 /* do_brk() must return target values and target errnos. */
648 abi_long do_brk(abi_ulong new_brk)
650 abi_long mapped_addr;
651 int new_alloc_size;
653 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
655 if (!new_brk) {
656 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
657 return target_brk;
659 if (new_brk < target_original_brk) {
660 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
661 target_brk);
662 return target_brk;
665 /* If the new brk is less than the highest page reserved to the
666 * target heap allocation, set it and we're almost done... */
667 if (new_brk <= brk_page) {
668 /* Heap contents are initialized to zero, as for anonymous
669 * mapped pages. */
670 if (new_brk > target_brk) {
671 memset(g2h(target_brk), 0, new_brk - target_brk);
673 target_brk = new_brk;
674 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
675 return target_brk;
678 /* We need to allocate more memory after the brk... Note that
679 * we don't use MAP_FIXED because that will map over the top of
680 * any existing mapping (like the one with the host libc or qemu
681 * itself); instead we treat "mapped but at wrong address" as
682 * a failure and unmap again.
684 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
685 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
686 PROT_READ|PROT_WRITE,
687 MAP_ANON|MAP_PRIVATE, 0, 0));
689 if (mapped_addr == brk_page) {
690 /* Heap contents are initialized to zero, as for anonymous
691 * mapped pages. Technically the new pages are already
692 * initialized to zero since they *are* anonymous mapped
693 * pages, however we have to take care with the contents that
694 * come from the remaining part of the previous page: it may
695  * contain garbage data due to a previous heap usage (grown
696 * then shrunken). */
697 memset(g2h(target_brk), 0, brk_page - target_brk);
699 target_brk = new_brk;
700 brk_page = HOST_PAGE_ALIGN(target_brk);
701 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
702 target_brk);
703 return target_brk;
704 } else if (mapped_addr != -1) {
705 /* Mapped but at wrong address, meaning there wasn't actually
706 * enough space for this brk.
708 target_munmap(mapped_addr, new_alloc_size);
709 mapped_addr = -1;
710 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
712 else {
713 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
716 #if defined(TARGET_ALPHA)
717 /* We (partially) emulate OSF/1 on Alpha, which requires we
718 return a proper errno, not an unchanged brk value. */
719 return -TARGET_ENOMEM;
720 #endif
721 /* For everything else, return the previous break. */
722 return target_brk;
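/* Convert a guest fd_set (an array of abi_ulong bit words in target byte
 * order) into a host fd_set. */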
725 static inline abi_long copy_from_user_fdset(fd_set *fds,
726 abi_ulong target_fds_addr,
727 int n)
729 int i, nw, j, k;
730 abi_ulong b, *target_fds;
732 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
733 if (!(target_fds = lock_user(VERIFY_READ,
734 target_fds_addr,
735 sizeof(abi_ulong) * nw,
736 1)))
737 return -TARGET_EFAULT;
739 FD_ZERO(fds);
740 k = 0;
741 for (i = 0; i < nw; i++) {
742 /* grab the abi_ulong */
743 __get_user(b, &target_fds[i]);
744 for (j = 0; j < TARGET_ABI_BITS; j++) {
745 /* check the bit inside the abi_ulong */
746 if ((b >> j) & 1)
747 FD_SET(k, fds);
748 k++;
752 unlock_user(target_fds, target_fds_addr, 0);
754 return 0;
757 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
758 abi_ulong target_fds_addr,
759 int n)
761 if (target_fds_addr) {
762 if (copy_from_user_fdset(fds, target_fds_addr, n))
763 return -TARGET_EFAULT;
764 *fds_ptr = fds;
765 } else {
766 *fds_ptr = NULL;
768 return 0;
771 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
772 const fd_set *fds,
773 int n)
775 int i, nw, j, k;
776 abi_long v;
777 abi_ulong *target_fds;
779 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
780 if (!(target_fds = lock_user(VERIFY_WRITE,
781 target_fds_addr,
782 sizeof(abi_ulong) * nw,
783 0)))
784 return -TARGET_EFAULT;
786 k = 0;
787 for (i = 0; i < nw; i++) {
788 v = 0;
789 for (j = 0; j < TARGET_ABI_BITS; j++) {
790 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
791 k++;
793 __put_user(v, &target_fds[i]);
796 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
798 return 0;
801 #if defined(__alpha__)
802 #define HOST_HZ 1024
803 #else
804 #define HOST_HZ 100
805 #endif
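/* Rescale clock tick counts from the host's HZ to the target's HZ when the
 * two differ. */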
807 static inline abi_long host_to_target_clock_t(long ticks)
809 #if HOST_HZ == TARGET_HZ
810 return ticks;
811 #else
812 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
813 #endif
816 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
817 const struct rusage *rusage)
819 struct target_rusage *target_rusage;
821 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
822 return -TARGET_EFAULT;
823 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
824 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
825 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
826 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
827 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
828 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
829 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
830 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
831 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
832 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
833 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
834 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
835 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
836 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
837 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
838 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
839 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
840 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
841 unlock_user_struct(target_rusage, target_addr, 1);
843 return 0;
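/* Translate rlimit values between target and host representations, mapping
 * RLIM_INFINITY explicitly and clamping values that would overflow the
 * destination type to infinity. */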
846 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
848 abi_ulong target_rlim_swap;
849 rlim_t result;
851 target_rlim_swap = tswapal(target_rlim);
852 if (target_rlim_swap == TARGET_RLIM_INFINITY)
853 return RLIM_INFINITY;
855 result = target_rlim_swap;
856 if (target_rlim_swap != (rlim_t)result)
857 return RLIM_INFINITY;
859 return result;
862 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
864 abi_ulong target_rlim_swap;
865 abi_ulong result;
867 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
868 target_rlim_swap = TARGET_RLIM_INFINITY;
869 else
870 target_rlim_swap = rlim;
871 result = tswapal(target_rlim_swap);
873 return result;
876 static inline int target_to_host_resource(int code)
878 switch (code) {
879 case TARGET_RLIMIT_AS:
880 return RLIMIT_AS;
881 case TARGET_RLIMIT_CORE:
882 return RLIMIT_CORE;
883 case TARGET_RLIMIT_CPU:
884 return RLIMIT_CPU;
885 case TARGET_RLIMIT_DATA:
886 return RLIMIT_DATA;
887 case TARGET_RLIMIT_FSIZE:
888 return RLIMIT_FSIZE;
889 case TARGET_RLIMIT_LOCKS:
890 return RLIMIT_LOCKS;
891 case TARGET_RLIMIT_MEMLOCK:
892 return RLIMIT_MEMLOCK;
893 case TARGET_RLIMIT_MSGQUEUE:
894 return RLIMIT_MSGQUEUE;
895 case TARGET_RLIMIT_NICE:
896 return RLIMIT_NICE;
897 case TARGET_RLIMIT_NOFILE:
898 return RLIMIT_NOFILE;
899 case TARGET_RLIMIT_NPROC:
900 return RLIMIT_NPROC;
901 case TARGET_RLIMIT_RSS:
902 return RLIMIT_RSS;
903 case TARGET_RLIMIT_RTPRIO:
904 return RLIMIT_RTPRIO;
905 case TARGET_RLIMIT_SIGPENDING:
906 return RLIMIT_SIGPENDING;
907 case TARGET_RLIMIT_STACK:
908 return RLIMIT_STACK;
909 default:
910 return code;
914 static inline abi_long copy_from_user_timeval(struct timeval *tv,
915 abi_ulong target_tv_addr)
917 struct target_timeval *target_tv;
919 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
920 return -TARGET_EFAULT;
922 __get_user(tv->tv_sec, &target_tv->tv_sec);
923 __get_user(tv->tv_usec, &target_tv->tv_usec);
925 unlock_user_struct(target_tv, target_tv_addr, 0);
927 return 0;
930 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
931 const struct timeval *tv)
933 struct target_timeval *target_tv;
935 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
936 return -TARGET_EFAULT;
938 __put_user(tv->tv_sec, &target_tv->tv_sec);
939 __put_user(tv->tv_usec, &target_tv->tv_usec);
941 unlock_user_struct(target_tv, target_tv_addr, 1);
943 return 0;
946 static inline abi_long copy_from_user_timezone(struct timezone *tz,
947 abi_ulong target_tz_addr)
949 struct target_timezone *target_tz;
951 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
952 return -TARGET_EFAULT;
955 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
956 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
958 unlock_user_struct(target_tz, target_tz_addr, 0);
960 return 0;
963 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
964 #include <mqueue.h>
966 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
967 abi_ulong target_mq_attr_addr)
969 struct target_mq_attr *target_mq_attr;
971 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
972 target_mq_attr_addr, 1))
973 return -TARGET_EFAULT;
975 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
976 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
977 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
978 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
980 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
982 return 0;
985 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
986 const struct mq_attr *attr)
988 struct target_mq_attr *target_mq_attr;
990 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
991 target_mq_attr_addr, 0))
992 return -TARGET_EFAULT;
994 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
995 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
996 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
997 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
999 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1001 return 0;
1003 #endif
1005 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1006 /* do_select() must return target values and target errnos. */
1007 static abi_long do_select(int n,
1008 abi_ulong rfd_addr, abi_ulong wfd_addr,
1009 abi_ulong efd_addr, abi_ulong target_tv_addr)
1011 fd_set rfds, wfds, efds;
1012 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1013 struct timeval tv, *tv_ptr;
1014 abi_long ret;
1016 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1017 if (ret) {
1018 return ret;
1020 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1021 if (ret) {
1022 return ret;
1024 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1025 if (ret) {
1026 return ret;
1029 if (target_tv_addr) {
1030 if (copy_from_user_timeval(&tv, target_tv_addr))
1031 return -TARGET_EFAULT;
1032 tv_ptr = &tv;
1033 } else {
1034 tv_ptr = NULL;
1037 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1039 if (!is_error(ret)) {
1040 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1041 return -TARGET_EFAULT;
1042 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1043 return -TARGET_EFAULT;
1044 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1045 return -TARGET_EFAULT;
1047 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1048 return -TARGET_EFAULT;
1051 return ret;
1053 #endif
1055 static abi_long do_pipe2(int host_pipe[], int flags)
1057 #ifdef CONFIG_PIPE2
1058 return pipe2(host_pipe, flags);
1059 #else
1060 return -ENOSYS;
1061 #endif
1064 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1065 int flags, int is_pipe2)
1067 int host_pipe[2];
1068 abi_long ret;
1069 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1071 if (is_error(ret))
1072 return get_errno(ret);
1074 /* Several targets have special calling conventions for the original
1075 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1076 if (!is_pipe2) {
1077 #if defined(TARGET_ALPHA)
1078 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1079 return host_pipe[0];
1080 #elif defined(TARGET_MIPS)
1081 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1082 return host_pipe[0];
1083 #elif defined(TARGET_SH4)
1084 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1085 return host_pipe[0];
1086 #elif defined(TARGET_SPARC)
1087 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1088 return host_pipe[0];
1089 #endif
1092 if (put_user_s32(host_pipe[0], pipedes)
1093 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1094 return -TARGET_EFAULT;
1095 return get_errno(ret);
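/* Copy a target ip_mreq/ip_mreqn multicast request into the host structure;
 * the imr_ifindex field is only present in the larger ip_mreqn layout. */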
1098 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1099 abi_ulong target_addr,
1100 socklen_t len)
1102 struct target_ip_mreqn *target_smreqn;
1104 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1105 if (!target_smreqn)
1106 return -TARGET_EFAULT;
1107 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1108 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1109 if (len == sizeof(struct target_ip_mreqn))
1110 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1111 unlock_user(target_smreqn, target_addr, 0);
1113 return 0;
1116 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1117 abi_ulong target_addr,
1118 socklen_t len)
1120 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1121 sa_family_t sa_family;
1122 struct target_sockaddr *target_saddr;
1124 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1125 if (!target_saddr)
1126 return -TARGET_EFAULT;
1128 sa_family = tswap16(target_saddr->sa_family);
1130     /* Oops. The caller might send an incomplete sun_path; sun_path
1131 * must be terminated by \0 (see the manual page), but
1132 * unfortunately it is quite common to specify sockaddr_un
1133 * length as "strlen(x->sun_path)" while it should be
1134 * "strlen(...) + 1". We'll fix that here if needed.
1135 * Linux kernel has a similar feature.
1138 if (sa_family == AF_UNIX) {
1139 if (len < unix_maxlen && len > 0) {
1140 char *cp = (char*)target_saddr;
1142 if ( cp[len-1] && !cp[len] )
1143 len++;
1145 if (len > unix_maxlen)
1146 len = unix_maxlen;
1149 memcpy(addr, target_saddr, len);
1150 addr->sa_family = sa_family;
1151 if (sa_family == AF_PACKET) {
1152 struct target_sockaddr_ll *lladdr;
1154 lladdr = (struct target_sockaddr_ll *)addr;
1155 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1156 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1158 unlock_user(target_saddr, target_addr, 0);
1160 return 0;
1163 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1164 struct sockaddr *addr,
1165 socklen_t len)
1167 struct target_sockaddr *target_saddr;
1169 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1170 if (!target_saddr)
1171 return -TARGET_EFAULT;
1172 memcpy(target_saddr, addr, len);
1173 target_saddr->sa_family = tswap16(addr->sa_family);
1174 unlock_user(target_saddr, target_addr, len);
1176 return 0;
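/* Repack the ancillary data (control messages) of a target msghdr into host
 * format: headers are byte-swapped, SCM_RIGHTS file descriptors and
 * SCM_CREDENTIALS payloads are converted field by field, and anything else is
 * copied verbatim with a warning. */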
1179 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1180 struct target_msghdr *target_msgh)
1182 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1183 abi_long msg_controllen;
1184 abi_ulong target_cmsg_addr;
1185 struct target_cmsghdr *target_cmsg;
1186 socklen_t space = 0;
1188 msg_controllen = tswapal(target_msgh->msg_controllen);
1189 if (msg_controllen < sizeof (struct target_cmsghdr))
1190 goto the_end;
1191 target_cmsg_addr = tswapal(target_msgh->msg_control);
1192 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1193 if (!target_cmsg)
1194 return -TARGET_EFAULT;
1196 while (cmsg && target_cmsg) {
1197 void *data = CMSG_DATA(cmsg);
1198 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1200 int len = tswapal(target_cmsg->cmsg_len)
1201 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1203 space += CMSG_SPACE(len);
1204 if (space > msgh->msg_controllen) {
1205 space -= CMSG_SPACE(len);
1206 gemu_log("Host cmsg overflow\n");
1207 break;
1210 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1211 cmsg->cmsg_level = SOL_SOCKET;
1212 } else {
1213 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1215 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1216 cmsg->cmsg_len = CMSG_LEN(len);
1218 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1219 int *fd = (int *)data;
1220 int *target_fd = (int *)target_data;
1221 int i, numfds = len / sizeof(int);
1223 for (i = 0; i < numfds; i++)
1224 fd[i] = tswap32(target_fd[i]);
1225 } else if (cmsg->cmsg_level == SOL_SOCKET
1226 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1227 struct ucred *cred = (struct ucred *)data;
1228 struct target_ucred *target_cred =
1229 (struct target_ucred *)target_data;
1231 __put_user(target_cred->pid, &cred->pid);
1232 __put_user(target_cred->uid, &cred->uid);
1233 __put_user(target_cred->gid, &cred->gid);
1234 } else {
1235 gemu_log("Unsupported ancillary data: %d/%d\n",
1236 cmsg->cmsg_level, cmsg->cmsg_type);
1237 memcpy(data, target_data, len);
1240 cmsg = CMSG_NXTHDR(msgh, cmsg);
1241 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1243 unlock_user(target_cmsg, target_cmsg_addr, 0);
1244 the_end:
1245 msgh->msg_controllen = space;
1246 return 0;
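/* Reverse direction: repack host ancillary data into the target msghdr,
 * converting SCM_RIGHTS, SO_TIMESTAMP and SCM_CREDENTIALS payloads and copying
 * anything unrecognised verbatim. */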
1249 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1250 struct msghdr *msgh)
1252 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1253 abi_long msg_controllen;
1254 abi_ulong target_cmsg_addr;
1255 struct target_cmsghdr *target_cmsg;
1256 socklen_t space = 0;
1258 msg_controllen = tswapal(target_msgh->msg_controllen);
1259 if (msg_controllen < sizeof (struct target_cmsghdr))
1260 goto the_end;
1261 target_cmsg_addr = tswapal(target_msgh->msg_control);
1262 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1263 if (!target_cmsg)
1264 return -TARGET_EFAULT;
1266 while (cmsg && target_cmsg) {
1267 void *data = CMSG_DATA(cmsg);
1268 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1270 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1272 space += TARGET_CMSG_SPACE(len);
1273 if (space > msg_controllen) {
1274 space -= TARGET_CMSG_SPACE(len);
1275 gemu_log("Target cmsg overflow\n");
1276 break;
1279 if (cmsg->cmsg_level == SOL_SOCKET) {
1280 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1281 } else {
1282 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1284 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1285 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1287 switch (cmsg->cmsg_level) {
1288 case SOL_SOCKET:
1289 switch (cmsg->cmsg_type) {
1290 case SCM_RIGHTS:
1292 int *fd = (int *)data;
1293 int *target_fd = (int *)target_data;
1294 int i, numfds = len / sizeof(int);
1296 for (i = 0; i < numfds; i++)
1297 target_fd[i] = tswap32(fd[i]);
1298 break;
1300 case SO_TIMESTAMP:
1302 struct timeval *tv = (struct timeval *)data;
1303 struct target_timeval *target_tv =
1304 (struct target_timeval *)target_data;
1306 if (len != sizeof(struct timeval))
1307 goto unimplemented;
1309 /* copy struct timeval to target */
1310 target_tv->tv_sec = tswapal(tv->tv_sec);
1311 target_tv->tv_usec = tswapal(tv->tv_usec);
1312 break;
1314 case SCM_CREDENTIALS:
1316 struct ucred *cred = (struct ucred *)data;
1317 struct target_ucred *target_cred =
1318 (struct target_ucred *)target_data;
1320 __put_user(cred->pid, &target_cred->pid);
1321 __put_user(cred->uid, &target_cred->uid);
1322 __put_user(cred->gid, &target_cred->gid);
1323 break;
1325 default:
1326 goto unimplemented;
1328 break;
1330 default:
1331 unimplemented:
1332 gemu_log("Unsupported ancillary data: %d/%d\n",
1333 cmsg->cmsg_level, cmsg->cmsg_type);
1334 memcpy(target_data, data, len);
1337 cmsg = CMSG_NXTHDR(msgh, cmsg);
1338 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1340 unlock_user(target_cmsg, target_cmsg_addr, space);
1341 the_end:
1342 target_msgh->msg_controllen = tswapal(space);
1343 return 0;
1346 /* do_setsockopt() Must return target values and target errnos. */
1347 static abi_long do_setsockopt(int sockfd, int level, int optname,
1348 abi_ulong optval_addr, socklen_t optlen)
1350 abi_long ret;
1351 int val;
1352 struct ip_mreqn *ip_mreq;
1353 struct ip_mreq_source *ip_mreq_source;
1355 switch(level) {
1356 case SOL_TCP:
1357 /* TCP options all take an 'int' value. */
1358 if (optlen < sizeof(uint32_t))
1359 return -TARGET_EINVAL;
1361 if (get_user_u32(val, optval_addr))
1362 return -TARGET_EFAULT;
1363 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1364 break;
1365 case SOL_IP:
1366 switch(optname) {
1367 case IP_TOS:
1368 case IP_TTL:
1369 case IP_HDRINCL:
1370 case IP_ROUTER_ALERT:
1371 case IP_RECVOPTS:
1372 case IP_RETOPTS:
1373 case IP_PKTINFO:
1374 case IP_MTU_DISCOVER:
1375 case IP_RECVERR:
1376 case IP_RECVTOS:
1377 #ifdef IP_FREEBIND
1378 case IP_FREEBIND:
1379 #endif
1380 case IP_MULTICAST_TTL:
1381 case IP_MULTICAST_LOOP:
1382 val = 0;
1383 if (optlen >= sizeof(uint32_t)) {
1384 if (get_user_u32(val, optval_addr))
1385 return -TARGET_EFAULT;
1386 } else if (optlen >= 1) {
1387 if (get_user_u8(val, optval_addr))
1388 return -TARGET_EFAULT;
1390 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1391 break;
1392 case IP_ADD_MEMBERSHIP:
1393 case IP_DROP_MEMBERSHIP:
1394 if (optlen < sizeof (struct target_ip_mreq) ||
1395 optlen > sizeof (struct target_ip_mreqn))
1396 return -TARGET_EINVAL;
1398 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1399 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1400 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1401 break;
1403 case IP_BLOCK_SOURCE:
1404 case IP_UNBLOCK_SOURCE:
1405 case IP_ADD_SOURCE_MEMBERSHIP:
1406 case IP_DROP_SOURCE_MEMBERSHIP:
1407 if (optlen != sizeof (struct target_ip_mreq_source))
1408 return -TARGET_EINVAL;
1410 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1411 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1412 unlock_user (ip_mreq_source, optval_addr, 0);
1413 break;
1415 default:
1416 goto unimplemented;
1418 break;
1419 case SOL_IPV6:
1420 switch (optname) {
1421 case IPV6_MTU_DISCOVER:
1422 case IPV6_MTU:
1423 case IPV6_V6ONLY:
1424 case IPV6_RECVPKTINFO:
1425 val = 0;
1426 if (optlen < sizeof(uint32_t)) {
1427 return -TARGET_EINVAL;
1429 if (get_user_u32(val, optval_addr)) {
1430 return -TARGET_EFAULT;
1432 ret = get_errno(setsockopt(sockfd, level, optname,
1433 &val, sizeof(val)));
1434 break;
1435 default:
1436 goto unimplemented;
1438 break;
1439 case SOL_RAW:
1440 switch (optname) {
1441 case ICMP_FILTER:
1442         /* struct icmp_filter takes a u32 value */
1443 if (optlen < sizeof(uint32_t)) {
1444 return -TARGET_EINVAL;
1447 if (get_user_u32(val, optval_addr)) {
1448 return -TARGET_EFAULT;
1450 ret = get_errno(setsockopt(sockfd, level, optname,
1451 &val, sizeof(val)));
1452 break;
1454 default:
1455 goto unimplemented;
1457 break;
1458 case TARGET_SOL_SOCKET:
1459 switch (optname) {
1460 case TARGET_SO_RCVTIMEO:
1462 struct timeval tv;
1464 optname = SO_RCVTIMEO;
1466 set_timeout:
1467 if (optlen != sizeof(struct target_timeval)) {
1468 return -TARGET_EINVAL;
1471 if (copy_from_user_timeval(&tv, optval_addr)) {
1472 return -TARGET_EFAULT;
1475 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1476 &tv, sizeof(tv)));
1477 return ret;
1479 case TARGET_SO_SNDTIMEO:
1480 optname = SO_SNDTIMEO;
1481 goto set_timeout;
1482 case TARGET_SO_ATTACH_FILTER:
1484 struct target_sock_fprog *tfprog;
1485 struct target_sock_filter *tfilter;
1486 struct sock_fprog fprog;
1487 struct sock_filter *filter;
1488 int i;
1490 if (optlen != sizeof(*tfprog)) {
1491 return -TARGET_EINVAL;
1493 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1494 return -TARGET_EFAULT;
1496 if (!lock_user_struct(VERIFY_READ, tfilter,
1497 tswapal(tfprog->filter), 0)) {
1498 unlock_user_struct(tfprog, optval_addr, 1);
1499 return -TARGET_EFAULT;
1502 fprog.len = tswap16(tfprog->len);
1503 filter = malloc(fprog.len * sizeof(*filter));
1504 if (filter == NULL) {
1505 unlock_user_struct(tfilter, tfprog->filter, 1);
1506 unlock_user_struct(tfprog, optval_addr, 1);
1507 return -TARGET_ENOMEM;
1509 for (i = 0; i < fprog.len; i++) {
1510 filter[i].code = tswap16(tfilter[i].code);
1511 filter[i].jt = tfilter[i].jt;
1512 filter[i].jf = tfilter[i].jf;
1513 filter[i].k = tswap32(tfilter[i].k);
1515 fprog.filter = filter;
1517 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1518 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1519 free(filter);
1521 unlock_user_struct(tfilter, tfprog->filter, 1);
1522 unlock_user_struct(tfprog, optval_addr, 1);
1523 return ret;
1525 case TARGET_SO_BINDTODEVICE:
1527 char *dev_ifname, *addr_ifname;
1529 if (optlen > IFNAMSIZ - 1) {
1530 optlen = IFNAMSIZ - 1;
1532 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1533 if (!dev_ifname) {
1534 return -TARGET_EFAULT;
1536 optname = SO_BINDTODEVICE;
1537 addr_ifname = alloca(IFNAMSIZ);
1538 memcpy(addr_ifname, dev_ifname, optlen);
1539 addr_ifname[optlen] = 0;
1540 ret = get_errno(setsockopt(sockfd, level, optname, addr_ifname, optlen));
1541 unlock_user (dev_ifname, optval_addr, 0);
1542 return ret;
1544 /* Options with 'int' argument. */
1545 case TARGET_SO_DEBUG:
1546 optname = SO_DEBUG;
1547 break;
1548 case TARGET_SO_REUSEADDR:
1549 optname = SO_REUSEADDR;
1550 break;
1551 case TARGET_SO_TYPE:
1552 optname = SO_TYPE;
1553 break;
1554 case TARGET_SO_ERROR:
1555 optname = SO_ERROR;
1556 break;
1557 case TARGET_SO_DONTROUTE:
1558 optname = SO_DONTROUTE;
1559 break;
1560 case TARGET_SO_BROADCAST:
1561 optname = SO_BROADCAST;
1562 break;
1563 case TARGET_SO_SNDBUF:
1564 optname = SO_SNDBUF;
1565 break;
1566 case TARGET_SO_SNDBUFFORCE:
1567 optname = SO_SNDBUFFORCE;
1568 break;
1569 case TARGET_SO_RCVBUF:
1570 optname = SO_RCVBUF;
1571 break;
1572 case TARGET_SO_RCVBUFFORCE:
1573 optname = SO_RCVBUFFORCE;
1574 break;
1575 case TARGET_SO_KEEPALIVE:
1576 optname = SO_KEEPALIVE;
1577 break;
1578 case TARGET_SO_OOBINLINE:
1579 optname = SO_OOBINLINE;
1580 break;
1581 case TARGET_SO_NO_CHECK:
1582 optname = SO_NO_CHECK;
1583 break;
1584 case TARGET_SO_PRIORITY:
1585 optname = SO_PRIORITY;
1586 break;
1587 #ifdef SO_BSDCOMPAT
1588 case TARGET_SO_BSDCOMPAT:
1589 optname = SO_BSDCOMPAT;
1590 break;
1591 #endif
1592 case TARGET_SO_PASSCRED:
1593 optname = SO_PASSCRED;
1594 break;
1595 case TARGET_SO_PASSSEC:
1596 optname = SO_PASSSEC;
1597 break;
1598 case TARGET_SO_TIMESTAMP:
1599 optname = SO_TIMESTAMP;
1600 break;
1601 case TARGET_SO_RCVLOWAT:
1602 optname = SO_RCVLOWAT;
1603 break;
1605 default:
1606 goto unimplemented;
1608 if (optlen < sizeof(uint32_t))
1609 return -TARGET_EINVAL;
1611 if (get_user_u32(val, optval_addr))
1612 return -TARGET_EFAULT;
1613 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1614 break;
1615 default:
1616 unimplemented:
1617 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1618 ret = -TARGET_ENOPROTOOPT;
1620 return ret;
1623 /* do_getsockopt() Must return target values and target errnos. */
1624 static abi_long do_getsockopt(int sockfd, int level, int optname,
1625 abi_ulong optval_addr, abi_ulong optlen)
1627 abi_long ret;
1628 int len, val;
1629 socklen_t lv;
1631 switch(level) {
1632 case TARGET_SOL_SOCKET:
1633 level = SOL_SOCKET;
1634 switch (optname) {
1635 /* These don't just return a single integer */
1636 case TARGET_SO_LINGER:
1637 case TARGET_SO_RCVTIMEO:
1638 case TARGET_SO_SNDTIMEO:
1639 case TARGET_SO_PEERNAME:
1640 goto unimplemented;
1641 case TARGET_SO_PEERCRED: {
1642 struct ucred cr;
1643 socklen_t crlen;
1644 struct target_ucred *tcr;
1646 if (get_user_u32(len, optlen)) {
1647 return -TARGET_EFAULT;
1649 if (len < 0) {
1650 return -TARGET_EINVAL;
1653 crlen = sizeof(cr);
1654 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1655 &cr, &crlen));
1656 if (ret < 0) {
1657 return ret;
1659 if (len > crlen) {
1660 len = crlen;
1662 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1663 return -TARGET_EFAULT;
1665 __put_user(cr.pid, &tcr->pid);
1666 __put_user(cr.uid, &tcr->uid);
1667 __put_user(cr.gid, &tcr->gid);
1668 unlock_user_struct(tcr, optval_addr, 1);
1669 if (put_user_u32(len, optlen)) {
1670 return -TARGET_EFAULT;
1672 break;
1674 /* Options with 'int' argument. */
1675 case TARGET_SO_DEBUG:
1676 optname = SO_DEBUG;
1677 goto int_case;
1678 case TARGET_SO_REUSEADDR:
1679 optname = SO_REUSEADDR;
1680 goto int_case;
1681 case TARGET_SO_TYPE:
1682 optname = SO_TYPE;
1683 goto int_case;
1684 case TARGET_SO_ERROR:
1685 optname = SO_ERROR;
1686 goto int_case;
1687 case TARGET_SO_DONTROUTE:
1688 optname = SO_DONTROUTE;
1689 goto int_case;
1690 case TARGET_SO_BROADCAST:
1691 optname = SO_BROADCAST;
1692 goto int_case;
1693 case TARGET_SO_SNDBUF:
1694 optname = SO_SNDBUF;
1695 goto int_case;
1696 case TARGET_SO_RCVBUF:
1697 optname = SO_RCVBUF;
1698 goto int_case;
1699 case TARGET_SO_KEEPALIVE:
1700 optname = SO_KEEPALIVE;
1701 goto int_case;
1702 case TARGET_SO_OOBINLINE:
1703 optname = SO_OOBINLINE;
1704 goto int_case;
1705 case TARGET_SO_NO_CHECK:
1706 optname = SO_NO_CHECK;
1707 goto int_case;
1708 case TARGET_SO_PRIORITY:
1709 optname = SO_PRIORITY;
1710 goto int_case;
1711 #ifdef SO_BSDCOMPAT
1712 case TARGET_SO_BSDCOMPAT:
1713 optname = SO_BSDCOMPAT;
1714 goto int_case;
1715 #endif
1716 case TARGET_SO_PASSCRED:
1717 optname = SO_PASSCRED;
1718 goto int_case;
1719 case TARGET_SO_TIMESTAMP:
1720 optname = SO_TIMESTAMP;
1721 goto int_case;
1722 case TARGET_SO_RCVLOWAT:
1723 optname = SO_RCVLOWAT;
1724 goto int_case;
1725 case TARGET_SO_ACCEPTCONN:
1726 optname = SO_ACCEPTCONN;
1727 goto int_case;
1728 default:
1729 goto int_case;
1731 break;
1732 case SOL_TCP:
1733 /* TCP options all take an 'int' value. */
1734 int_case:
1735 if (get_user_u32(len, optlen))
1736 return -TARGET_EFAULT;
1737 if (len < 0)
1738 return -TARGET_EINVAL;
1739 lv = sizeof(lv);
1740 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1741 if (ret < 0)
1742 return ret;
1743 if (optname == SO_TYPE) {
1744 val = host_to_target_sock_type(val);
1746 if (len > lv)
1747 len = lv;
1748 if (len == 4) {
1749 if (put_user_u32(val, optval_addr))
1750 return -TARGET_EFAULT;
1751 } else {
1752 if (put_user_u8(val, optval_addr))
1753 return -TARGET_EFAULT;
1755 if (put_user_u32(len, optlen))
1756 return -TARGET_EFAULT;
1757 break;
1758 case SOL_IP:
1759 switch(optname) {
1760 case IP_TOS:
1761 case IP_TTL:
1762 case IP_HDRINCL:
1763 case IP_ROUTER_ALERT:
1764 case IP_RECVOPTS:
1765 case IP_RETOPTS:
1766 case IP_PKTINFO:
1767 case IP_MTU_DISCOVER:
1768 case IP_RECVERR:
1769 case IP_RECVTOS:
1770 #ifdef IP_FREEBIND
1771 case IP_FREEBIND:
1772 #endif
1773 case IP_MULTICAST_TTL:
1774 case IP_MULTICAST_LOOP:
1775 if (get_user_u32(len, optlen))
1776 return -TARGET_EFAULT;
1777 if (len < 0)
1778 return -TARGET_EINVAL;
1779 lv = sizeof(lv);
1780 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1781 if (ret < 0)
1782 return ret;
1783 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1784 len = 1;
1785 if (put_user_u32(len, optlen)
1786 || put_user_u8(val, optval_addr))
1787 return -TARGET_EFAULT;
1788 } else {
1789 if (len > sizeof(int))
1790 len = sizeof(int);
1791 if (put_user_u32(len, optlen)
1792 || put_user_u32(val, optval_addr))
1793 return -TARGET_EFAULT;
1795 break;
1796 default:
1797 ret = -TARGET_ENOPROTOOPT;
1798 break;
1800 break;
1801 default:
1802 unimplemented:
1803 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1804 level, optname);
1805 ret = -TARGET_EOPNOTSUPP;
1806 break;
1808 return ret;
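/* Lock a guest iovec array and the buffers it points to, returning a freshly
 * allocated host struct iovec array (or NULL with errno set on failure);
 * unlock_iovec() below releases it again. */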
1811 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1812 int count, int copy)
1814 struct target_iovec *target_vec;
1815 struct iovec *vec;
1816 abi_ulong total_len, max_len;
1817 int i;
1818 int err = 0;
1819 bool bad_address = false;
1821 if (count == 0) {
1822 errno = 0;
1823 return NULL;
1825 if (count < 0 || count > IOV_MAX) {
1826 errno = EINVAL;
1827 return NULL;
1830 vec = calloc(count, sizeof(struct iovec));
1831 if (vec == NULL) {
1832 errno = ENOMEM;
1833 return NULL;
1836 target_vec = lock_user(VERIFY_READ, target_addr,
1837 count * sizeof(struct target_iovec), 1);
1838 if (target_vec == NULL) {
1839 err = EFAULT;
1840 goto fail2;
1843 /* ??? If host page size > target page size, this will result in a
1844 value larger than what we can actually support. */
1845 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1846 total_len = 0;
1848 for (i = 0; i < count; i++) {
1849 abi_ulong base = tswapal(target_vec[i].iov_base);
1850 abi_long len = tswapal(target_vec[i].iov_len);
1852 if (len < 0) {
1853 err = EINVAL;
1854 goto fail;
1855 } else if (len == 0) {
1856 /* Zero length pointer is ignored. */
1857 vec[i].iov_base = 0;
1858 } else {
1859 vec[i].iov_base = lock_user(type, base, len, copy);
1860 /* If the first buffer pointer is bad, this is a fault. But
1861 * subsequent bad buffers will result in a partial write; this
1862 * is realized by filling the vector with null pointers and
1863 * zero lengths. */
1864 if (!vec[i].iov_base) {
1865 if (i == 0) {
1866 err = EFAULT;
1867 goto fail;
1868 } else {
1869 bad_address = true;
1872 if (bad_address) {
1873 len = 0;
1875 if (len > max_len - total_len) {
1876 len = max_len - total_len;
1879 vec[i].iov_len = len;
1880 total_len += len;
1883 unlock_user(target_vec, target_addr, 0);
1884 return vec;
1886 fail:
1887 while (--i >= 0) {
1888 if (tswapal(target_vec[i].iov_len) > 0) {
1889 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
1892 unlock_user(target_vec, target_addr, 0);
1893 fail2:
1894 free(vec);
1895 errno = err;
1896 return NULL;
1899 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1900 int count, int copy)
1902 struct target_iovec *target_vec;
1903 int i;
1905 target_vec = lock_user(VERIFY_READ, target_addr,
1906 count * sizeof(struct target_iovec), 1);
1907 if (target_vec) {
1908 for (i = 0; i < count; i++) {
1909 abi_ulong base = tswapal(target_vec[i].iov_base);
1910 abi_long len = tswapal(target_vec[i].iov_len);
1911 if (len < 0) {
1912 break;
1914 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1916 unlock_user(target_vec, target_addr, 0);
1919 free(vec);
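/* Translate the target SOCK_* type and type flags (SOCK_CLOEXEC,
 * SOCK_NONBLOCK) into host values, failing with -TARGET_EINVAL when the host
 * cannot express a requested flag. */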
1922 static inline int target_to_host_sock_type(int *type)
1924 int host_type = 0;
1925 int target_type = *type;
1927 switch (target_type & TARGET_SOCK_TYPE_MASK) {
1928 case TARGET_SOCK_DGRAM:
1929 host_type = SOCK_DGRAM;
1930 break;
1931 case TARGET_SOCK_STREAM:
1932 host_type = SOCK_STREAM;
1933 break;
1934 default:
1935 host_type = target_type & TARGET_SOCK_TYPE_MASK;
1936 break;
1938 if (target_type & TARGET_SOCK_CLOEXEC) {
1939 #if defined(SOCK_CLOEXEC)
1940 host_type |= SOCK_CLOEXEC;
1941 #else
1942 return -TARGET_EINVAL;
1943 #endif
1945 if (target_type & TARGET_SOCK_NONBLOCK) {
1946 #if defined(SOCK_NONBLOCK)
1947 host_type |= SOCK_NONBLOCK;
1948 #elif !defined(O_NONBLOCK)
1949 return -TARGET_EINVAL;
1950 #endif
1952 *type = host_type;
1953 return 0;
1956 /* Try to emulate socket type flags after socket creation. */
1957 static int sock_flags_fixup(int fd, int target_type)
1959 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
1960 if (target_type & TARGET_SOCK_NONBLOCK) {
1961 int flags = fcntl(fd, F_GETFL);
1962 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
1963 close(fd);
1964 return -TARGET_EINVAL;
1967 #endif
1968 return fd;
1971 /* do_socket() Must return target values and target errnos. */
1972 static abi_long do_socket(int domain, int type, int protocol)
1974 int target_type = type;
1975 int ret;
1977 ret = target_to_host_sock_type(&type);
1978 if (ret) {
1979 return ret;
1982 if (domain == PF_NETLINK)
1983 return -TARGET_EAFNOSUPPORT;
1984 ret = get_errno(socket(domain, type, protocol));
1985 if (ret >= 0) {
1986 ret = sock_flags_fixup(ret, target_type);
1988 return ret;
1991 /* do_bind() Must return target values and target errnos. */
1992 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1993 socklen_t addrlen)
1995 void *addr;
1996 abi_long ret;
1998 if ((int)addrlen < 0) {
1999 return -TARGET_EINVAL;
2002 addr = alloca(addrlen+1);
2004 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2005 if (ret)
2006 return ret;
2008 return get_errno(bind(sockfd, addr, addrlen));
2011 /* do_connect() Must return target values and target errnos. */
2012 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2013 socklen_t addrlen)
2015 void *addr;
2016 abi_long ret;
2018 if ((int)addrlen < 0) {
2019 return -TARGET_EINVAL;
2022 addr = alloca(addrlen+1);
2024 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2025 if (ret)
2026 return ret;
2028 return get_errno(connect(sockfd, addr, addrlen));
2031 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2032 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2033 int flags, int send)
2035 abi_long ret, len;
2036 struct msghdr msg;
2037 int count;
2038 struct iovec *vec;
2039 abi_ulong target_vec;
2041 if (msgp->msg_name) {
2042 msg.msg_namelen = tswap32(msgp->msg_namelen);
2043 msg.msg_name = alloca(msg.msg_namelen+1);
2044 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
2045 msg.msg_namelen);
2046 if (ret) {
2047 goto out2;
2049 } else {
2050 msg.msg_name = NULL;
2051 msg.msg_namelen = 0;
2053 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2054 msg.msg_control = alloca(msg.msg_controllen);
2055 msg.msg_flags = tswap32(msgp->msg_flags);
2057 count = tswapal(msgp->msg_iovlen);
2058 target_vec = tswapal(msgp->msg_iov);
2059 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2060 target_vec, count, send);
2061 if (vec == NULL) {
2062 ret = -host_to_target_errno(errno);
2063 goto out2;
2065 msg.msg_iovlen = count;
2066 msg.msg_iov = vec;
2068 if (send) {
2069 ret = target_to_host_cmsg(&msg, msgp);
2070 if (ret == 0)
2071 ret = get_errno(sendmsg(fd, &msg, flags));
2072 } else {
2073 ret = get_errno(recvmsg(fd, &msg, flags));
2074 if (!is_error(ret)) {
2075 len = ret;
2076 ret = host_to_target_cmsg(msgp, &msg);
2077 if (!is_error(ret)) {
2078 msgp->msg_namelen = tswap32(msg.msg_namelen);
2079 if (msg.msg_name != NULL) {
2080 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2081 msg.msg_name, msg.msg_namelen);
2082 if (ret) {
2083 goto out;
2087 ret = len;
2092 out:
2093 unlock_iovec(vec, target_vec, count, !send);
2094 out2:
2095 return ret;
2098 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2099 int flags, int send)
2101 abi_long ret;
2102 struct target_msghdr *msgp;
2104 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2105 msgp,
2106 target_msg,
2107 send ? 1 : 0)) {
2108 return -TARGET_EFAULT;
2110 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2111 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2112 return ret;
2115 #ifdef TARGET_NR_sendmmsg
2116 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2117 * so it might not have this *mmsg-specific flag either.
2119 #ifndef MSG_WAITFORONE
2120 #define MSG_WAITFORONE 0x10000
2121 #endif
2123 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2124 unsigned int vlen, unsigned int flags,
2125 int send)
2127 struct target_mmsghdr *mmsgp;
2128 abi_long ret = 0;
2129 int i;
2131 if (vlen > UIO_MAXIOV) {
2132 vlen = UIO_MAXIOV;
2135 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2136 if (!mmsgp) {
2137 return -TARGET_EFAULT;
2140 for (i = 0; i < vlen; i++) {
2141 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2142 if (is_error(ret)) {
2143 break;
2145 mmsgp[i].msg_len = tswap32(ret);
2146 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2147 if (flags & MSG_WAITFORONE) {
2148 flags |= MSG_DONTWAIT;
2152 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2154 /* Return number of datagrams sent if we sent any at all;
2155 * otherwise return the error.
2157 if (i) {
2158 return i;
2160 return ret;
2162 #endif
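/*
 * For do_sendrecvmmsg() above: if, say, vlen is 5 and the third sendmsg()
 * fails, the loop stops with i == 2 and the guest sees 2, not the error;
 * only when no datagram was transferred at all is the error code returned.
 * This matches the partial-success behaviour of the kernel's own
 * sendmmsg()/recvmmsg().
 */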
2164 /* If we don't have a system accept4() then just call accept.
2165 * The callsites to do_accept4() will ensure that they don't
2166 * pass a non-zero flags argument in this config.
2168 #ifndef CONFIG_ACCEPT4
2169 static inline int accept4(int sockfd, struct sockaddr *addr,
2170 socklen_t *addrlen, int flags)
2172 assert(flags == 0);
2173 return accept(sockfd, addr, addrlen);
2175 #endif
2177 /* do_accept4() Must return target values and target errnos. */
2178 static abi_long do_accept4(int fd, abi_ulong target_addr,
2179 abi_ulong target_addrlen_addr, int flags)
2181 socklen_t addrlen;
2182 void *addr;
2183 abi_long ret;
2184 int host_flags;
2186 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2188 if (target_addr == 0) {
2189 return get_errno(accept4(fd, NULL, NULL, host_flags));
2192 /* Linux returns EINVAL if the addrlen pointer is invalid */
2193 if (get_user_u32(addrlen, target_addrlen_addr))
2194 return -TARGET_EINVAL;
2196 if ((int)addrlen < 0) {
2197 return -TARGET_EINVAL;
2200 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2201 return -TARGET_EINVAL;
2203 addr = alloca(addrlen);
2205 ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
2206 if (!is_error(ret)) {
2207 host_to_target_sockaddr(target_addr, addr, addrlen);
2208 if (put_user_u32(addrlen, target_addrlen_addr))
2209 ret = -TARGET_EFAULT;
2211 return ret;
2214 /* do_getpeername() Must return target values and target errnos. */
2215 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2216 abi_ulong target_addrlen_addr)
2218 socklen_t addrlen;
2219 void *addr;
2220 abi_long ret;
2222 if (get_user_u32(addrlen, target_addrlen_addr))
2223 return -TARGET_EFAULT;
2225 if ((int)addrlen < 0) {
2226 return -TARGET_EINVAL;
2229 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2230 return -TARGET_EFAULT;
2232 addr = alloca(addrlen);
2234 ret = get_errno(getpeername(fd, addr, &addrlen));
2235 if (!is_error(ret)) {
2236 host_to_target_sockaddr(target_addr, addr, addrlen);
2237 if (put_user_u32(addrlen, target_addrlen_addr))
2238 ret = -TARGET_EFAULT;
2240 return ret;
2243 /* do_getsockname() Must return target values and target errnos. */
2244 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2245 abi_ulong target_addrlen_addr)
2247 socklen_t addrlen;
2248 void *addr;
2249 abi_long ret;
2251 if (get_user_u32(addrlen, target_addrlen_addr))
2252 return -TARGET_EFAULT;
2254 if ((int)addrlen < 0) {
2255 return -TARGET_EINVAL;
2258 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2259 return -TARGET_EFAULT;
2261 addr = alloca(addrlen);
2263 ret = get_errno(getsockname(fd, addr, &addrlen));
2264 if (!is_error(ret)) {
2265 host_to_target_sockaddr(target_addr, addr, addrlen);
2266 if (put_user_u32(addrlen, target_addrlen_addr))
2267 ret = -TARGET_EFAULT;
2269 return ret;
2272 /* do_socketpair() Must return target values and target errnos. */
2273 static abi_long do_socketpair(int domain, int type, int protocol,
2274 abi_ulong target_tab_addr)
2276 int tab[2];
2277 abi_long ret;
2279 target_to_host_sock_type(&type);
2281 ret = get_errno(socketpair(domain, type, protocol, tab));
2282 if (!is_error(ret)) {
2283 if (put_user_s32(tab[0], target_tab_addr)
2284 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2285 ret = -TARGET_EFAULT;
2287 return ret;
2290 /* do_sendto() Must return target values and target errnos. */
2291 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2292 abi_ulong target_addr, socklen_t addrlen)
2294 void *addr;
2295 void *host_msg;
2296 abi_long ret;
2298 if ((int)addrlen < 0) {
2299 return -TARGET_EINVAL;
2302 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2303 if (!host_msg)
2304 return -TARGET_EFAULT;
2305 if (target_addr) {
2306 addr = alloca(addrlen+1);
2307 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2308 if (ret) {
2309 unlock_user(host_msg, msg, 0);
2310 return ret;
2312 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2313 } else {
2314 ret = get_errno(send(fd, host_msg, len, flags));
2316 unlock_user(host_msg, msg, 0);
2317 return ret;
2320 /* do_recvfrom() Must return target values and target errnos. */
2321 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2322 abi_ulong target_addr,
2323 abi_ulong target_addrlen)
2325 socklen_t addrlen;
2326 void *addr;
2327 void *host_msg;
2328 abi_long ret;
2330 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2331 if (!host_msg)
2332 return -TARGET_EFAULT;
2333 if (target_addr) {
2334 if (get_user_u32(addrlen, target_addrlen)) {
2335 ret = -TARGET_EFAULT;
2336 goto fail;
2338 if ((int)addrlen < 0) {
2339 ret = -TARGET_EINVAL;
2340 goto fail;
2342 addr = alloca(addrlen);
2343 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2344 } else {
2345 addr = NULL; /* To keep compiler quiet. */
2346 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2348 if (!is_error(ret)) {
2349 if (target_addr) {
2350 host_to_target_sockaddr(target_addr, addr, addrlen);
2351 if (put_user_u32(addrlen, target_addrlen)) {
2352 ret = -TARGET_EFAULT;
2353 goto fail;
2356 unlock_user(host_msg, msg, len);
2357 } else {
2358 fail:
2359 unlock_user(host_msg, msg, 0);
2361 return ret;
2364 #ifdef TARGET_NR_socketcall
2365 /* do_socketcall() Must return target values and target errnos. */
2366 static abi_long do_socketcall(int num, abi_ulong vptr)
2368 static const unsigned ac[] = { /* number of arguments per call */
2369 [SOCKOP_socket] = 3, /* domain, type, protocol */
2370 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
2371 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
2372 [SOCKOP_listen] = 2, /* sockfd, backlog */
2373 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
2374 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
2375 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
2376 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
2377 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
2378 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
2379 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
2380 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2381 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2382 [SOCKOP_shutdown] = 2, /* sockfd, how */
2383 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
2384 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
2385 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2386 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2388 abi_long a[6]; /* max 6 args */
2390 /* first, collect the arguments in a[] according to ac[] */
2391 if (num >= 0 && num < ARRAY_SIZE(ac)) {
2392 unsigned i;
2393 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
2394 for (i = 0; i < ac[num]; ++i) {
2395 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
2396 return -TARGET_EFAULT;
2401 /* now that we have the args, actually handle the call */
2402 switch (num) {
2403 case SOCKOP_socket: /* domain, type, protocol */
2404 return do_socket(a[0], a[1], a[2]);
2405 case SOCKOP_bind: /* sockfd, addr, addrlen */
2406 return do_bind(a[0], a[1], a[2]);
2407 case SOCKOP_connect: /* sockfd, addr, addrlen */
2408 return do_connect(a[0], a[1], a[2]);
2409 case SOCKOP_listen: /* sockfd, backlog */
2410 return get_errno(listen(a[0], a[1]));
2411 case SOCKOP_accept: /* sockfd, addr, addrlen */
2412 return do_accept4(a[0], a[1], a[2], 0);
2413 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
2414 return do_accept4(a[0], a[1], a[2], a[3]);
2415 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
2416 return do_getsockname(a[0], a[1], a[2]);
2417 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
2418 return do_getpeername(a[0], a[1], a[2]);
2419 case SOCKOP_socketpair: /* domain, type, protocol, tab */
2420 return do_socketpair(a[0], a[1], a[2], a[3]);
2421 case SOCKOP_send: /* sockfd, msg, len, flags */
2422 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
2423 case SOCKOP_recv: /* sockfd, msg, len, flags */
2424 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
2425 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
2426 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
2427 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
2428 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
2429 case SOCKOP_shutdown: /* sockfd, how */
2430 return get_errno(shutdown(a[0], a[1]));
2431 case SOCKOP_sendmsg: /* sockfd, msg, flags */
2432 return do_sendrecvmsg(a[0], a[1], a[2], 1);
2433 case SOCKOP_recvmsg: /* sockfd, msg, flags */
2434 return do_sendrecvmsg(a[0], a[1], a[2], 0);
2435 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
2436 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
2437 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
2438 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
2439 default:
2440 gemu_log("Unsupported socketcall: %d\n", num);
2441 return -TARGET_ENOSYS;
2444 #endif
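/*
 * Illustration for do_socketcall() above: the guest's libc packs the call's
 * arguments as consecutive abi_ulong words starting at vptr, so on a 32-bit
 * target a SOCKOP_connect request (3 arguments) looks roughly like:
 *
 *     vptr + 0:  sockfd
 *     vptr + 4:  guest pointer to the target sockaddr
 *     vptr + 8:  addrlen
 *
 * which is exactly what the get_user_ual(a[i], vptr + i * sizeof(abi_long))
 * loop reads before dispatching to do_connect().
 */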
2446 #define N_SHM_REGIONS 32
2448 static struct shm_region {
2449 abi_ulong start;
2450 abi_ulong size;
2451 } shm_regions[N_SHM_REGIONS];
2453 struct target_semid_ds
2455 struct target_ipc_perm sem_perm;
2456 abi_ulong sem_otime;
2457 #if !defined(TARGET_PPC64)
2458 abi_ulong __unused1;
2459 #endif
2460 abi_ulong sem_ctime;
2461 #if !defined(TARGET_PPC64)
2462 abi_ulong __unused2;
2463 #endif
2464 abi_ulong sem_nsems;
2465 abi_ulong __unused3;
2466 abi_ulong __unused4;
2469 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2470 abi_ulong target_addr)
2472 struct target_ipc_perm *target_ip;
2473 struct target_semid_ds *target_sd;
2475 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2476 return -TARGET_EFAULT;
2477 target_ip = &(target_sd->sem_perm);
2478 host_ip->__key = tswap32(target_ip->__key);
2479 host_ip->uid = tswap32(target_ip->uid);
2480 host_ip->gid = tswap32(target_ip->gid);
2481 host_ip->cuid = tswap32(target_ip->cuid);
2482 host_ip->cgid = tswap32(target_ip->cgid);
2483 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2484 host_ip->mode = tswap32(target_ip->mode);
2485 #else
2486 host_ip->mode = tswap16(target_ip->mode);
2487 #endif
2488 #if defined(TARGET_PPC)
2489 host_ip->__seq = tswap32(target_ip->__seq);
2490 #else
2491 host_ip->__seq = tswap16(target_ip->__seq);
2492 #endif
2493 unlock_user_struct(target_sd, target_addr, 0);
2494 return 0;
2497 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2498 struct ipc_perm *host_ip)
2500 struct target_ipc_perm *target_ip;
2501 struct target_semid_ds *target_sd;
2503 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2504 return -TARGET_EFAULT;
2505 target_ip = &(target_sd->sem_perm);
2506 target_ip->__key = tswap32(host_ip->__key);
2507 target_ip->uid = tswap32(host_ip->uid);
2508 target_ip->gid = tswap32(host_ip->gid);
2509 target_ip->cuid = tswap32(host_ip->cuid);
2510 target_ip->cgid = tswap32(host_ip->cgid);
2511 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2512 target_ip->mode = tswap32(host_ip->mode);
2513 #else
2514 target_ip->mode = tswap16(host_ip->mode);
2515 #endif
2516 #if defined(TARGET_PPC)
2517 target_ip->__seq = tswap32(host_ip->__seq);
2518 #else
2519 target_ip->__seq = tswap16(host_ip->__seq);
2520 #endif
2521 unlock_user_struct(target_sd, target_addr, 1);
2522 return 0;
2525 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2526 abi_ulong target_addr)
2528 struct target_semid_ds *target_sd;
2530 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2531 return -TARGET_EFAULT;
2532 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2533 return -TARGET_EFAULT;
2534 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2535 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2536 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2537 unlock_user_struct(target_sd, target_addr, 0);
2538 return 0;
2541 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2542 struct semid_ds *host_sd)
2544 struct target_semid_ds *target_sd;
2546 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2547 return -TARGET_EFAULT;
2548 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2549 return -TARGET_EFAULT;
2550 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2551 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2552 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2553 unlock_user_struct(target_sd, target_addr, 1);
2554 return 0;
2557 struct target_seminfo {
2558 int semmap;
2559 int semmni;
2560 int semmns;
2561 int semmnu;
2562 int semmsl;
2563 int semopm;
2564 int semume;
2565 int semusz;
2566 int semvmx;
2567 int semaem;
2570 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2571 struct seminfo *host_seminfo)
2573 struct target_seminfo *target_seminfo;
2574 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2575 return -TARGET_EFAULT;
2576 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2577 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2578 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2579 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2580 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2581 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2582 __put_user(host_seminfo->semume, &target_seminfo->semume);
2583 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2584 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2585 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2586 unlock_user_struct(target_seminfo, target_addr, 1);
2587 return 0;
2590 union semun {
2591 int val;
2592 struct semid_ds *buf;
2593 unsigned short *array;
2594 struct seminfo *__buf;
2597 union target_semun {
2598 int val;
2599 abi_ulong buf;
2600 abi_ulong array;
2601 abi_ulong __buf;
2604 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2605 abi_ulong target_addr)
2607 int nsems;
2608 unsigned short *array;
2609 union semun semun;
2610 struct semid_ds semid_ds;
2611 int i, ret;
2613 semun.buf = &semid_ds;
2615 ret = semctl(semid, 0, IPC_STAT, semun);
2616 if (ret == -1)
2617 return get_errno(ret);
2619 nsems = semid_ds.sem_nsems;
2621 *host_array = malloc(nsems*sizeof(unsigned short));
2622 if (!*host_array) {
2623 return -TARGET_ENOMEM;
2625 array = lock_user(VERIFY_READ, target_addr,
2626 nsems*sizeof(unsigned short), 1);
2627 if (!array) {
2628 free(*host_array);
2629 return -TARGET_EFAULT;
2632 for(i=0; i<nsems; i++) {
2633 __get_user((*host_array)[i], &array[i]);
2635 unlock_user(array, target_addr, 0);
2637 return 0;
2640 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2641 unsigned short **host_array)
2643 int nsems;
2644 unsigned short *array;
2645 union semun semun;
2646 struct semid_ds semid_ds;
2647 int i, ret;
2649 semun.buf = &semid_ds;
2651 ret = semctl(semid, 0, IPC_STAT, semun);
2652 if (ret == -1)
2653 return get_errno(ret);
2655 nsems = semid_ds.sem_nsems;
2657 array = lock_user(VERIFY_WRITE, target_addr,
2658 nsems*sizeof(unsigned short), 0);
2659 if (!array)
2660 return -TARGET_EFAULT;
2662 for(i=0; i<nsems; i++) {
2663 __put_user((*host_array)[i], &array[i]);
2665 free(*host_array);
2666 unlock_user(array, target_addr, 1);
2668 return 0;
2671 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2672 union target_semun target_su)
2674 union semun arg;
2675 struct semid_ds dsarg;
2676 unsigned short *array = NULL;
2677 struct seminfo seminfo;
2678 abi_long ret = -TARGET_EINVAL;
2679 abi_long err;
2680 cmd &= 0xff;
2682 switch( cmd ) {
2683 case GETVAL:
2684 case SETVAL:
2685 /* In 64 bit cross-endian situations, we will erroneously pick up
2686 * the wrong half of the union for the "val" element. To rectify
2687 * this, the entire 8-byte structure is byteswapped, followed by
2688 * a swap of the 4 byte val field. In other cases, the data is
2689 * already in proper host byte order. */
2690 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
2691 target_su.buf = tswapal(target_su.buf);
2692 arg.val = tswap32(target_su.val);
2693 } else {
2694 arg.val = target_su.val;
2696 ret = get_errno(semctl(semid, semnum, cmd, arg));
2697 break;
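        /*
         * Worked example for the swap above: with a big-endian 64-bit guest
         * on a little-endian host, the guest wrote the 4-byte "val" at the
         * start of the 8-byte union, but the caller fetched the union as a
         * single abi_ulong and byteswapped all 8 bytes, which leaves those
         * 4 bytes in the other half. tswapal() undoes the 64-bit swap to
         * recover the guest layout, and tswap32() then converts just the
         * 4-byte val field.
         */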
2698 case GETALL:
2699 case SETALL:
2700 err = target_to_host_semarray(semid, &array, target_su.array);
2701 if (err)
2702 return err;
2703 arg.array = array;
2704 ret = get_errno(semctl(semid, semnum, cmd, arg));
2705 err = host_to_target_semarray(semid, target_su.array, &array);
2706 if (err)
2707 return err;
2708 break;
2709 case IPC_STAT:
2710 case IPC_SET:
2711 case SEM_STAT:
2712 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2713 if (err)
2714 return err;
2715 arg.buf = &dsarg;
2716 ret = get_errno(semctl(semid, semnum, cmd, arg));
2717 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2718 if (err)
2719 return err;
2720 break;
2721 case IPC_INFO:
2722 case SEM_INFO:
2723 arg.__buf = &seminfo;
2724 ret = get_errno(semctl(semid, semnum, cmd, arg));
2725 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2726 if (err)
2727 return err;
2728 break;
2729 case IPC_RMID:
2730 case GETPID:
2731 case GETNCNT:
2732 case GETZCNT:
2733 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2734 break;
2737 return ret;
2740 struct target_sembuf {
2741 unsigned short sem_num;
2742 short sem_op;
2743 short sem_flg;
2746 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2747 abi_ulong target_addr,
2748 unsigned nsops)
2750 struct target_sembuf *target_sembuf;
2751 int i;
2753 target_sembuf = lock_user(VERIFY_READ, target_addr,
2754 nsops*sizeof(struct target_sembuf), 1);
2755 if (!target_sembuf)
2756 return -TARGET_EFAULT;
2758 for(i=0; i<nsops; i++) {
2759 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2760 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2761 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2764 unlock_user(target_sembuf, target_addr, 0);
2766 return 0;
2769 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2771 struct sembuf sops[nsops];
2773 if (target_to_host_sembuf(sops, ptr, nsops))
2774 return -TARGET_EFAULT;
2776 return get_errno(semop(semid, sops, nsops));
2779 struct target_msqid_ds
2781 struct target_ipc_perm msg_perm;
2782 abi_ulong msg_stime;
2783 #if TARGET_ABI_BITS == 32
2784 abi_ulong __unused1;
2785 #endif
2786 abi_ulong msg_rtime;
2787 #if TARGET_ABI_BITS == 32
2788 abi_ulong __unused2;
2789 #endif
2790 abi_ulong msg_ctime;
2791 #if TARGET_ABI_BITS == 32
2792 abi_ulong __unused3;
2793 #endif
2794 abi_ulong __msg_cbytes;
2795 abi_ulong msg_qnum;
2796 abi_ulong msg_qbytes;
2797 abi_ulong msg_lspid;
2798 abi_ulong msg_lrpid;
2799 abi_ulong __unused4;
2800 abi_ulong __unused5;
2803 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2804 abi_ulong target_addr)
2806 struct target_msqid_ds *target_md;
2808 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2809 return -TARGET_EFAULT;
2810 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2811 return -TARGET_EFAULT;
2812 host_md->msg_stime = tswapal(target_md->msg_stime);
2813 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2814 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2815 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2816 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2817 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2818 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2819 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2820 unlock_user_struct(target_md, target_addr, 0);
2821 return 0;
2824 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2825 struct msqid_ds *host_md)
2827 struct target_msqid_ds *target_md;
2829 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2830 return -TARGET_EFAULT;
2831 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2832 return -TARGET_EFAULT;
2833 target_md->msg_stime = tswapal(host_md->msg_stime);
2834 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2835 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2836 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2837 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2838 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2839 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2840 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2841 unlock_user_struct(target_md, target_addr, 1);
2842 return 0;
2845 struct target_msginfo {
2846 int msgpool;
2847 int msgmap;
2848 int msgmax;
2849 int msgmnb;
2850 int msgmni;
2851 int msgssz;
2852 int msgtql;
2853 unsigned short int msgseg;
2856 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2857 struct msginfo *host_msginfo)
2859 struct target_msginfo *target_msginfo;
2860 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2861 return -TARGET_EFAULT;
2862 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2863 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2864 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2865 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2866 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2867 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2868 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2869 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2870 unlock_user_struct(target_msginfo, target_addr, 1);
2871 return 0;
2874 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2876 struct msqid_ds dsarg;
2877 struct msginfo msginfo;
2878 abi_long ret = -TARGET_EINVAL;
2880 cmd &= 0xff;
2882 switch (cmd) {
2883 case IPC_STAT:
2884 case IPC_SET:
2885 case MSG_STAT:
2886 if (target_to_host_msqid_ds(&dsarg,ptr))
2887 return -TARGET_EFAULT;
2888 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2889 if (host_to_target_msqid_ds(ptr,&dsarg))
2890 return -TARGET_EFAULT;
2891 break;
2892 case IPC_RMID:
2893 ret = get_errno(msgctl(msgid, cmd, NULL));
2894 break;
2895 case IPC_INFO:
2896 case MSG_INFO:
2897 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2898 if (host_to_target_msginfo(ptr, &msginfo))
2899 return -TARGET_EFAULT;
2900 break;
2903 return ret;
2906 struct target_msgbuf {
2907 abi_long mtype;
2908 char mtext[1];
2911 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2912 ssize_t msgsz, int msgflg)
2914 struct target_msgbuf *target_mb;
2915 struct msgbuf *host_mb;
2916 abi_long ret = 0;
2918 if (msgsz < 0) {
2919 return -TARGET_EINVAL;
2922 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2923 return -TARGET_EFAULT;
2924 host_mb = malloc(msgsz+sizeof(long));
2925 if (!host_mb) {
2926 unlock_user_struct(target_mb, msgp, 0);
2927 return -TARGET_ENOMEM;
2929 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2930 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2931 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2932 free(host_mb);
2933 unlock_user_struct(target_mb, msgp, 0);
2935 return ret;
2938 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2939 unsigned int msgsz, abi_long msgtyp,
2940 int msgflg)
2942 struct target_msgbuf *target_mb;
2943 char *target_mtext;
2944 struct msgbuf *host_mb;
2945 abi_long ret = 0;
2947 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2948 return -TARGET_EFAULT;
2950 host_mb = g_malloc(msgsz+sizeof(long));
2951 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2953 if (ret > 0) {
2954 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2955 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2956 if (!target_mtext) {
2957 ret = -TARGET_EFAULT;
2958 goto end;
2960 memcpy(target_mb->mtext, host_mb->mtext, ret);
2961 unlock_user(target_mtext, target_mtext_addr, ret);
2964 target_mb->mtype = tswapal(host_mb->mtype);
2966 end:
2967 if (target_mb)
2968 unlock_user_struct(target_mb, msgp, 1);
2969 g_free(host_mb);
2970 return ret;
2973 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2974 abi_ulong target_addr)
2976 struct target_shmid_ds *target_sd;
2978 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2979 return -TARGET_EFAULT;
2980 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2981 return -TARGET_EFAULT;
2982 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2983 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2984 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2985 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2986 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2987 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2988 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2989 unlock_user_struct(target_sd, target_addr, 0);
2990 return 0;
2993 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2994 struct shmid_ds *host_sd)
2996 struct target_shmid_ds *target_sd;
2998 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2999 return -TARGET_EFAULT;
3000 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3001 return -TARGET_EFAULT;
3002 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3003 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3004 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3005 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3006 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3007 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3008 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3009 unlock_user_struct(target_sd, target_addr, 1);
3010 return 0;
3013 struct target_shminfo {
3014 abi_ulong shmmax;
3015 abi_ulong shmmin;
3016 abi_ulong shmmni;
3017 abi_ulong shmseg;
3018 abi_ulong shmall;
3021 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3022 struct shminfo *host_shminfo)
3024 struct target_shminfo *target_shminfo;
3025 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3026 return -TARGET_EFAULT;
3027 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3028 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3029 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3030 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3031 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3032 unlock_user_struct(target_shminfo, target_addr, 1);
3033 return 0;
3036 struct target_shm_info {
3037 int used_ids;
3038 abi_ulong shm_tot;
3039 abi_ulong shm_rss;
3040 abi_ulong shm_swp;
3041 abi_ulong swap_attempts;
3042 abi_ulong swap_successes;
3045 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3046 struct shm_info *host_shm_info)
3048 struct target_shm_info *target_shm_info;
3049 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3050 return -TARGET_EFAULT;
3051 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3052 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3053 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3054 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3055 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3056 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3057 unlock_user_struct(target_shm_info, target_addr, 1);
3058 return 0;
3061 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3063 struct shmid_ds dsarg;
3064 struct shminfo shminfo;
3065 struct shm_info shm_info;
3066 abi_long ret = -TARGET_EINVAL;
3068 cmd &= 0xff;
3070 switch(cmd) {
3071 case IPC_STAT:
3072 case IPC_SET:
3073 case SHM_STAT:
3074 if (target_to_host_shmid_ds(&dsarg, buf))
3075 return -TARGET_EFAULT;
3076 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3077 if (host_to_target_shmid_ds(buf, &dsarg))
3078 return -TARGET_EFAULT;
3079 break;
3080 case IPC_INFO:
3081 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3082 if (host_to_target_shminfo(buf, &shminfo))
3083 return -TARGET_EFAULT;
3084 break;
3085 case SHM_INFO:
3086 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3087 if (host_to_target_shm_info(buf, &shm_info))
3088 return -TARGET_EFAULT;
3089 break;
3090 case IPC_RMID:
3091 case SHM_LOCK:
3092 case SHM_UNLOCK:
3093 ret = get_errno(shmctl(shmid, cmd, NULL));
3094 break;
3097 return ret;
3100 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3102 abi_long raddr;
3103 void *host_raddr;
3104 struct shmid_ds shm_info;
3105 int i,ret;
3107 /* find out the length of the shared memory segment */
3108 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3109 if (is_error(ret)) {
3110 /* can't get length, bail out */
3111 return ret;
3114 mmap_lock();
3116 if (shmaddr)
3117 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3118 else {
3119 abi_ulong mmap_start;
3121 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3123 if (mmap_start == -1) {
3124 errno = ENOMEM;
3125 host_raddr = (void *)-1;
3126 } else
3127 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3130 if (host_raddr == (void *)-1) {
3131 mmap_unlock();
3132 return get_errno((long)host_raddr);
3134 raddr=h2g((unsigned long)host_raddr);
3136 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3137 PAGE_VALID | PAGE_READ |
3138 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3140 for (i = 0; i < N_SHM_REGIONS; i++) {
3141 if (shm_regions[i].start == 0) {
3142 shm_regions[i].start = raddr;
3143 shm_regions[i].size = shm_info.shm_segsz;
3144 break;
3148 mmap_unlock();
3149 return raddr;
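/*
 * do_shmat() records each successful attach in shm_regions[] (guest start
 * address plus segment size) so that do_shmdt() below can find the mapping
 * again and clear the page flags for exactly that range before detaching.
 */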
3153 static inline abi_long do_shmdt(abi_ulong shmaddr)
3155 int i;
3157 for (i = 0; i < N_SHM_REGIONS; ++i) {
3158 if (shm_regions[i].start == shmaddr) {
3159 shm_regions[i].start = 0;
3160 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3161 break;
3165 return get_errno(shmdt(g2h(shmaddr)));
3168 #ifdef TARGET_NR_ipc
3169 /* ??? This only works with linear mappings. */
3170 /* do_ipc() must return target values and target errnos. */
3171 static abi_long do_ipc(unsigned int call, abi_long first,
3172 abi_long second, abi_long third,
3173 abi_long ptr, abi_long fifth)
3175 int version;
3176 abi_long ret = 0;
3178 version = call >> 16;
3179 call &= 0xffff;
3181 switch (call) {
3182 case IPCOP_semop:
3183 ret = do_semop(first, ptr, second);
3184 break;
3186 case IPCOP_semget:
3187 ret = get_errno(semget(first, second, third));
3188 break;
3190 case IPCOP_semctl: {
3191 /* The semun argument to semctl is passed by value, so dereference the
3192 * ptr argument. */
3193 abi_ulong atptr;
3194 get_user_ual(atptr, ptr);
3195 ret = do_semctl(first, second, third,
3196 (union target_semun) atptr);
3197 break;
3200 case IPCOP_msgget:
3201 ret = get_errno(msgget(first, second));
3202 break;
3204 case IPCOP_msgsnd:
3205 ret = do_msgsnd(first, ptr, second, third);
3206 break;
3208 case IPCOP_msgctl:
3209 ret = do_msgctl(first, second, ptr);
3210 break;
3212 case IPCOP_msgrcv:
3213 switch (version) {
3214 case 0:
3216 struct target_ipc_kludge {
3217 abi_long msgp;
3218 abi_long msgtyp;
3219 } *tmp;
3221 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3222 ret = -TARGET_EFAULT;
3223 break;
3226 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3228 unlock_user_struct(tmp, ptr, 0);
3229 break;
3231 default:
3232 ret = do_msgrcv(first, ptr, second, fifth, third);
3234 break;
3236 case IPCOP_shmat:
3237 switch (version) {
3238 default:
3240 abi_ulong raddr;
3241 raddr = do_shmat(first, ptr, second);
3242 if (is_error(raddr))
3243 return get_errno(raddr);
3244 if (put_user_ual(raddr, third))
3245 return -TARGET_EFAULT;
3246 break;
3248 case 1:
3249 ret = -TARGET_EINVAL;
3250 break;
3252 break;
3253 case IPCOP_shmdt:
3254 ret = do_shmdt(ptr);
3255 break;
3257 case IPCOP_shmget:
3258 /* IPC_* flag values are the same on all linux platforms */
3259 ret = get_errno(shmget(first, second, third));
3260 break;
3262 /* IPC_* and SHM_* command values are the same on all linux platforms */
3263 case IPCOP_shmctl:
3264 ret = do_shmctl(first, second, ptr);
3265 break;
3266 default:
3267 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3268 ret = -TARGET_ENOSYS;
3269 break;
3271 return ret;
3273 #endif
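/*
 * Illustration for do_ipc() above: the guest encodes both the operation and
 * an interface version in the single "call" argument, roughly
 *
 *     call = (version << 16) | IPCOP_xxx;
 *
 * which is why the function splits it with "version = call >> 16" and
 * "call &= 0xffff" before dispatching to the individual do_*() helpers.
 */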
3275 /* kernel structure types definitions */
3277 #define STRUCT(name, ...) STRUCT_ ## name,
3278 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3279 enum {
3280 #include "syscall_types.h"
3282 #undef STRUCT
3283 #undef STRUCT_SPECIAL
3285 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3286 #define STRUCT_SPECIAL(name)
3287 #include "syscall_types.h"
3288 #undef STRUCT
3289 #undef STRUCT_SPECIAL
3291 typedef struct IOCTLEntry IOCTLEntry;
3293 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3294 int fd, abi_long cmd, abi_long arg);
3296 struct IOCTLEntry {
3297 int target_cmd;
3298 unsigned int host_cmd;
3299 const char *name;
3300 int access;
3301 do_ioctl_fn *do_ioctl;
3302 const argtype arg_type[5];
3305 #define IOC_R 0x0001
3306 #define IOC_W 0x0002
3307 #define IOC_RW (IOC_R | IOC_W)
3309 #define MAX_STRUCT_SIZE 4096
3311 #ifdef CONFIG_FIEMAP
3312 /* So fiemap access checks don't overflow on 32 bit systems.
3313 * This is very slightly smaller than the limit imposed by
3314 * the underlying kernel.
3316 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3317 / sizeof(struct fiemap_extent))
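/*
 * With that bound, the later computation
 *     outbufsz = sizeof(*fm) + fm->fm_extent_count * sizeof(struct fiemap_extent)
 * stays at or below UINT_MAX, so the 32-bit outbufsz cannot wrap around
 * before it is compared against MAX_STRUCT_SIZE or handed to malloc().
 */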
3319 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3320 int fd, abi_long cmd, abi_long arg)
3322 /* The parameter for this ioctl is a struct fiemap followed
3323 * by an array of struct fiemap_extent whose size is set
3324 * in fiemap->fm_extent_count. The array is filled in by the
3325 * ioctl.
3327 int target_size_in, target_size_out;
3328 struct fiemap *fm;
3329 const argtype *arg_type = ie->arg_type;
3330 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3331 void *argptr, *p;
3332 abi_long ret;
3333 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3334 uint32_t outbufsz;
3335 int free_fm = 0;
3337 assert(arg_type[0] == TYPE_PTR);
3338 assert(ie->access == IOC_RW);
3339 arg_type++;
3340 target_size_in = thunk_type_size(arg_type, 0);
3341 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3342 if (!argptr) {
3343 return -TARGET_EFAULT;
3345 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3346 unlock_user(argptr, arg, 0);
3347 fm = (struct fiemap *)buf_temp;
3348 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3349 return -TARGET_EINVAL;
3352 outbufsz = sizeof (*fm) +
3353 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3355 if (outbufsz > MAX_STRUCT_SIZE) {
3356 /* We can't fit all the extents into the fixed size buffer.
3357 * Allocate one that is large enough and use it instead.
3359 fm = malloc(outbufsz);
3360 if (!fm) {
3361 return -TARGET_ENOMEM;
3363 memcpy(fm, buf_temp, sizeof(struct fiemap));
3364 free_fm = 1;
3366 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3367 if (!is_error(ret)) {
3368 target_size_out = target_size_in;
3369 /* An extent_count of 0 means we were only counting the extents
3370 * so there are no structs to copy
3372 if (fm->fm_extent_count != 0) {
3373 target_size_out += fm->fm_mapped_extents * extent_size;
3375 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3376 if (!argptr) {
3377 ret = -TARGET_EFAULT;
3378 } else {
3379 /* Convert the struct fiemap */
3380 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3381 if (fm->fm_extent_count != 0) {
3382 p = argptr + target_size_in;
3383 /* ...and then all the struct fiemap_extents */
3384 for (i = 0; i < fm->fm_mapped_extents; i++) {
3385 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3386 THUNK_TARGET);
3387 p += extent_size;
3390 unlock_user(argptr, arg, target_size_out);
3393 if (free_fm) {
3394 free(fm);
3396 return ret;
3398 #endif
3400 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3401 int fd, abi_long cmd, abi_long arg)
3403 const argtype *arg_type = ie->arg_type;
3404 int target_size;
3405 void *argptr;
3406 int ret;
3407 struct ifconf *host_ifconf;
3408 uint32_t outbufsz;
3409 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3410 int target_ifreq_size;
3411 int nb_ifreq;
3412 int free_buf = 0;
3413 int i;
3414 int target_ifc_len;
3415 abi_long target_ifc_buf;
3416 int host_ifc_len;
3417 char *host_ifc_buf;
3419 assert(arg_type[0] == TYPE_PTR);
3420 assert(ie->access == IOC_RW);
3422 arg_type++;
3423 target_size = thunk_type_size(arg_type, 0);
3425 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3426 if (!argptr)
3427 return -TARGET_EFAULT;
3428 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3429 unlock_user(argptr, arg, 0);
3431 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3432 target_ifc_len = host_ifconf->ifc_len;
3433 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3435 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3436 nb_ifreq = target_ifc_len / target_ifreq_size;
3437 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3439 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3440 if (outbufsz > MAX_STRUCT_SIZE) {
3441 /* We can't fit all the ifreq records into the fixed size buffer.
3442 * Allocate one that is large enough and use it instead.
3444 host_ifconf = malloc(outbufsz);
3445 if (!host_ifconf) {
3446 return -TARGET_ENOMEM;
3448 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3449 free_buf = 1;
3451 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3453 host_ifconf->ifc_len = host_ifc_len;
3454 host_ifconf->ifc_buf = host_ifc_buf;
3456 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3457 if (!is_error(ret)) {
3458 /* convert host ifc_len to target ifc_len */
3460 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3461 target_ifc_len = nb_ifreq * target_ifreq_size;
3462 host_ifconf->ifc_len = target_ifc_len;
3464 /* restore target ifc_buf */
3466 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3468 /* copy struct ifconf to target user */
3470 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3471 if (!argptr)
3472 return -TARGET_EFAULT;
3473 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3474 unlock_user(argptr, arg, target_size);
3476 /* copy ifreq[] to target user */
3478 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3479 for (i = 0; i < nb_ifreq ; i++) {
3480 thunk_convert(argptr + i * target_ifreq_size,
3481 host_ifc_buf + i * sizeof(struct ifreq),
3482 ifreq_arg_type, THUNK_TARGET);
3484 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3487 if (free_buf) {
3488 free(host_ifconf);
3491 return ret;
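/*
 * Note the size scaling in do_ioctl_ifconf() above: the guest's ifc_len is
 * measured in target-sized ifreq records, so the same record count is
 * multiplied by sizeof(struct ifreq) for the host call and scaled back to
 * target_ifreq_size units before the result is copied out to the guest.
 */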
3494 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3495 abi_long cmd, abi_long arg)
3497 void *argptr;
3498 struct dm_ioctl *host_dm;
3499 abi_long guest_data;
3500 uint32_t guest_data_size;
3501 int target_size;
3502 const argtype *arg_type = ie->arg_type;
3503 abi_long ret;
3504 void *big_buf = NULL;
3505 char *host_data;
3507 arg_type++;
3508 target_size = thunk_type_size(arg_type, 0);
3509 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3510 if (!argptr) {
3511 ret = -TARGET_EFAULT;
3512 goto out;
3514 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3515 unlock_user(argptr, arg, 0);
3517 /* buf_temp is too small, so fetch things into a bigger buffer */
3518 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3519 memcpy(big_buf, buf_temp, target_size);
3520 buf_temp = big_buf;
3521 host_dm = big_buf;
3523 guest_data = arg + host_dm->data_start;
3524 if ((guest_data - arg) < 0) {
3525 ret = -EINVAL;
3526 goto out;
3528 guest_data_size = host_dm->data_size - host_dm->data_start;
3529 host_data = (char*)host_dm + host_dm->data_start;
3531 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3532 switch (ie->host_cmd) {
3533 case DM_REMOVE_ALL:
3534 case DM_LIST_DEVICES:
3535 case DM_DEV_CREATE:
3536 case DM_DEV_REMOVE:
3537 case DM_DEV_SUSPEND:
3538 case DM_DEV_STATUS:
3539 case DM_DEV_WAIT:
3540 case DM_TABLE_STATUS:
3541 case DM_TABLE_CLEAR:
3542 case DM_TABLE_DEPS:
3543 case DM_LIST_VERSIONS:
3544 /* no input data */
3545 break;
3546 case DM_DEV_RENAME:
3547 case DM_DEV_SET_GEOMETRY:
3548 /* data contains only strings */
3549 memcpy(host_data, argptr, guest_data_size);
3550 break;
3551 case DM_TARGET_MSG:
3552 memcpy(host_data, argptr, guest_data_size);
3553 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3554 break;
3555 case DM_TABLE_LOAD:
3557 void *gspec = argptr;
3558 void *cur_data = host_data;
3559 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3560 int spec_size = thunk_type_size(arg_type, 0);
3561 int i;
3563 for (i = 0; i < host_dm->target_count; i++) {
3564 struct dm_target_spec *spec = cur_data;
3565 uint32_t next;
3566 int slen;
3568 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3569 slen = strlen((char*)gspec + spec_size) + 1;
3570 next = spec->next;
3571 spec->next = sizeof(*spec) + slen;
3572 strcpy((char*)&spec[1], gspec + spec_size);
3573 gspec += next;
3574 cur_data += spec->next;
3576 break;
3578 default:
3579 ret = -TARGET_EINVAL;
3580 unlock_user(argptr, guest_data, 0);
3581 goto out;
3583 unlock_user(argptr, guest_data, 0);
3585 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3586 if (!is_error(ret)) {
3587 guest_data = arg + host_dm->data_start;
3588 guest_data_size = host_dm->data_size - host_dm->data_start;
3589 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3590 switch (ie->host_cmd) {
3591 case DM_REMOVE_ALL:
3592 case DM_DEV_CREATE:
3593 case DM_DEV_REMOVE:
3594 case DM_DEV_RENAME:
3595 case DM_DEV_SUSPEND:
3596 case DM_DEV_STATUS:
3597 case DM_TABLE_LOAD:
3598 case DM_TABLE_CLEAR:
3599 case DM_TARGET_MSG:
3600 case DM_DEV_SET_GEOMETRY:
3601 /* no return data */
3602 break;
3603 case DM_LIST_DEVICES:
3605 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3606 uint32_t remaining_data = guest_data_size;
3607 void *cur_data = argptr;
3608 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3609 int nl_size = 12; /* can't use thunk_size due to alignment */
3611 while (1) {
3612 uint32_t next = nl->next;
3613 if (next) {
3614 nl->next = nl_size + (strlen(nl->name) + 1);
3616 if (remaining_data < nl->next) {
3617 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3618 break;
3620 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3621 strcpy(cur_data + nl_size, nl->name);
3622 cur_data += nl->next;
3623 remaining_data -= nl->next;
3624 if (!next) {
3625 break;
3627 nl = (void*)nl + next;
3629 break;
3631 case DM_DEV_WAIT:
3632 case DM_TABLE_STATUS:
3634 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3635 void *cur_data = argptr;
3636 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3637 int spec_size = thunk_type_size(arg_type, 0);
3638 int i;
3640 for (i = 0; i < host_dm->target_count; i++) {
3641 uint32_t next = spec->next;
3642 int slen = strlen((char*)&spec[1]) + 1;
3643 spec->next = (cur_data - argptr) + spec_size + slen;
3644 if (guest_data_size < spec->next) {
3645 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3646 break;
3648 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3649 strcpy(cur_data + spec_size, (char*)&spec[1]);
3650 cur_data = argptr + spec->next;
3651 spec = (void*)host_dm + host_dm->data_start + next;
3653 break;
3655 case DM_TABLE_DEPS:
3657 void *hdata = (void*)host_dm + host_dm->data_start;
3658 int count = *(uint32_t*)hdata;
3659 uint64_t *hdev = hdata + 8;
3660 uint64_t *gdev = argptr + 8;
3661 int i;
3663 *(uint32_t*)argptr = tswap32(count);
3664 for (i = 0; i < count; i++) {
3665 *gdev = tswap64(*hdev);
3666 gdev++;
3667 hdev++;
3669 break;
3671 case DM_LIST_VERSIONS:
3673 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3674 uint32_t remaining_data = guest_data_size;
3675 void *cur_data = argptr;
3676 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3677 int vers_size = thunk_type_size(arg_type, 0);
3679 while (1) {
3680 uint32_t next = vers->next;
3681 if (next) {
3682 vers->next = vers_size + (strlen(vers->name) + 1);
3684 if (remaining_data < vers->next) {
3685 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3686 break;
3688 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3689 strcpy(cur_data + vers_size, vers->name);
3690 cur_data += vers->next;
3691 remaining_data -= vers->next;
3692 if (!next) {
3693 break;
3695 vers = (void*)vers + next;
3697 break;
3699 default:
3700 unlock_user(argptr, guest_data, 0);
3701 ret = -TARGET_EINVAL;
3702 goto out;
3704 unlock_user(argptr, guest_data, guest_data_size);
3706 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3707 if (!argptr) {
3708 ret = -TARGET_EFAULT;
3709 goto out;
3711 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3712 unlock_user(argptr, arg, target_size);
3714 out:
3715 g_free(big_buf);
3716 return ret;
3719 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3720 abi_long cmd, abi_long arg)
3722 void *argptr;
3723 int target_size;
3724 const argtype *arg_type = ie->arg_type;
3725 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
3726 abi_long ret;
3728 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
3729 struct blkpg_partition host_part;
3731 /* Read and convert blkpg */
3732 arg_type++;
3733 target_size = thunk_type_size(arg_type, 0);
3734 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3735 if (!argptr) {
3736 ret = -TARGET_EFAULT;
3737 goto out;
3739 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3740 unlock_user(argptr, arg, 0);
3742 switch (host_blkpg->op) {
3743 case BLKPG_ADD_PARTITION:
3744 case BLKPG_DEL_PARTITION:
3745 /* payload is struct blkpg_partition */
3746 break;
3747 default:
3748 /* Unknown opcode */
3749 ret = -TARGET_EINVAL;
3750 goto out;
3753 /* Read and convert blkpg->data */
3754 arg = (abi_long)(uintptr_t)host_blkpg->data;
3755 target_size = thunk_type_size(part_arg_type, 0);
3756 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3757 if (!argptr) {
3758 ret = -TARGET_EFAULT;
3759 goto out;
3761 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
3762 unlock_user(argptr, arg, 0);
3764 /* Swizzle the data pointer to our local copy and call! */
3765 host_blkpg->data = &host_part;
3766 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
3768 out:
3769 return ret;
3772 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3773 int fd, abi_long cmd, abi_long arg)
3775 const argtype *arg_type = ie->arg_type;
3776 const StructEntry *se;
3777 const argtype *field_types;
3778 const int *dst_offsets, *src_offsets;
3779 int target_size;
3780 void *argptr;
3781 abi_ulong *target_rt_dev_ptr;
3782 unsigned long *host_rt_dev_ptr;
3783 abi_long ret;
3784 int i;
3786 assert(ie->access == IOC_W);
3787 assert(*arg_type == TYPE_PTR);
3788 arg_type++;
3789 assert(*arg_type == TYPE_STRUCT);
3790 target_size = thunk_type_size(arg_type, 0);
3791 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3792 if (!argptr) {
3793 return -TARGET_EFAULT;
3795 arg_type++;
3796 assert(*arg_type == (int)STRUCT_rtentry);
3797 se = struct_entries + *arg_type++;
3798 assert(se->convert[0] == NULL);
3799 /* convert struct here to be able to catch rt_dev string */
3800 field_types = se->field_types;
3801 dst_offsets = se->field_offsets[THUNK_HOST];
3802 src_offsets = se->field_offsets[THUNK_TARGET];
3803 for (i = 0; i < se->nb_fields; i++) {
3804 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3805 assert(*field_types == TYPE_PTRVOID);
3806 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3807 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3808 if (*target_rt_dev_ptr != 0) {
3809 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3810 tswapal(*target_rt_dev_ptr));
3811 if (!*host_rt_dev_ptr) {
3812 unlock_user(argptr, arg, 0);
3813 return -TARGET_EFAULT;
3815 } else {
3816 *host_rt_dev_ptr = 0;
3818 field_types++;
3819 continue;
3821 field_types = thunk_convert(buf_temp + dst_offsets[i],
3822 argptr + src_offsets[i],
3823 field_types, THUNK_HOST);
3825 unlock_user(argptr, arg, 0);
3827 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3828 if (*host_rt_dev_ptr != 0) {
3829 unlock_user((void *)*host_rt_dev_ptr,
3830 *target_rt_dev_ptr, 0);
3832 return ret;
3835 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
3836 int fd, abi_long cmd, abi_long arg)
3838 int sig = target_to_host_signal(arg);
3839 return get_errno(ioctl(fd, ie->host_cmd, sig));
3842 static IOCTLEntry ioctl_entries[] = {
3843 #define IOCTL(cmd, access, ...) \
3844 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3845 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3846 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3847 #include "ioctls.h"
3848 { 0, 0, },
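/*
 * Each IOCTL()/IOCTL_SPECIAL() line in ioctls.h expands to one IOCTLEntry;
 * the all-zero entry above terminates the table, which is why the lookup
 * loop in do_ioctl() below stops when it finds target_cmd == 0.
 */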
3851 /* ??? Implement proper locking for ioctls. */
3852 /* do_ioctl() Must return target values and target errnos. */
3853 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3855 const IOCTLEntry *ie;
3856 const argtype *arg_type;
3857 abi_long ret;
3858 uint8_t buf_temp[MAX_STRUCT_SIZE];
3859 int target_size;
3860 void *argptr;
3862 ie = ioctl_entries;
3863 for(;;) {
3864 if (ie->target_cmd == 0) {
3865 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3866 return -TARGET_ENOSYS;
3868 if (ie->target_cmd == cmd)
3869 break;
3870 ie++;
3872 arg_type = ie->arg_type;
3873 #if defined(DEBUG)
3874 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3875 #endif
3876 if (ie->do_ioctl) {
3877 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3880 switch(arg_type[0]) {
3881 case TYPE_NULL:
3882 /* no argument */
3883 ret = get_errno(ioctl(fd, ie->host_cmd));
3884 break;
3885 case TYPE_PTRVOID:
3886 case TYPE_INT:
3887 /* int argument */
3888 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3889 break;
3890 case TYPE_PTR:
3891 arg_type++;
3892 target_size = thunk_type_size(arg_type, 0);
3893 switch(ie->access) {
3894 case IOC_R:
3895 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3896 if (!is_error(ret)) {
3897 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3898 if (!argptr)
3899 return -TARGET_EFAULT;
3900 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3901 unlock_user(argptr, arg, target_size);
3903 break;
3904 case IOC_W:
3905 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3906 if (!argptr)
3907 return -TARGET_EFAULT;
3908 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3909 unlock_user(argptr, arg, 0);
3910 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3911 break;
3912 default:
3913 case IOC_RW:
3914 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3915 if (!argptr)
3916 return -TARGET_EFAULT;
3917 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3918 unlock_user(argptr, arg, 0);
3919 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3920 if (!is_error(ret)) {
3921 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3922 if (!argptr)
3923 return -TARGET_EFAULT;
3924 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3925 unlock_user(argptr, arg, target_size);
3927 break;
3929 break;
3930 default:
3931 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3932 (long)cmd, arg_type[0]);
3933 ret = -TARGET_ENOSYS;
3934 break;
3936 return ret;
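/*
 * The termios flag tables below are in bitmask_transtbl format: each row
 * pairs a target mask and value with the corresponding host mask and value,
 * i.e. { target_mask, target_bits, host_mask, host_bits }, and an all-zero
 * row terminates the table. target_to_host_bitmask() and its counterpart
 * walk these rows to translate flag words in either direction.
 */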
3939 static const bitmask_transtbl iflag_tbl[] = {
3940 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3941 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3942 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3943 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3944 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3945 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3946 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3947 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3948 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3949 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3950 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3951 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3952 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3953 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3954 { 0, 0, 0, 0 }
3957 static const bitmask_transtbl oflag_tbl[] = {
3958 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3959 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3960 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3961 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3962 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3963 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3964 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3965 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3966 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3967 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3968 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3969 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3970 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3971 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3972 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3973 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3974 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3975 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3976 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3977 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3978 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3979 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3980 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3981 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3982 { 0, 0, 0, 0 }
3985 static const bitmask_transtbl cflag_tbl[] = {
3986 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3987 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3988 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3989 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3990 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3991 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3992 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3993 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3994 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3995 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3996 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3997 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3998 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3999 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4000 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4001 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4002 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4003 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4004 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4005 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4006 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4007 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4008 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4009 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4010 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4011 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4012 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4013 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4014 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4015 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4016 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4017 { 0, 0, 0, 0 }
4020 static const bitmask_transtbl lflag_tbl[] = {
4021 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4022 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4023 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4024 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4025 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4026 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4027 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4028 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4029 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4030 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4031 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4032 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4033 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4034 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4035 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4036 { 0, 0, 0, 0 }
4039 static void target_to_host_termios (void *dst, const void *src)
4041 struct host_termios *host = dst;
4042 const struct target_termios *target = src;
4044 host->c_iflag =
4045 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4046 host->c_oflag =
4047 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4048 host->c_cflag =
4049 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4050 host->c_lflag =
4051 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4052 host->c_line = target->c_line;
4054 memset(host->c_cc, 0, sizeof(host->c_cc));
4055 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4056 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4057 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4058 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4059 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4060 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4061 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4062 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4063 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4064 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4065 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4066 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4067 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4068 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4069 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4070 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4071 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
4074 static void host_to_target_termios (void *dst, const void *src)
4076 struct target_termios *target = dst;
4077 const struct host_termios *host = src;
4079 target->c_iflag =
4080 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4081 target->c_oflag =
4082 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4083 target->c_cflag =
4084 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4085 target->c_lflag =
4086 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4087 target->c_line = host->c_line;
4089 memset(target->c_cc, 0, sizeof(target->c_cc));
4090 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4091 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4092 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4093 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4094 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4095 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4096 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4097 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4098 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4099 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4100 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4101 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4102 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4103 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4104 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4105 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4106 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
4109 static const StructEntry struct_termios_def = {
4110 .convert = { host_to_target_termios, target_to_host_termios },
4111 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4112 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
4115 static bitmask_transtbl mmap_flags_tbl[] = {
4116 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4117 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4118 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4119 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4120 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4121 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4122 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4123 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4124 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4125 MAP_NORESERVE },
4126 { 0, 0, 0, 0 }
4129 #if defined(TARGET_I386)
4131 /* NOTE: there is really only one LDT, shared by all threads */
4132 static uint8_t *ldt_table;
4134 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4136 int size;
4137 void *p;
4139 if (!ldt_table)
4140 return 0;
4141 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4142 if (size > bytecount)
4143 size = bytecount;
4144 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4145 if (!p)
4146 return -TARGET_EFAULT;
4147 /* ??? Should this be byteswapped? */
4148 memcpy(p, ldt_table, size);
4149 unlock_user(p, ptr, size);
4150 return size;
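/* write_ldt() and do_set_thread_area() below pack descriptors in the
   hardware GDT/LDT format: entry_1 holds base[15:0] in its top half and
   limit[15:0] in its bottom half; entry_2 holds base[31:24] and base[23:16],
   limit[19:16], and the access bits (present, DPL=3 via 0x7000, the
   "contents" type field, D/B, G, AVL and the 64-bit L bit). */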
4153 /* XXX: add locking support */
4154 static abi_long write_ldt(CPUX86State *env,
4155 abi_ulong ptr, unsigned long bytecount, int oldmode)
4157 struct target_modify_ldt_ldt_s ldt_info;
4158 struct target_modify_ldt_ldt_s *target_ldt_info;
4159 int seg_32bit, contents, read_exec_only, limit_in_pages;
4160 int seg_not_present, useable, lm;
4161 uint32_t *lp, entry_1, entry_2;
4163 if (bytecount != sizeof(ldt_info))
4164 return -TARGET_EINVAL;
4165 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4166 return -TARGET_EFAULT;
4167 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4168 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4169 ldt_info.limit = tswap32(target_ldt_info->limit);
4170 ldt_info.flags = tswap32(target_ldt_info->flags);
4171 unlock_user_struct(target_ldt_info, ptr, 0);
4173 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4174 return -TARGET_EINVAL;
4175 seg_32bit = ldt_info.flags & 1;
4176 contents = (ldt_info.flags >> 1) & 3;
4177 read_exec_only = (ldt_info.flags >> 3) & 1;
4178 limit_in_pages = (ldt_info.flags >> 4) & 1;
4179 seg_not_present = (ldt_info.flags >> 5) & 1;
4180 useable = (ldt_info.flags >> 6) & 1;
4181 #ifdef TARGET_ABI32
4182 lm = 0;
4183 #else
4184 lm = (ldt_info.flags >> 7) & 1;
4185 #endif
4186 if (contents == 3) {
4187 if (oldmode)
4188 return -TARGET_EINVAL;
4189 if (seg_not_present == 0)
4190 return -TARGET_EINVAL;
4192 /* allocate the LDT */
4193 if (!ldt_table) {
4194 env->ldt.base = target_mmap(0,
4195 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4196 PROT_READ|PROT_WRITE,
4197 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4198 if (env->ldt.base == -1)
4199 return -TARGET_ENOMEM;
4200 memset(g2h(env->ldt.base), 0,
4201 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4202 env->ldt.limit = 0xffff;
4203 ldt_table = g2h(env->ldt.base);
4206 /* NOTE: same code as Linux kernel */
4207 /* Allow LDTs to be cleared by the user. */
4208 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4209 if (oldmode ||
4210 (contents == 0 &&
4211 read_exec_only == 1 &&
4212 seg_32bit == 0 &&
4213 limit_in_pages == 0 &&
4214 seg_not_present == 1 &&
4215 useable == 0 )) {
4216 entry_1 = 0;
4217 entry_2 = 0;
4218 goto install;
4222 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4223 (ldt_info.limit & 0x0ffff);
4224 entry_2 = (ldt_info.base_addr & 0xff000000) |
4225 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4226 (ldt_info.limit & 0xf0000) |
4227 ((read_exec_only ^ 1) << 9) |
4228 (contents << 10) |
4229 ((seg_not_present ^ 1) << 15) |
4230 (seg_32bit << 22) |
4231 (limit_in_pages << 23) |
4232 (lm << 21) |
4233 0x7000;
4234 if (!oldmode)
4235 entry_2 |= (useable << 20);
4237 /* Install the new entry ... */
4238 install:
4239 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4240 lp[0] = tswap32(entry_1);
4241 lp[1] = tswap32(entry_2);
4242 return 0;
4245 /* specific and weird i386 syscalls */
4246 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4247 unsigned long bytecount)
4249 abi_long ret;
4251 switch (func) {
4252 case 0:
4253 ret = read_ldt(ptr, bytecount);
4254 break;
4255 case 1:
4256 ret = write_ldt(env, ptr, bytecount, 1);
4257 break;
4258 case 0x11:
4259 ret = write_ldt(env, ptr, bytecount, 0);
4260 break;
4261 default:
4262 ret = -TARGET_ENOSYS;
4263 break;
4265 return ret;
4268 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4269 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4271 uint64_t *gdt_table = g2h(env->gdt.base);
4272 struct target_modify_ldt_ldt_s ldt_info;
4273 struct target_modify_ldt_ldt_s *target_ldt_info;
4274 int seg_32bit, contents, read_exec_only, limit_in_pages;
4275 int seg_not_present, useable, lm;
4276 uint32_t *lp, entry_1, entry_2;
4277 int i;
4279 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4280 if (!target_ldt_info)
4281 return -TARGET_EFAULT;
4282 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4283 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4284 ldt_info.limit = tswap32(target_ldt_info->limit);
4285 ldt_info.flags = tswap32(target_ldt_info->flags);
4286 if (ldt_info.entry_number == -1) {
4287 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4288 if (gdt_table[i] == 0) {
4289 ldt_info.entry_number = i;
4290 target_ldt_info->entry_number = tswap32(i);
4291 break;
4295 unlock_user_struct(target_ldt_info, ptr, 1);
4297 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4298 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4299 return -TARGET_EINVAL;
4300 seg_32bit = ldt_info.flags & 1;
4301 contents = (ldt_info.flags >> 1) & 3;
4302 read_exec_only = (ldt_info.flags >> 3) & 1;
4303 limit_in_pages = (ldt_info.flags >> 4) & 1;
4304 seg_not_present = (ldt_info.flags >> 5) & 1;
4305 useable = (ldt_info.flags >> 6) & 1;
4306 #ifdef TARGET_ABI32
4307 lm = 0;
4308 #else
4309 lm = (ldt_info.flags >> 7) & 1;
4310 #endif
4312 if (contents == 3) {
4313 if (seg_not_present == 0)
4314 return -TARGET_EINVAL;
4317 /* NOTE: same code as Linux kernel */
4318 /* Allow LDTs to be cleared by the user. */
4319 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4320 if ((contents == 0 &&
4321 read_exec_only == 1 &&
4322 seg_32bit == 0 &&
4323 limit_in_pages == 0 &&
4324 seg_not_present == 1 &&
4325 useable == 0 )) {
4326 entry_1 = 0;
4327 entry_2 = 0;
4328 goto install;
4332 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4333 (ldt_info.limit & 0x0ffff);
4334 entry_2 = (ldt_info.base_addr & 0xff000000) |
4335 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4336 (ldt_info.limit & 0xf0000) |
4337 ((read_exec_only ^ 1) << 9) |
4338 (contents << 10) |
4339 ((seg_not_present ^ 1) << 15) |
4340 (seg_32bit << 22) |
4341 (limit_in_pages << 23) |
4342 (useable << 20) |
4343 (lm << 21) |
4344 0x7000;
4346 /* Install the new entry ... */
4347 install:
4348 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4349 lp[0] = tswap32(entry_1);
4350 lp[1] = tswap32(entry_2);
4351 return 0;
4354 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4356 struct target_modify_ldt_ldt_s *target_ldt_info;
4357 uint64_t *gdt_table = g2h(env->gdt.base);
4358 uint32_t base_addr, limit, flags;
4359 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4360 int seg_not_present, useable, lm;
4361 uint32_t *lp, entry_1, entry_2;
4363 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4364 if (!target_ldt_info)
4365 return -TARGET_EFAULT;
4366 idx = tswap32(target_ldt_info->entry_number);
4367 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4368 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4369 unlock_user_struct(target_ldt_info, ptr, 1);
4370 return -TARGET_EINVAL;
4372 lp = (uint32_t *)(gdt_table + idx);
4373 entry_1 = tswap32(lp[0]);
4374 entry_2 = tswap32(lp[1]);
4376 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4377 contents = (entry_2 >> 10) & 3;
4378 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4379 seg_32bit = (entry_2 >> 22) & 1;
4380 limit_in_pages = (entry_2 >> 23) & 1;
4381 useable = (entry_2 >> 20) & 1;
4382 #ifdef TARGET_ABI32
4383 lm = 0;
4384 #else
4385 lm = (entry_2 >> 21) & 1;
4386 #endif
4387 flags = (seg_32bit << 0) | (contents << 1) |
4388 (read_exec_only << 3) | (limit_in_pages << 4) |
4389 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4390 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4391 base_addr = (entry_1 >> 16) |
4392 (entry_2 & 0xff000000) |
4393 ((entry_2 & 0xff) << 16);
4394 target_ldt_info->base_addr = tswapal(base_addr);
4395 target_ldt_info->limit = tswap32(limit);
4396 target_ldt_info->flags = tswap32(flags);
4397 unlock_user_struct(target_ldt_info, ptr, 1);
4398 return 0;
4400 #endif /* TARGET_I386 && TARGET_ABI32 */
4402 #ifndef TARGET_ABI32
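/* 64-bit x86 guests only: arch_prctl() reads or writes the FS/GS segment
   base. For the SET case we load a null selector and then overwrite the
   cached segment base directly with the guest-supplied address. */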
4403 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4405 abi_long ret = 0;
4406 abi_ulong val;
4407 int idx;
4409 switch(code) {
4410 case TARGET_ARCH_SET_GS:
4411 case TARGET_ARCH_SET_FS:
4412 if (code == TARGET_ARCH_SET_GS)
4413 idx = R_GS;
4414 else
4415 idx = R_FS;
4416 cpu_x86_load_seg(env, idx, 0);
4417 env->segs[idx].base = addr;
4418 break;
4419 case TARGET_ARCH_GET_GS:
4420 case TARGET_ARCH_GET_FS:
4421 if (code == TARGET_ARCH_GET_GS)
4422 idx = R_GS;
4423 else
4424 idx = R_FS;
4425 val = env->segs[idx].base;
4426 if (put_user(val, addr, abi_ulong))
4427 ret = -TARGET_EFAULT;
4428 break;
4429 default:
4430 ret = -TARGET_EINVAL;
4431 break;
4433 return ret;
4435 #endif
4437 #endif /* defined(TARGET_I386) */
4439 #define NEW_STACK_SIZE 0x40000
4442 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
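/* Parameter block passed from do_fork() to the new thread's clone_func().
   The parent waits on 'cond' until the child has published its TID, and
   clone_lock keeps the child from entering the guest cpu_loop() before the
   parent has finished setting up its state. */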
4443 typedef struct {
4444 CPUArchState *env;
4445 pthread_mutex_t mutex;
4446 pthread_cond_t cond;
4447 pthread_t thread;
4448 uint32_t tid;
4449 abi_ulong child_tidptr;
4450 abi_ulong parent_tidptr;
4451 sigset_t sigmask;
4452 } new_thread_info;
4454 static void * QEMU_NORETURN clone_func(void *arg)
4456 new_thread_info *info = arg;
4457 CPUArchState *env;
4458 CPUState *cpu;
4459 TaskState *ts;
4461 env = info->env;
4462 cpu = ENV_GET_CPU(env);
4463 thread_cpu = cpu;
4464 ts = (TaskState *)cpu->opaque;
4465 info->tid = gettid();
4466 cpu->host_tid = info->tid;
4467 task_settid(ts);
4468 if (info->child_tidptr)
4469 put_user_u32(info->tid, info->child_tidptr);
4470 if (info->parent_tidptr)
4471 put_user_u32(info->tid, info->parent_tidptr);
4472 /* Enable signals. */
4473 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4474 /* Signal to the parent that we're ready. */
4475 pthread_mutex_lock(&info->mutex);
4476 pthread_cond_broadcast(&info->cond);
4477 pthread_mutex_unlock(&info->mutex);
4478 /* Wait until the parent has finished initializing the TLS state. */
4479 pthread_mutex_lock(&clone_lock);
4480 pthread_mutex_unlock(&clone_lock);
4481 cpu_loop(env);
4482 /* never exits */
4485 /* do_fork() must return host values and target errnos (unlike most
4486 do_*() functions). */
4487 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4488 abi_ulong parent_tidptr, target_ulong newtls,
4489 abi_ulong child_tidptr)
4491 CPUState *cpu = ENV_GET_CPU(env);
4492 int ret;
4493 TaskState *ts;
4494 CPUState *new_cpu;
4495 CPUArchState *new_env;
4496 unsigned int nptl_flags;
4497 sigset_t sigmask;
4499 /* Emulate vfork() with fork() */
4500 if (flags & CLONE_VFORK)
4501 flags &= ~(CLONE_VFORK | CLONE_VM);
4503 if (flags & CLONE_VM) {
4504 TaskState *parent_ts = (TaskState *)cpu->opaque;
4505 new_thread_info info;
4506 pthread_attr_t attr;
4508 ts = g_malloc0(sizeof(TaskState));
4509 init_task_state(ts);
4510 /* we create a new CPU instance. */
4511 new_env = cpu_copy(env);
4512 /* Init regs that differ from the parent. */
4513 cpu_clone_regs(new_env, newsp);
4514 new_cpu = ENV_GET_CPU(new_env);
4515 new_cpu->opaque = ts;
4516 ts->bprm = parent_ts->bprm;
4517 ts->info = parent_ts->info;
4518 nptl_flags = flags;
4519 flags &= ~CLONE_NPTL_FLAGS2;
4521 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4522 ts->child_tidptr = child_tidptr;
4525 if (nptl_flags & CLONE_SETTLS)
4526 cpu_set_tls (new_env, newtls);
4528 /* Grab a mutex so that thread setup appears atomic. */
4529 pthread_mutex_lock(&clone_lock);
4531 memset(&info, 0, sizeof(info));
4532 pthread_mutex_init(&info.mutex, NULL);
4533 pthread_mutex_lock(&info.mutex);
4534 pthread_cond_init(&info.cond, NULL);
4535 info.env = new_env;
4536 if (nptl_flags & CLONE_CHILD_SETTID)
4537 info.child_tidptr = child_tidptr;
4538 if (nptl_flags & CLONE_PARENT_SETTID)
4539 info.parent_tidptr = parent_tidptr;
4541 ret = pthread_attr_init(&attr);
4542 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4543 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4544 /* It is not safe to deliver signals until the child has finished
4545 initializing, so temporarily block all signals. */
4546 sigfillset(&sigmask);
4547 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4549 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4550 /* TODO: Free new CPU state if thread creation failed. */
4552 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4553 pthread_attr_destroy(&attr);
4554 if (ret == 0) {
4555 /* Wait for the child to initialize. */
4556 pthread_cond_wait(&info.cond, &info.mutex);
4557 ret = info.tid;
4558 if (flags & CLONE_PARENT_SETTID)
4559 put_user_u32(ret, parent_tidptr);
4560 } else {
4561 ret = -1;
4563 pthread_mutex_unlock(&info.mutex);
4564 pthread_cond_destroy(&info.cond);
4565 pthread_mutex_destroy(&info.mutex);
4566 pthread_mutex_unlock(&clone_lock);
4567 } else {
4568 /* if there is no CLONE_VM, we consider it a fork */
4569 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4570 return -EINVAL;
4571 fork_start();
4572 ret = fork();
4573 if (ret == 0) {
4574 /* Child Process. */
4575 rcu_after_fork();
4576 cpu_clone_regs(env, newsp);
4577 fork_end(1);
4578 /* There is a race condition here. The parent process could
4579 theoretically read the TID in the child process before the child
4580 tid is set. This would require using either ptrace
4581 (not implemented) or having *_tidptr point at a shared memory
4582 mapping. We can't repeat the spinlock hack used above because
4583 the child process gets its own copy of the lock. */
4584 if (flags & CLONE_CHILD_SETTID)
4585 put_user_u32(gettid(), child_tidptr);
4586 if (flags & CLONE_PARENT_SETTID)
4587 put_user_u32(gettid(), parent_tidptr);
4588 ts = (TaskState *)cpu->opaque;
4589 if (flags & CLONE_SETTLS)
4590 cpu_set_tls (env, newtls);
4591 if (flags & CLONE_CHILD_CLEARTID)
4592 ts->child_tidptr = child_tidptr;
4593 } else {
4594 fork_end(0);
4597 return ret;
4600 /* warning: doesn't handle Linux-specific flags... */
4601 static int target_to_host_fcntl_cmd(int cmd)
4603 switch(cmd) {
4604 case TARGET_F_DUPFD:
4605 case TARGET_F_GETFD:
4606 case TARGET_F_SETFD:
4607 case TARGET_F_GETFL:
4608 case TARGET_F_SETFL:
4609 return cmd;
4610 case TARGET_F_GETLK:
4611 return F_GETLK;
4612 case TARGET_F_SETLK:
4613 return F_SETLK;
4614 case TARGET_F_SETLKW:
4615 return F_SETLKW;
4616 case TARGET_F_GETOWN:
4617 return F_GETOWN;
4618 case TARGET_F_SETOWN:
4619 return F_SETOWN;
4620 case TARGET_F_GETSIG:
4621 return F_GETSIG;
4622 case TARGET_F_SETSIG:
4623 return F_SETSIG;
4624 #if TARGET_ABI_BITS == 32
4625 case TARGET_F_GETLK64:
4626 return F_GETLK64;
4627 case TARGET_F_SETLK64:
4628 return F_SETLK64;
4629 case TARGET_F_SETLKW64:
4630 return F_SETLKW64;
4631 #endif
4632 case TARGET_F_SETLEASE:
4633 return F_SETLEASE;
4634 case TARGET_F_GETLEASE:
4635 return F_GETLEASE;
4636 #ifdef F_DUPFD_CLOEXEC
4637 case TARGET_F_DUPFD_CLOEXEC:
4638 return F_DUPFD_CLOEXEC;
4639 #endif
4640 case TARGET_F_NOTIFY:
4641 return F_NOTIFY;
4642 #ifdef F_GETOWN_EX
4643 case TARGET_F_GETOWN_EX:
4644 return F_GETOWN_EX;
4645 #endif
4646 #ifdef F_SETOWN_EX
4647 case TARGET_F_SETOWN_EX:
4648 return F_SETOWN_EX;
4649 #endif
4650 default:
4651 return -TARGET_EINVAL;
4653 return -TARGET_EINVAL;
4656 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4657 static const bitmask_transtbl flock_tbl[] = {
4658 TRANSTBL_CONVERT(F_RDLCK),
4659 TRANSTBL_CONVERT(F_WRLCK),
4660 TRANSTBL_CONVERT(F_UNLCK),
4661 TRANSTBL_CONVERT(F_EXLCK),
4662 TRANSTBL_CONVERT(F_SHLCK),
4663 { 0, 0, 0, 0 }
4666 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4668 struct flock fl;
4669 struct target_flock *target_fl;
4670 struct flock64 fl64;
4671 struct target_flock64 *target_fl64;
4672 #ifdef F_GETOWN_EX
4673 struct f_owner_ex fox;
4674 struct target_f_owner_ex *target_fox;
4675 #endif
4676 abi_long ret;
4677 int host_cmd = target_to_host_fcntl_cmd(cmd);
4679 if (host_cmd == -TARGET_EINVAL)
4680 return host_cmd;
4682 switch(cmd) {
4683 case TARGET_F_GETLK:
4684 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4685 return -TARGET_EFAULT;
4686 fl.l_type =
4687 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4688 fl.l_whence = tswap16(target_fl->l_whence);
4689 fl.l_start = tswapal(target_fl->l_start);
4690 fl.l_len = tswapal(target_fl->l_len);
4691 fl.l_pid = tswap32(target_fl->l_pid);
4692 unlock_user_struct(target_fl, arg, 0);
4693 ret = get_errno(fcntl(fd, host_cmd, &fl));
4694 if (ret == 0) {
4695 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4696 return -TARGET_EFAULT;
4697 target_fl->l_type =
4698 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4699 target_fl->l_whence = tswap16(fl.l_whence);
4700 target_fl->l_start = tswapal(fl.l_start);
4701 target_fl->l_len = tswapal(fl.l_len);
4702 target_fl->l_pid = tswap32(fl.l_pid);
4703 unlock_user_struct(target_fl, arg, 1);
4705 break;
4707 case TARGET_F_SETLK:
4708 case TARGET_F_SETLKW:
4709 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4710 return -TARGET_EFAULT;
4711 fl.l_type =
4712 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4713 fl.l_whence = tswap16(target_fl->l_whence);
4714 fl.l_start = tswapal(target_fl->l_start);
4715 fl.l_len = tswapal(target_fl->l_len);
4716 fl.l_pid = tswap32(target_fl->l_pid);
4717 unlock_user_struct(target_fl, arg, 0);
4718 ret = get_errno(fcntl(fd, host_cmd, &fl));
4719 break;
4721 case TARGET_F_GETLK64:
4722 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4723 return -TARGET_EFAULT;
4724 fl64.l_type =
4725 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4726 fl64.l_whence = tswap16(target_fl64->l_whence);
4727 fl64.l_start = tswap64(target_fl64->l_start);
4728 fl64.l_len = tswap64(target_fl64->l_len);
4729 fl64.l_pid = tswap32(target_fl64->l_pid);
4730 unlock_user_struct(target_fl64, arg, 0);
4731 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4732 if (ret == 0) {
4733 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4734 return -TARGET_EFAULT;
4735 target_fl64->l_type =
4736 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4737 target_fl64->l_whence = tswap16(fl64.l_whence);
4738 target_fl64->l_start = tswap64(fl64.l_start);
4739 target_fl64->l_len = tswap64(fl64.l_len);
4740 target_fl64->l_pid = tswap32(fl64.l_pid);
4741 unlock_user_struct(target_fl64, arg, 1);
4743 break;
4744 case TARGET_F_SETLK64:
4745 case TARGET_F_SETLKW64:
4746 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4747 return -TARGET_EFAULT;
4748 fl64.l_type =
4749 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4750 fl64.l_whence = tswap16(target_fl64->l_whence);
4751 fl64.l_start = tswap64(target_fl64->l_start);
4752 fl64.l_len = tswap64(target_fl64->l_len);
4753 fl64.l_pid = tswap32(target_fl64->l_pid);
4754 unlock_user_struct(target_fl64, arg, 0);
4755 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4756 break;
4758 case TARGET_F_GETFL:
4759 ret = get_errno(fcntl(fd, host_cmd, arg));
4760 if (ret >= 0) {
4761 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4763 break;
4765 case TARGET_F_SETFL:
4766 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4767 break;
4769 #ifdef F_GETOWN_EX
4770 case TARGET_F_GETOWN_EX:
4771 ret = get_errno(fcntl(fd, host_cmd, &fox));
4772 if (ret >= 0) {
4773 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
4774 return -TARGET_EFAULT;
4775 target_fox->type = tswap32(fox.type);
4776 target_fox->pid = tswap32(fox.pid);
4777 unlock_user_struct(target_fox, arg, 1);
4779 break;
4780 #endif
4782 #ifdef F_SETOWN_EX
4783 case TARGET_F_SETOWN_EX:
4784 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
4785 return -TARGET_EFAULT;
4786 fox.type = tswap32(target_fox->type);
4787 fox.pid = tswap32(target_fox->pid);
4788 unlock_user_struct(target_fox, arg, 0);
4789 ret = get_errno(fcntl(fd, host_cmd, &fox));
4790 break;
4791 #endif
4793 case TARGET_F_SETOWN:
4794 case TARGET_F_GETOWN:
4795 case TARGET_F_SETSIG:
4796 case TARGET_F_GETSIG:
4797 case TARGET_F_SETLEASE:
4798 case TARGET_F_GETLEASE:
4799 ret = get_errno(fcntl(fd, host_cmd, arg));
4800 break;
4802 default:
4803 ret = get_errno(fcntl(fd, cmd, arg));
4804 break;
4806 return ret;
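/* Legacy 16-bit uid/gid syscalls (USE_UID16): IDs that do not fit in
   16 bits are reported to the guest as 65534 (the traditional overflow
   uid/gid), while a 16-bit -1 coming from the guest must stay -1 so the
   "leave unchanged" convention of setreuid()/setregid() still works. */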
4809 #ifdef USE_UID16
4811 static inline int high2lowuid(int uid)
4813 if (uid > 65535)
4814 return 65534;
4815 else
4816 return uid;
4819 static inline int high2lowgid(int gid)
4821 if (gid > 65535)
4822 return 65534;
4823 else
4824 return gid;
4827 static inline int low2highuid(int uid)
4829 if ((int16_t)uid == -1)
4830 return -1;
4831 else
4832 return uid;
4835 static inline int low2highgid(int gid)
4837 if ((int16_t)gid == -1)
4838 return -1;
4839 else
4840 return gid;
4842 static inline int tswapid(int id)
4844 return tswap16(id);
4847 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4849 #else /* !USE_UID16 */
4850 static inline int high2lowuid(int uid)
4852 return uid;
4854 static inline int high2lowgid(int gid)
4856 return gid;
4858 static inline int low2highuid(int uid)
4860 return uid;
4862 static inline int low2highgid(int gid)
4864 return gid;
4866 static inline int tswapid(int id)
4868 return tswap32(id);
4871 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4873 #endif /* USE_UID16 */
4875 void syscall_init(void)
4877 IOCTLEntry *ie;
4878 const argtype *arg_type;
4879 int size;
4880 int i;
4882 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4883 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4884 #include "syscall_types.h"
4885 #undef STRUCT
4886 #undef STRUCT_SPECIAL
4888 /* Build target_to_host_errno_table[] table from
4889 * host_to_target_errno_table[]. */
4890 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4891 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4894 /* We patch the ioctl size if necessary. We rely on the fact that
4895 no ioctl has all the bits set to '1' in the size field */
4896 ie = ioctl_entries;
4897 while (ie->target_cmd != 0) {
4898 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4899 TARGET_IOC_SIZEMASK) {
4900 arg_type = ie->arg_type;
4901 if (arg_type[0] != TYPE_PTR) {
4902 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4903 ie->target_cmd);
4904 exit(1);
4906 arg_type++;
4907 size = thunk_type_size(arg_type, 0);
4908 ie->target_cmd = (ie->target_cmd &
4909 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4910 (size << TARGET_IOC_SIZESHIFT);
4913 /* automatic consistency check if host and target are the same arch */
4914 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4915 (defined(__x86_64__) && defined(TARGET_X86_64))
4916 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4917 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4918 ie->name, ie->target_cmd, ie->host_cmd);
4920 #endif
4921 ie++;
4925 #if TARGET_ABI_BITS == 32
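/* On 32-bit guest ABIs a 64-bit file offset is passed in two 32-bit
   syscall arguments; which argument carries the high word depends on the
   guest endianness. */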
4926 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4928 #ifdef TARGET_WORDS_BIGENDIAN
4929 return ((uint64_t)word0 << 32) | word1;
4930 #else
4931 return ((uint64_t)word1 << 32) | word0;
4932 #endif
4934 #else /* TARGET_ABI_BITS == 32 */
4935 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4937 return word0;
4939 #endif /* TARGET_ABI_BITS != 32 */
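/* Some guest ABIs require 64-bit values to be passed in an aligned
   (even/odd) register pair; when regpairs_aligned() reports this, the
   offset actually starts one argument later, so shift the pair down. */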
4941 #ifdef TARGET_NR_truncate64
4942 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4943 abi_long arg2,
4944 abi_long arg3,
4945 abi_long arg4)
4947 if (regpairs_aligned(cpu_env)) {
4948 arg2 = arg3;
4949 arg3 = arg4;
4951 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4953 #endif
4955 #ifdef TARGET_NR_ftruncate64
4956 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4957 abi_long arg2,
4958 abi_long arg3,
4959 abi_long arg4)
4961 if (regpairs_aligned(cpu_env)) {
4962 arg2 = arg3;
4963 arg3 = arg4;
4965 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4967 #endif
4969 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4970 abi_ulong target_addr)
4972 struct target_timespec *target_ts;
4974 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4975 return -TARGET_EFAULT;
4976 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4977 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4978 unlock_user_struct(target_ts, target_addr, 0);
4979 return 0;
4982 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4983 struct timespec *host_ts)
4985 struct target_timespec *target_ts;
4987 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4988 return -TARGET_EFAULT;
4989 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4990 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4991 unlock_user_struct(target_ts, target_addr, 1);
4992 return 0;
4995 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
4996 abi_ulong target_addr)
4998 struct target_itimerspec *target_itspec;
5000 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5001 return -TARGET_EFAULT;
5004 host_itspec->it_interval.tv_sec =
5005 tswapal(target_itspec->it_interval.tv_sec);
5006 host_itspec->it_interval.tv_nsec =
5007 tswapal(target_itspec->it_interval.tv_nsec);
5008 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5009 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5011 unlock_user_struct(target_itspec, target_addr, 1);
5012 return 0;
5015 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5016 struct itimerspec *host_its)
5018 struct target_itimerspec *target_itspec;
5020 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5021 return -TARGET_EFAULT;
5024 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5025 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5027 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5028 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5030 unlock_user_struct(target_itspec, target_addr, 0);
5031 return 0;
5034 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5035 abi_ulong target_addr)
5037 struct target_sigevent *target_sevp;
5039 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5040 return -TARGET_EFAULT;
5043 /* This union is awkward on 64 bit systems because it has a 32 bit
5044 * integer and a pointer in it; we follow the conversion approach
5045 * used for handling sigval types in signal.c so the guest should get
5046 * the correct value back even if we did a 64 bit byteswap and it's
5047 * using the 32 bit integer.
5049 host_sevp->sigev_value.sival_ptr =
5050 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5051 host_sevp->sigev_signo =
5052 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5053 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5054 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5056 unlock_user_struct(target_sevp, target_addr, 1);
5057 return 0;
5060 #if defined(TARGET_NR_mlockall)
5061 static inline int target_to_host_mlockall_arg(int arg)
5063 int result = 0;
5065 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
5066 result |= MCL_CURRENT;
5068 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
5069 result |= MCL_FUTURE;
5071 return result;
5073 #endif
5075 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
5076 static inline abi_long host_to_target_stat64(void *cpu_env,
5077 abi_ulong target_addr,
5078 struct stat *host_st)
5080 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5081 if (((CPUARMState *)cpu_env)->eabi) {
5082 struct target_eabi_stat64 *target_st;
5084 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5085 return -TARGET_EFAULT;
5086 memset(target_st, 0, sizeof(struct target_eabi_stat64));
5087 __put_user(host_st->st_dev, &target_st->st_dev);
5088 __put_user(host_st->st_ino, &target_st->st_ino);
5089 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5090 __put_user(host_st->st_ino, &target_st->__st_ino);
5091 #endif
5092 __put_user(host_st->st_mode, &target_st->st_mode);
5093 __put_user(host_st->st_nlink, &target_st->st_nlink);
5094 __put_user(host_st->st_uid, &target_st->st_uid);
5095 __put_user(host_st->st_gid, &target_st->st_gid);
5096 __put_user(host_st->st_rdev, &target_st->st_rdev);
5097 __put_user(host_st->st_size, &target_st->st_size);
5098 __put_user(host_st->st_blksize, &target_st->st_blksize);
5099 __put_user(host_st->st_blocks, &target_st->st_blocks);
5100 __put_user(host_st->st_atime, &target_st->target_st_atime);
5101 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5102 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5103 unlock_user_struct(target_st, target_addr, 1);
5104 } else
5105 #endif
5107 #if defined(TARGET_HAS_STRUCT_STAT64)
5108 struct target_stat64 *target_st;
5109 #else
5110 struct target_stat *target_st;
5111 #endif
5113 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5114 return -TARGET_EFAULT;
5115 memset(target_st, 0, sizeof(*target_st));
5116 __put_user(host_st->st_dev, &target_st->st_dev);
5117 __put_user(host_st->st_ino, &target_st->st_ino);
5118 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5119 __put_user(host_st->st_ino, &target_st->__st_ino);
5120 #endif
5121 __put_user(host_st->st_mode, &target_st->st_mode);
5122 __put_user(host_st->st_nlink, &target_st->st_nlink);
5123 __put_user(host_st->st_uid, &target_st->st_uid);
5124 __put_user(host_st->st_gid, &target_st->st_gid);
5125 __put_user(host_st->st_rdev, &target_st->st_rdev);
5126 /* XXX: better use of kernel struct */
5127 __put_user(host_st->st_size, &target_st->st_size);
5128 __put_user(host_st->st_blksize, &target_st->st_blksize);
5129 __put_user(host_st->st_blocks, &target_st->st_blocks);
5130 __put_user(host_st->st_atime, &target_st->target_st_atime);
5131 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5132 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5133 unlock_user_struct(target_st, target_addr, 1);
5136 return 0;
5138 #endif
5140 /* ??? Using host futex calls even when target atomic operations
5141 are not really atomic probably breaks things. However, implementing
5142 futexes locally would make futexes shared between multiple processes
5143 tricky; such shared futexes are probably useless anyway because guest
5144 atomic operations won't work across processes either. */
5145 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5146 target_ulong uaddr2, int val3)
5148 struct timespec ts, *pts;
5149 int base_op;
5151 /* ??? We assume FUTEX_* constants are the same on both host
5152 and target. */
5153 #ifdef FUTEX_CMD_MASK
5154 base_op = op & FUTEX_CMD_MASK;
5155 #else
5156 base_op = op;
5157 #endif
5158 switch (base_op) {
5159 case FUTEX_WAIT:
5160 case FUTEX_WAIT_BITSET:
5161 if (timeout) {
5162 pts = &ts;
5163 target_to_host_timespec(pts, timeout);
5164 } else {
5165 pts = NULL;
5167 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
5168 pts, NULL, val3));
5169 case FUTEX_WAKE:
5170 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5171 case FUTEX_FD:
5172 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5173 case FUTEX_REQUEUE:
5174 case FUTEX_CMP_REQUEUE:
5175 case FUTEX_WAKE_OP:
5176 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5177 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5178 But the prototype takes a `struct timespec *'; insert casts
5179 to satisfy the compiler. We do not need to tswap TIMEOUT
5180 since it's not compared to guest memory. */
5181 pts = (struct timespec *)(uintptr_t) timeout;
5182 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
5183 g2h(uaddr2),
5184 (base_op == FUTEX_CMP_REQUEUE
5185 ? tswap32(val3)
5186 : val3)));
5187 default:
5188 return -TARGET_ENOSYS;
5192 /* Map host to target signal numbers for the wait family of syscalls.
5193 Assume all other status bits are the same. */
5194 int host_to_target_waitstatus(int status)
5196 if (WIFSIGNALED(status)) {
5197 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
5199 if (WIFSTOPPED(status)) {
5200 return (host_to_target_signal(WSTOPSIG(status)) << 8)
5201 | (status & 0xff);
5203 return status;
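/* Emulated /proc/self/cmdline: copy the host's cmdline but drop the first
   NUL-terminated string (the path of the qemu binary itself) so the guest
   sees only its own arguments. */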
5206 static int open_self_cmdline(void *cpu_env, int fd)
5208 int fd_orig = -1;
5209 bool word_skipped = false;
5211 fd_orig = open("/proc/self/cmdline", O_RDONLY);
5212 if (fd_orig < 0) {
5213 return fd_orig;
5216 while (true) {
5217 ssize_t nb_read;
5218 char buf[128];
5219 char *cp_buf = buf;
5221 nb_read = read(fd_orig, buf, sizeof(buf));
5222 if (nb_read < 0) {
5223 fd_orig = close(fd_orig);
5224 return -1;
5225 } else if (nb_read == 0) {
5226 break;
5229 if (!word_skipped) {
5230 /* Skip the first string, which is the path to qemu-*-static
5231 instead of the actual command. */
5232 cp_buf = memchr(buf, 0, sizeof(buf));
5233 if (cp_buf) {
5234 /* Null byte found, skip one string */
5235 cp_buf++;
5236 nb_read -= cp_buf - buf;
5237 word_skipped = true;
5241 if (word_skipped) {
5242 if (write(fd, cp_buf, nb_read) != nb_read) {
5243 close(fd_orig);
5244 return -1;
5249 return close(fd_orig);
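/* Emulated /proc/self/maps: parse the host's maps file, keep only ranges
   that correspond to valid guest mappings, rewrite the addresses with
   h2g() and tag the line containing the guest stack as "[stack]". */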
5252 static int open_self_maps(void *cpu_env, int fd)
5254 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5255 TaskState *ts = cpu->opaque;
5256 FILE *fp;
5257 char *line = NULL;
5258 size_t len = 0;
5259 ssize_t read;
5261 fp = fopen("/proc/self/maps", "r");
5262 if (fp == NULL) {
5263 return -EACCES;
5266 while ((read = getline(&line, &len, fp)) != -1) {
5267 int fields, dev_maj, dev_min, inode;
5268 uint64_t min, max, offset;
5269 char flag_r, flag_w, flag_x, flag_p;
5270 char path[512] = "";
5271 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5272 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5273 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5275 if ((fields < 10) || (fields > 11)) {
5276 continue;
5278 if (h2g_valid(min)) {
5279 int flags = page_get_flags(h2g(min));
5280 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5281 if (page_check_range(h2g(min), max - min, flags) == -1) {
5282 continue;
5284 if (h2g(min) == ts->info->stack_limit) {
5285 pstrcpy(path, sizeof(path), " [stack]");
5287 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5288 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5289 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5290 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5291 path[0] ? " " : "", path);
5295 free(line);
5296 fclose(fp);
5298 return 0;
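/* Emulated /proc/self/stat: write the expected 44 space-separated fields,
   but only pid (field 1), the command name (field 2) and the stack start
   (field 28) carry real values; everything else is reported as 0. */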
5301 static int open_self_stat(void *cpu_env, int fd)
5303 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5304 TaskState *ts = cpu->opaque;
5305 abi_ulong start_stack = ts->info->start_stack;
5306 int i;
5308 for (i = 0; i < 44; i++) {
5309 char buf[128];
5310 int len;
5311 uint64_t val = 0;
5313 if (i == 0) {
5314 /* pid */
5315 val = getpid();
5316 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5317 } else if (i == 1) {
5318 /* app name */
5319 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5320 } else if (i == 27) {
5321 /* stack bottom */
5322 val = start_stack;
5323 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5324 } else {
5325 /* for the rest, there is MasterCard */
5326 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5329 len = strlen(buf);
5330 if (write(fd, buf, len) != len) {
5331 return -1;
5335 return 0;
5338 static int open_self_auxv(void *cpu_env, int fd)
5340 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5341 TaskState *ts = cpu->opaque;
5342 abi_ulong auxv = ts->info->saved_auxv;
5343 abi_ulong len = ts->info->auxv_len;
5344 char *ptr;
5347 * The auxiliary vector is stored on the target process's stack;
5348 * read the whole auxv vector and copy it to the file.
5350 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5351 if (ptr != NULL) {
5352 while (len > 0) {
5353 ssize_t r;
5354 r = write(fd, ptr, len);
5355 if (r <= 0) {
5356 break;
5358 len -= r;
5359 ptr += r;
5361 lseek(fd, 0, SEEK_SET);
5362 unlock_user(ptr, auxv, len);
5365 return 0;
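/* Return nonzero if 'filename' names /proc/self/<entry> or
   /proc/<our pid>/<entry>, i.e. a proc file describing this process. */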
5368 static int is_proc_myself(const char *filename, const char *entry)
5370 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5371 filename += strlen("/proc/");
5372 if (!strncmp(filename, "self/", strlen("self/"))) {
5373 filename += strlen("self/");
5374 } else if (*filename >= '1' && *filename <= '9') {
5375 char myself[80];
5376 snprintf(myself, sizeof(myself), "%d/", getpid());
5377 if (!strncmp(filename, myself, strlen(myself))) {
5378 filename += strlen(myself);
5379 } else {
5380 return 0;
5382 } else {
5383 return 0;
5385 if (!strcmp(filename, entry)) {
5386 return 1;
5389 return 0;
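/* When host and guest endianness differ, /proc/net/route cannot be passed
   through unchanged: the destination, gateway and netmask columns are raw
   32-bit values, so rewrite the file with those fields byte-swapped. */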
5392 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5393 static int is_proc(const char *filename, const char *entry)
5395 return strcmp(filename, entry) == 0;
5398 static int open_net_route(void *cpu_env, int fd)
5400 FILE *fp;
5401 char *line = NULL;
5402 size_t len = 0;
5403 ssize_t read;
5405 fp = fopen("/proc/net/route", "r");
5406 if (fp == NULL) {
5407 return -EACCES;
5410 /* read header */
5412 read = getline(&line, &len, fp);
5413 dprintf(fd, "%s", line);
5415 /* read routes */
5417 while ((read = getline(&line, &len, fp)) != -1) {
5418 char iface[16];
5419 uint32_t dest, gw, mask;
5420 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5421 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5422 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5423 &mask, &mtu, &window, &irtt);
5424 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5425 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5426 metric, tswap32(mask), mtu, window, irtt);
5429 free(line);
5430 fclose(fp);
5432 return 0;
5434 #endif
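/* Opens of a few /proc files are intercepted because their contents must
   describe the guest rather than qemu itself: the path is matched against
   the fake_open table and, on a hit, an unlinked temporary file is filled
   in by the corresponding fill() callback and returned instead. */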
5436 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
5438 struct fake_open {
5439 const char *filename;
5440 int (*fill)(void *cpu_env, int fd);
5441 int (*cmp)(const char *s1, const char *s2);
5443 const struct fake_open *fake_open;
5444 static const struct fake_open fakes[] = {
5445 { "maps", open_self_maps, is_proc_myself },
5446 { "stat", open_self_stat, is_proc_myself },
5447 { "auxv", open_self_auxv, is_proc_myself },
5448 { "cmdline", open_self_cmdline, is_proc_myself },
5449 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5450 { "/proc/net/route", open_net_route, is_proc },
5451 #endif
5452 { NULL, NULL, NULL }
5455 if (is_proc_myself(pathname, "exe")) {
5456 int execfd = qemu_getauxval(AT_EXECFD);
5457 return execfd ? execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode));
5460 for (fake_open = fakes; fake_open->filename; fake_open++) {
5461 if (fake_open->cmp(pathname, fake_open->filename)) {
5462 break;
5466 if (fake_open->filename) {
5467 const char *tmpdir;
5468 char filename[PATH_MAX];
5469 int fd, r;
5471 /* create a temporary file to hold the emulated /proc contents */
5472 tmpdir = getenv("TMPDIR");
5473 if (!tmpdir)
5474 tmpdir = "/tmp";
5475 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5476 fd = mkstemp(filename);
5477 if (fd < 0) {
5478 return fd;
5480 unlink(filename);
5482 if ((r = fake_open->fill(cpu_env, fd))) {
5483 close(fd);
5484 return r;
5486 lseek(fd, 0, SEEK_SET);
5488 return fd;
5491 return get_errno(sys_openat(dirfd, path(pathname), flags, mode));
5494 #define TIMER_MAGIC 0x0caf0000
5495 #define TIMER_MAGIC_MASK 0xffff0000
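/* Guest-visible timer IDs are the small index into g_posix_timers with
   TIMER_MAGIC in the upper 16 bits, so obviously bogus IDs handed back by
   the guest can be rejected with EINVAL. */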
5497 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
5498 static target_timer_t get_timer_id(abi_long arg)
5500 target_timer_t timerid = arg;
5502 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
5503 return -TARGET_EINVAL;
5506 timerid &= 0xffff;
5508 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
5509 return -TARGET_EINVAL;
5512 return timerid;
5515 /* do_syscall() should always have a single exit point at the end so
5516 that actions, such as logging of syscall results, can be performed.
5517 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5518 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5519 abi_long arg2, abi_long arg3, abi_long arg4,
5520 abi_long arg5, abi_long arg6, abi_long arg7,
5521 abi_long arg8)
5523 CPUState *cpu = ENV_GET_CPU(cpu_env);
5524 abi_long ret;
5525 struct stat st;
5526 struct statfs stfs;
5527 void *p;
5529 #ifdef DEBUG
5530 gemu_log("syscall %d", num);
5531 #endif
5532 if(do_strace)
5533 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5535 switch(num) {
5536 case TARGET_NR_exit:
5537 /* In old applications this may be used to implement _exit(2).
5538 However, in threaded applications it is used for thread termination,
5539 and _exit_group is used for application termination.
5540 Do thread termination if we have more than one thread. */
5541 /* FIXME: This probably breaks if a signal arrives. We should probably
5542 be disabling signals. */
5543 if (CPU_NEXT(first_cpu)) {
5544 TaskState *ts;
5546 cpu_list_lock();
5547 /* Remove the CPU from the list. */
5548 QTAILQ_REMOVE(&cpus, cpu, node);
5549 cpu_list_unlock();
5550 ts = cpu->opaque;
5551 if (ts->child_tidptr) {
5552 put_user_u32(0, ts->child_tidptr);
5553 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5554 NULL, NULL, 0);
5556 thread_cpu = NULL;
5557 object_unref(OBJECT(cpu));
5558 g_free(ts);
5559 pthread_exit(NULL);
5561 #ifdef TARGET_GPROF
5562 _mcleanup();
5563 #endif
5564 gdb_exit(cpu_env, arg1);
5565 _exit(arg1);
5566 ret = 0; /* avoid warning */
5567 break;
5568 case TARGET_NR_read:
5569 if (arg3 == 0)
5570 ret = 0;
5571 else {
5572 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5573 goto efault;
5574 ret = get_errno(read(arg1, p, arg3));
5575 unlock_user(p, arg2, ret);
5577 break;
5578 case TARGET_NR_write:
5579 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5580 goto efault;
5581 ret = get_errno(write(arg1, p, arg3));
5582 unlock_user(p, arg2, 0);
5583 break;
5584 case TARGET_NR_open:
5585 if (!(p = lock_user_string(arg1)))
5586 goto efault;
5587 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
5588 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5589 arg3));
5590 unlock_user(p, arg1, 0);
5591 break;
5592 case TARGET_NR_openat:
5593 if (!(p = lock_user_string(arg2)))
5594 goto efault;
5595 ret = get_errno(do_openat(cpu_env, arg1, p,
5596 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5597 arg4));
5598 unlock_user(p, arg2, 0);
5599 break;
5600 case TARGET_NR_close:
5601 ret = get_errno(close(arg1));
5602 break;
5603 case TARGET_NR_brk:
5604 ret = do_brk(arg1);
5605 break;
5606 case TARGET_NR_fork:
5607 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5608 break;
5609 #ifdef TARGET_NR_waitpid
5610 case TARGET_NR_waitpid:
5612 int status;
5613 ret = get_errno(waitpid(arg1, &status, arg3));
5614 if (!is_error(ret) && arg2 && ret
5615 && put_user_s32(host_to_target_waitstatus(status), arg2))
5616 goto efault;
5618 break;
5619 #endif
5620 #ifdef TARGET_NR_waitid
5621 case TARGET_NR_waitid:
5623 siginfo_t info;
5624 info.si_pid = 0;
5625 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5626 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5627 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5628 goto efault;
5629 host_to_target_siginfo(p, &info);
5630 unlock_user(p, arg3, sizeof(target_siginfo_t));
5633 break;
5634 #endif
5635 #ifdef TARGET_NR_creat /* not on alpha */
5636 case TARGET_NR_creat:
5637 if (!(p = lock_user_string(arg1)))
5638 goto efault;
5639 ret = get_errno(creat(p, arg2));
5640 unlock_user(p, arg1, 0);
5641 break;
5642 #endif
5643 case TARGET_NR_link:
5645 void * p2;
5646 p = lock_user_string(arg1);
5647 p2 = lock_user_string(arg2);
5648 if (!p || !p2)
5649 ret = -TARGET_EFAULT;
5650 else
5651 ret = get_errno(link(p, p2));
5652 unlock_user(p2, arg2, 0);
5653 unlock_user(p, arg1, 0);
5655 break;
5656 #if defined(TARGET_NR_linkat)
5657 case TARGET_NR_linkat:
5659 void * p2 = NULL;
5660 if (!arg2 || !arg4)
5661 goto efault;
5662 p = lock_user_string(arg2);
5663 p2 = lock_user_string(arg4);
5664 if (!p || !p2)
5665 ret = -TARGET_EFAULT;
5666 else
5667 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5668 unlock_user(p, arg2, 0);
5669 unlock_user(p2, arg4, 0);
5671 break;
5672 #endif
5673 case TARGET_NR_unlink:
5674 if (!(p = lock_user_string(arg1)))
5675 goto efault;
5676 ret = get_errno(unlink(p));
5677 unlock_user(p, arg1, 0);
5678 break;
5679 #if defined(TARGET_NR_unlinkat)
5680 case TARGET_NR_unlinkat:
5681 if (!(p = lock_user_string(arg2)))
5682 goto efault;
5683 ret = get_errno(unlinkat(arg1, p, arg3));
5684 unlock_user(p, arg2, 0);
5685 break;
5686 #endif
5687 case TARGET_NR_execve:
5689 char **argp, **envp;
5690 int argc, envc;
5691 abi_ulong gp;
5692 abi_ulong guest_argp;
5693 abi_ulong guest_envp;
5694 abi_ulong addr;
5695 char **q;
5696 int total_size = 0;
5698 argc = 0;
5699 guest_argp = arg2;
5700 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5701 if (get_user_ual(addr, gp))
5702 goto efault;
5703 if (!addr)
5704 break;
5705 argc++;
5707 envc = 0;
5708 guest_envp = arg3;
5709 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5710 if (get_user_ual(addr, gp))
5711 goto efault;
5712 if (!addr)
5713 break;
5714 envc++;
5717 argp = alloca((argc + 1) * sizeof(void *));
5718 envp = alloca((envc + 1) * sizeof(void *));
5720 for (gp = guest_argp, q = argp; gp;
5721 gp += sizeof(abi_ulong), q++) {
5722 if (get_user_ual(addr, gp))
5723 goto execve_efault;
5724 if (!addr)
5725 break;
5726 if (!(*q = lock_user_string(addr)))
5727 goto execve_efault;
5728 total_size += strlen(*q) + 1;
5730 *q = NULL;
5732 for (gp = guest_envp, q = envp; gp;
5733 gp += sizeof(abi_ulong), q++) {
5734 if (get_user_ual(addr, gp))
5735 goto execve_efault;
5736 if (!addr)
5737 break;
5738 if (!(*q = lock_user_string(addr)))
5739 goto execve_efault;
5740 total_size += strlen(*q) + 1;
5742 *q = NULL;
5744 /* This case will not be caught by the host's execve() if its
5745 page size is bigger than the target's. */
5746 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5747 ret = -TARGET_E2BIG;
5748 goto execve_end;
5750 if (!(p = lock_user_string(arg1)))
5751 goto execve_efault;
5752 ret = get_errno(execve(p, argp, envp));
5753 unlock_user(p, arg1, 0);
5755 goto execve_end;
5757 execve_efault:
5758 ret = -TARGET_EFAULT;
5760 execve_end:
5761 for (gp = guest_argp, q = argp; *q;
5762 gp += sizeof(abi_ulong), q++) {
5763 if (get_user_ual(addr, gp)
5764 || !addr)
5765 break;
5766 unlock_user(*q, addr, 0);
5768 for (gp = guest_envp, q = envp; *q;
5769 gp += sizeof(abi_ulong), q++) {
5770 if (get_user_ual(addr, gp)
5771 || !addr)
5772 break;
5773 unlock_user(*q, addr, 0);
5776 break;
5777 case TARGET_NR_chdir:
5778 if (!(p = lock_user_string(arg1)))
5779 goto efault;
5780 ret = get_errno(chdir(p));
5781 unlock_user(p, arg1, 0);
5782 break;
5783 #ifdef TARGET_NR_time
5784 case TARGET_NR_time:
5786 time_t host_time;
5787 ret = get_errno(time(&host_time));
5788 if (!is_error(ret)
5789 && arg1
5790 && put_user_sal(host_time, arg1))
5791 goto efault;
5793 break;
5794 #endif
5795 case TARGET_NR_mknod:
5796 if (!(p = lock_user_string(arg1)))
5797 goto efault;
5798 ret = get_errno(mknod(p, arg2, arg3));
5799 unlock_user(p, arg1, 0);
5800 break;
5801 #if defined(TARGET_NR_mknodat)
5802 case TARGET_NR_mknodat:
5803 if (!(p = lock_user_string(arg2)))
5804 goto efault;
5805 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5806 unlock_user(p, arg2, 0);
5807 break;
5808 #endif
5809 case TARGET_NR_chmod:
5810 if (!(p = lock_user_string(arg1)))
5811 goto efault;
5812 ret = get_errno(chmod(p, arg2));
5813 unlock_user(p, arg1, 0);
5814 break;
5815 #ifdef TARGET_NR_break
5816 case TARGET_NR_break:
5817 goto unimplemented;
5818 #endif
5819 #ifdef TARGET_NR_oldstat
5820 case TARGET_NR_oldstat:
5821 goto unimplemented;
5822 #endif
5823 case TARGET_NR_lseek:
5824 ret = get_errno(lseek(arg1, arg2, arg3));
5825 break;
5826 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5827 /* Alpha specific */
5828 case TARGET_NR_getxpid:
5829 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5830 ret = get_errno(getpid());
5831 break;
5832 #endif
5833 #ifdef TARGET_NR_getpid
5834 case TARGET_NR_getpid:
5835 ret = get_errno(getpid());
5836 break;
5837 #endif
5838 case TARGET_NR_mount:
5840 /* need to look at the data field */
5841 void *p2, *p3;
5843 if (arg1) {
5844 p = lock_user_string(arg1);
5845 if (!p) {
5846 goto efault;
5848 } else {
5849 p = NULL;
5852 p2 = lock_user_string(arg2);
5853 if (!p2) {
5854 if (arg1) {
5855 unlock_user(p, arg1, 0);
5857 goto efault;
5860 if (arg3) {
5861 p3 = lock_user_string(arg3);
5862 if (!p3) {
5863 if (arg1) {
5864 unlock_user(p, arg1, 0);
5866 unlock_user(p2, arg2, 0);
5867 goto efault;
5869 } else {
5870 p3 = NULL;
5873 /* FIXME - arg5 should be locked, but it isn't clear how to
5874 * do that since it's not guaranteed to be a NULL-terminated
5875 * string.
5877 if (!arg5) {
5878 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
5879 } else {
5880 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
5882 ret = get_errno(ret);
5884 if (arg1) {
5885 unlock_user(p, arg1, 0);
5887 unlock_user(p2, arg2, 0);
5888 if (arg3) {
5889 unlock_user(p3, arg3, 0);
5892 break;
5893 #ifdef TARGET_NR_umount
5894 case TARGET_NR_umount:
5895 if (!(p = lock_user_string(arg1)))
5896 goto efault;
5897 ret = get_errno(umount(p));
5898 unlock_user(p, arg1, 0);
5899 break;
5900 #endif
5901 #ifdef TARGET_NR_stime /* not on alpha */
5902 case TARGET_NR_stime:
5904 time_t host_time;
5905 if (get_user_sal(host_time, arg1))
5906 goto efault;
5907 ret = get_errno(stime(&host_time));
5909 break;
5910 #endif
5911 case TARGET_NR_ptrace:
5912 goto unimplemented;
5913 #ifdef TARGET_NR_alarm /* not on alpha */
5914 case TARGET_NR_alarm:
5915 ret = alarm(arg1);
5916 break;
5917 #endif
5918 #ifdef TARGET_NR_oldfstat
5919 case TARGET_NR_oldfstat:
5920 goto unimplemented;
5921 #endif
5922 #ifdef TARGET_NR_pause /* not on alpha */
5923 case TARGET_NR_pause:
5924 ret = get_errno(pause());
5925 break;
5926 #endif
5927 #ifdef TARGET_NR_utime
5928 case TARGET_NR_utime:
5930 struct utimbuf tbuf, *host_tbuf;
5931 struct target_utimbuf *target_tbuf;
5932 if (arg2) {
5933 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5934 goto efault;
5935 tbuf.actime = tswapal(target_tbuf->actime);
5936 tbuf.modtime = tswapal(target_tbuf->modtime);
5937 unlock_user_struct(target_tbuf, arg2, 0);
5938 host_tbuf = &tbuf;
5939 } else {
5940 host_tbuf = NULL;
5942 if (!(p = lock_user_string(arg1)))
5943 goto efault;
5944 ret = get_errno(utime(p, host_tbuf));
5945 unlock_user(p, arg1, 0);
5947 break;
5948 #endif
5949 case TARGET_NR_utimes:
5951 struct timeval *tvp, tv[2];
5952 if (arg2) {
5953 if (copy_from_user_timeval(&tv[0], arg2)
5954 || copy_from_user_timeval(&tv[1],
5955 arg2 + sizeof(struct target_timeval)))
5956 goto efault;
5957 tvp = tv;
5958 } else {
5959 tvp = NULL;
5961 if (!(p = lock_user_string(arg1)))
5962 goto efault;
5963 ret = get_errno(utimes(p, tvp));
5964 unlock_user(p, arg1, 0);
5966 break;
5967 #if defined(TARGET_NR_futimesat)
5968 case TARGET_NR_futimesat:
5970 struct timeval *tvp, tv[2];
5971 if (arg3) {
5972 if (copy_from_user_timeval(&tv[0], arg3)
5973 || copy_from_user_timeval(&tv[1],
5974 arg3 + sizeof(struct target_timeval)))
5975 goto efault;
5976 tvp = tv;
5977 } else {
5978 tvp = NULL;
5980 if (!(p = lock_user_string(arg2)))
5981 goto efault;
5982 ret = get_errno(futimesat(arg1, path(p), tvp));
5983 unlock_user(p, arg2, 0);
5985 break;
5986 #endif
5987 #ifdef TARGET_NR_stty
5988 case TARGET_NR_stty:
5989 goto unimplemented;
5990 #endif
5991 #ifdef TARGET_NR_gtty
5992 case TARGET_NR_gtty:
5993 goto unimplemented;
5994 #endif
5995 case TARGET_NR_access:
5996 if (!(p = lock_user_string(arg1)))
5997 goto efault;
5998 ret = get_errno(access(path(p), arg2));
5999 unlock_user(p, arg1, 0);
6000 break;
6001 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6002 case TARGET_NR_faccessat:
6003 if (!(p = lock_user_string(arg2)))
6004 goto efault;
6005 ret = get_errno(faccessat(arg1, p, arg3, 0));
6006 unlock_user(p, arg2, 0);
6007 break;
6008 #endif
6009 #ifdef TARGET_NR_nice /* not on alpha */
6010 case TARGET_NR_nice:
6011 ret = get_errno(nice(arg1));
6012 break;
6013 #endif
6014 #ifdef TARGET_NR_ftime
6015 case TARGET_NR_ftime:
6016 goto unimplemented;
6017 #endif
6018 case TARGET_NR_sync:
6019 sync();
6020 ret = 0;
6021 break;
6022 case TARGET_NR_kill:
6023 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
6024 break;
6025 case TARGET_NR_rename:
6027 void *p2;
6028 p = lock_user_string(arg1);
6029 p2 = lock_user_string(arg2);
6030 if (!p || !p2)
6031 ret = -TARGET_EFAULT;
6032 else
6033 ret = get_errno(rename(p, p2));
6034 unlock_user(p2, arg2, 0);
6035 unlock_user(p, arg1, 0);
6037 break;
6038 #if defined(TARGET_NR_renameat)
6039 case TARGET_NR_renameat:
6041 void *p2;
6042 p = lock_user_string(arg2);
6043 p2 = lock_user_string(arg4);
6044 if (!p || !p2)
6045 ret = -TARGET_EFAULT;
6046 else
6047 ret = get_errno(renameat(arg1, p, arg3, p2));
6048 unlock_user(p2, arg4, 0);
6049 unlock_user(p, arg2, 0);
6051 break;
6052 #endif
6053 case TARGET_NR_mkdir:
6054 if (!(p = lock_user_string(arg1)))
6055 goto efault;
6056 ret = get_errno(mkdir(p, arg2));
6057 unlock_user(p, arg1, 0);
6058 break;
6059 #if defined(TARGET_NR_mkdirat)
6060 case TARGET_NR_mkdirat:
6061 if (!(p = lock_user_string(arg2)))
6062 goto efault;
6063 ret = get_errno(mkdirat(arg1, p, arg3));
6064 unlock_user(p, arg2, 0);
6065 break;
6066 #endif
6067 case TARGET_NR_rmdir:
6068 if (!(p = lock_user_string(arg1)))
6069 goto efault;
6070 ret = get_errno(rmdir(p));
6071 unlock_user(p, arg1, 0);
6072 break;
6073 case TARGET_NR_dup:
6074 ret = get_errno(dup(arg1));
6075 break;
6076 case TARGET_NR_pipe:
6077 ret = do_pipe(cpu_env, arg1, 0, 0);
6078 break;
6079 #ifdef TARGET_NR_pipe2
6080 case TARGET_NR_pipe2:
6081 ret = do_pipe(cpu_env, arg1,
6082 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6083 break;
6084 #endif
6085 case TARGET_NR_times:
6087 struct target_tms *tmsp;
6088 struct tms tms;
6089 ret = get_errno(times(&tms));
6090 if (arg1) {
6091 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6092 if (!tmsp)
6093 goto efault;
6094 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6095 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6096 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6097 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6099 if (!is_error(ret))
6100 ret = host_to_target_clock_t(ret);
6102 break;
6103 #ifdef TARGET_NR_prof
6104 case TARGET_NR_prof:
6105 goto unimplemented;
6106 #endif
6107 #ifdef TARGET_NR_signal
6108 case TARGET_NR_signal:
6109 goto unimplemented;
6110 #endif
6111 case TARGET_NR_acct:
6112 if (arg1 == 0) {
6113 ret = get_errno(acct(NULL));
6114 } else {
6115 if (!(p = lock_user_string(arg1)))
6116 goto efault;
6117 ret = get_errno(acct(path(p)));
6118 unlock_user(p, arg1, 0);
6120 break;
6121 #ifdef TARGET_NR_umount2
6122 case TARGET_NR_umount2:
6123 if (!(p = lock_user_string(arg1)))
6124 goto efault;
6125 ret = get_errno(umount2(p, arg2));
6126 unlock_user(p, arg1, 0);
6127 break;
6128 #endif
6129 #ifdef TARGET_NR_lock
6130 case TARGET_NR_lock:
6131 goto unimplemented;
6132 #endif
6133 case TARGET_NR_ioctl:
6134 ret = do_ioctl(arg1, arg2, arg3);
6135 break;
6136 case TARGET_NR_fcntl:
6137 ret = do_fcntl(arg1, arg2, arg3);
6138 break;
6139 #ifdef TARGET_NR_mpx
6140 case TARGET_NR_mpx:
6141 goto unimplemented;
6142 #endif
6143 case TARGET_NR_setpgid:
6144 ret = get_errno(setpgid(arg1, arg2));
6145 break;
6146 #ifdef TARGET_NR_ulimit
6147 case TARGET_NR_ulimit:
6148 goto unimplemented;
6149 #endif
6150 #ifdef TARGET_NR_oldolduname
6151 case TARGET_NR_oldolduname:
6152 goto unimplemented;
6153 #endif
6154 case TARGET_NR_umask:
6155 ret = get_errno(umask(arg1));
6156 break;
6157 case TARGET_NR_chroot:
6158 if (!(p = lock_user_string(arg1)))
6159 goto efault;
6160 ret = get_errno(chroot(p));
6161 unlock_user(p, arg1, 0);
6162 break;
6163 case TARGET_NR_ustat:
6164 goto unimplemented;
6165 case TARGET_NR_dup2:
6166 ret = get_errno(dup2(arg1, arg2));
6167 break;
6168 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6169 case TARGET_NR_dup3:
6170 ret = get_errno(dup3(arg1, arg2, arg3));
6171 break;
6172 #endif
6173 #ifdef TARGET_NR_getppid /* not on alpha */
6174 case TARGET_NR_getppid:
6175 ret = get_errno(getppid());
6176 break;
6177 #endif
6178 case TARGET_NR_getpgrp:
6179 ret = get_errno(getpgrp());
6180 break;
6181 case TARGET_NR_setsid:
6182 ret = get_errno(setsid());
6183 break;
6184 #ifdef TARGET_NR_sigaction
6185 case TARGET_NR_sigaction:
6187 #if defined(TARGET_ALPHA)
6188 struct target_sigaction act, oact, *pact = 0;
6189 struct target_old_sigaction *old_act;
6190 if (arg2) {
6191 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6192 goto efault;
6193 act._sa_handler = old_act->_sa_handler;
6194 target_siginitset(&act.sa_mask, old_act->sa_mask);
6195 act.sa_flags = old_act->sa_flags;
6196 act.sa_restorer = 0;
6197 unlock_user_struct(old_act, arg2, 0);
6198 pact = &act;
6200 ret = get_errno(do_sigaction(arg1, pact, &oact));
6201 if (!is_error(ret) && arg3) {
6202 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6203 goto efault;
6204 old_act->_sa_handler = oact._sa_handler;
6205 old_act->sa_mask = oact.sa_mask.sig[0];
6206 old_act->sa_flags = oact.sa_flags;
6207 unlock_user_struct(old_act, arg3, 1);
6209 #elif defined(TARGET_MIPS)
6210 struct target_sigaction act, oact, *pact, *old_act;
6212 if (arg2) {
6213 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6214 goto efault;
6215 act._sa_handler = old_act->_sa_handler;
6216 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6217 act.sa_flags = old_act->sa_flags;
6218 unlock_user_struct(old_act, arg2, 0);
6219 pact = &act;
6220 } else {
6221 pact = NULL;
6224 ret = get_errno(do_sigaction(arg1, pact, &oact));
6226 if (!is_error(ret) && arg3) {
6227 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6228 goto efault;
6229 old_act->_sa_handler = oact._sa_handler;
6230 old_act->sa_flags = oact.sa_flags;
6231 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6232 old_act->sa_mask.sig[1] = 0;
6233 old_act->sa_mask.sig[2] = 0;
6234 old_act->sa_mask.sig[3] = 0;
6235 unlock_user_struct(old_act, arg3, 1);
6237 #else
6238 struct target_old_sigaction *old_act;
6239 struct target_sigaction act, oact, *pact;
6240 if (arg2) {
6241 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6242 goto efault;
6243 act._sa_handler = old_act->_sa_handler;
6244 target_siginitset(&act.sa_mask, old_act->sa_mask);
6245 act.sa_flags = old_act->sa_flags;
6246 act.sa_restorer = old_act->sa_restorer;
6247 unlock_user_struct(old_act, arg2, 0);
6248 pact = &act;
6249 } else {
6250 pact = NULL;
6252 ret = get_errno(do_sigaction(arg1, pact, &oact));
6253 if (!is_error(ret) && arg3) {
6254 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6255 goto efault;
6256 old_act->_sa_handler = oact._sa_handler;
6257 old_act->sa_mask = oact.sa_mask.sig[0];
6258 old_act->sa_flags = oact.sa_flags;
6259 old_act->sa_restorer = oact.sa_restorer;
6260 unlock_user_struct(old_act, arg3, 1);
6262 #endif
6264 break;
6265 #endif
6266 case TARGET_NR_rt_sigaction:
6268 #if defined(TARGET_ALPHA)
6269 struct target_sigaction act, oact, *pact = 0;
6270 struct target_rt_sigaction *rt_act;
6271 /* ??? arg4 == sizeof(sigset_t). */
6272 if (arg2) {
6273 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6274 goto efault;
6275 act._sa_handler = rt_act->_sa_handler;
6276 act.sa_mask = rt_act->sa_mask;
6277 act.sa_flags = rt_act->sa_flags;
6278 act.sa_restorer = arg5;
6279 unlock_user_struct(rt_act, arg2, 0);
6280 pact = &act;
6282 ret = get_errno(do_sigaction(arg1, pact, &oact));
6283 if (!is_error(ret) && arg3) {
6284 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6285 goto efault;
6286 rt_act->_sa_handler = oact._sa_handler;
6287 rt_act->sa_mask = oact.sa_mask;
6288 rt_act->sa_flags = oact.sa_flags;
6289 unlock_user_struct(rt_act, arg3, 1);
6291 #else
6292 struct target_sigaction *act;
6293 struct target_sigaction *oact;
6295 if (arg2) {
6296 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6297 goto efault;
6298 } else
6299 act = NULL;
6300 if (arg3) {
6301 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6302 ret = -TARGET_EFAULT;
6303 goto rt_sigaction_fail;
6305 } else
6306 oact = NULL;
6307 ret = get_errno(do_sigaction(arg1, act, oact));
6308 rt_sigaction_fail:
6309 if (act)
6310 unlock_user_struct(act, arg2, 0);
6311 if (oact)
6312 unlock_user_struct(oact, arg3, 1);
6313 #endif
6315 break;
6316 #ifdef TARGET_NR_sgetmask /* not on alpha */
6317 case TARGET_NR_sgetmask:
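/* Legacy call: return the currently blocked signals packed into a
 * single word in the old (non-rt) sigset format. */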
6319 sigset_t cur_set;
6320 abi_ulong target_set;
6321 do_sigprocmask(0, NULL, &cur_set);
6322 host_to_target_old_sigset(&target_set, &cur_set);
6323 ret = target_set;
6325 break;
6326 #endif
6327 #ifdef TARGET_NR_ssetmask /* not on alpha */
6328 case TARGET_NR_ssetmask:
6330 sigset_t set, oset, cur_set;
6331 abi_ulong target_set = arg1;
6332 do_sigprocmask(0, NULL, &cur_set);
6333 target_to_host_old_sigset(&set, &target_set);
6334 sigorset(&set, &set, &cur_set);
6335 do_sigprocmask(SIG_SETMASK, &set, &oset);
6336 host_to_target_old_sigset(&target_set, &oset);
6337 ret = target_set;
6339 break;
6340 #endif
6341 #ifdef TARGET_NR_sigprocmask
6342 case TARGET_NR_sigprocmask:
6344 #if defined(TARGET_ALPHA)
6345 sigset_t set, oldset;
6346 abi_ulong mask;
6347 int how;
6349 switch (arg1) {
6350 case TARGET_SIG_BLOCK:
6351 how = SIG_BLOCK;
6352 break;
6353 case TARGET_SIG_UNBLOCK:
6354 how = SIG_UNBLOCK;
6355 break;
6356 case TARGET_SIG_SETMASK:
6357 how = SIG_SETMASK;
6358 break;
6359 default:
6360 ret = -TARGET_EINVAL;
6361 goto fail;
6363 mask = arg2;
6364 target_to_host_old_sigset(&set, &mask);
6366 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6367 if (!is_error(ret)) {
6368 host_to_target_old_sigset(&mask, &oldset);
6369 ret = mask;
6370 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6372 #else
6373 sigset_t set, oldset, *set_ptr;
6374 int how;
6376 if (arg2) {
6377 switch (arg1) {
6378 case TARGET_SIG_BLOCK:
6379 how = SIG_BLOCK;
6380 break;
6381 case TARGET_SIG_UNBLOCK:
6382 how = SIG_UNBLOCK;
6383 break;
6384 case TARGET_SIG_SETMASK:
6385 how = SIG_SETMASK;
6386 break;
6387 default:
6388 ret = -TARGET_EINVAL;
6389 goto fail;
6391 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6392 goto efault;
6393 target_to_host_old_sigset(&set, p);
6394 unlock_user(p, arg2, 0);
6395 set_ptr = &set;
6396 } else {
6397 how = 0;
6398 set_ptr = NULL;
6400 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6401 if (!is_error(ret) && arg3) {
6402 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6403 goto efault;
6404 host_to_target_old_sigset(p, &oldset);
6405 unlock_user(p, arg3, sizeof(target_sigset_t));
6407 #endif
6409 break;
6410 #endif
6411 case TARGET_NR_rt_sigprocmask:
6413 int how = arg1;
6414 sigset_t set, oldset, *set_ptr;
6416 if (arg2) {
6417 switch(how) {
6418 case TARGET_SIG_BLOCK:
6419 how = SIG_BLOCK;
6420 break;
6421 case TARGET_SIG_UNBLOCK:
6422 how = SIG_UNBLOCK;
6423 break;
6424 case TARGET_SIG_SETMASK:
6425 how = SIG_SETMASK;
6426 break;
6427 default:
6428 ret = -TARGET_EINVAL;
6429 goto fail;
6431 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6432 goto efault;
6433 target_to_host_sigset(&set, p);
6434 unlock_user(p, arg2, 0);
6435 set_ptr = &set;
6436 } else {
6437 how = 0;
6438 set_ptr = NULL;
6440 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6441 if (!is_error(ret) && arg3) {
6442 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6443 goto efault;
6444 host_to_target_sigset(p, &oldset);
6445 unlock_user(p, arg3, sizeof(target_sigset_t));
6448 break;
6449 #ifdef TARGET_NR_sigpending
6450 case TARGET_NR_sigpending:
6452 sigset_t set;
6453 ret = get_errno(sigpending(&set));
6454 if (!is_error(ret)) {
6455 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6456 goto efault;
6457 host_to_target_old_sigset(p, &set);
6458 unlock_user(p, arg1, sizeof(target_sigset_t));
6461 break;
6462 #endif
6463 case TARGET_NR_rt_sigpending:
6465 sigset_t set;
6466 ret = get_errno(sigpending(&set));
6467 if (!is_error(ret)) {
6468 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6469 goto efault;
6470 host_to_target_sigset(p, &set);
6471 unlock_user(p, arg1, sizeof(target_sigset_t));
6474 break;
6475 #ifdef TARGET_NR_sigsuspend
6476 case TARGET_NR_sigsuspend:
6478 sigset_t set;
6479 #if defined(TARGET_ALPHA)
6480 abi_ulong mask = arg1;
6481 target_to_host_old_sigset(&set, &mask);
6482 #else
6483 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6484 goto efault;
6485 target_to_host_old_sigset(&set, p);
6486 unlock_user(p, arg1, 0);
6487 #endif
6488 ret = get_errno(sigsuspend(&set));
6490 break;
6491 #endif
6492 case TARGET_NR_rt_sigsuspend:
6494 sigset_t set;
6495 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6496 goto efault;
6497 target_to_host_sigset(&set, p);
6498 unlock_user(p, arg1, 0);
6499 ret = get_errno(sigsuspend(&set));
6501 break;
6502 case TARGET_NR_rt_sigtimedwait:
6504 sigset_t set;
6505 struct timespec uts, *puts;
6506 siginfo_t uinfo;
6508 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6509 goto efault;
6510 target_to_host_sigset(&set, p);
6511 unlock_user(p, arg1, 0);
6512 if (arg3) {
6513 puts = &uts;
6514 target_to_host_timespec(puts, arg3);
6515 } else {
6516 puts = NULL;
6518 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6519 if (!is_error(ret)) {
6520 if (arg2) {
6521 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6523 if (!p) {
6524 goto efault;
6526 host_to_target_siginfo(p, &uinfo);
6527 unlock_user(p, arg2, sizeof(target_siginfo_t));
6529 ret = host_to_target_signal(ret);
6532 break;
6533 case TARGET_NR_rt_sigqueueinfo:
6535 siginfo_t uinfo;
5536 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6537 goto efault;
6538 target_to_host_siginfo(&uinfo, p);
6539 unlock_user(p, arg3, 0);
6540 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6542 break;
6543 #ifdef TARGET_NR_sigreturn
6544 case TARGET_NR_sigreturn:
6545 /* NOTE: ret is eax, so no transcoding needs to be done */
6546 ret = do_sigreturn(cpu_env);
6547 break;
6548 #endif
6549 case TARGET_NR_rt_sigreturn:
6550 /* NOTE: ret is eax, so no transcoding needs to be done */
6551 ret = do_rt_sigreturn(cpu_env);
6552 break;
6553 case TARGET_NR_sethostname:
6554 if (!(p = lock_user_string(arg1)))
6555 goto efault;
6556 ret = get_errno(sethostname(p, arg2));
6557 unlock_user(p, arg1, 0);
6558 break;
6559 case TARGET_NR_setrlimit:
6561 int resource = target_to_host_resource(arg1);
6562 struct target_rlimit *target_rlim;
6563 struct rlimit rlim;
6564 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6565 goto efault;
6566 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6567 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6568 unlock_user_struct(target_rlim, arg2, 0);
6569 ret = get_errno(setrlimit(resource, &rlim));
6571 break;
6572 case TARGET_NR_getrlimit:
6574 int resource = target_to_host_resource(arg1);
6575 struct target_rlimit *target_rlim;
6576 struct rlimit rlim;
6578 ret = get_errno(getrlimit(resource, &rlim));
6579 if (!is_error(ret)) {
6580 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6581 goto efault;
6582 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6583 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6584 unlock_user_struct(target_rlim, arg2, 1);
6587 break;
6588 case TARGET_NR_getrusage:
6590 struct rusage rusage;
6591 ret = get_errno(getrusage(arg1, &rusage));
6592 if (!is_error(ret)) {
6593 ret = host_to_target_rusage(arg2, &rusage);
6596 break;
6597 case TARGET_NR_gettimeofday:
6599 struct timeval tv;
6600 ret = get_errno(gettimeofday(&tv, NULL));
6601 if (!is_error(ret)) {
6602 if (copy_to_user_timeval(arg1, &tv))
6603 goto efault;
6606 break;
6607 case TARGET_NR_settimeofday:
6609 struct timeval tv, *ptv = NULL;
6610 struct timezone tz, *ptz = NULL;
6612 if (arg1) {
6613 if (copy_from_user_timeval(&tv, arg1)) {
6614 goto efault;
6616 ptv = &tv;
6619 if (arg2) {
6620 if (copy_from_user_timezone(&tz, arg2)) {
6621 goto efault;
6623 ptz = &tz;
6626 ret = get_errno(settimeofday(ptv, ptz));
6628 break;
6629 #if defined(TARGET_NR_select)
6630 case TARGET_NR_select:
6631 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6632 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6633 #else
6635 struct target_sel_arg_struct *sel;
6636 abi_ulong inp, outp, exp, tvp;
6637 long nsel;
6639 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6640 goto efault;
6641 nsel = tswapal(sel->n);
6642 inp = tswapal(sel->inp);
6643 outp = tswapal(sel->outp);
6644 exp = tswapal(sel->exp);
6645 tvp = tswapal(sel->tvp);
6646 unlock_user_struct(sel, arg1, 0);
6647 ret = do_select(nsel, inp, outp, exp, tvp);
6649 #endif
6650 break;
6651 #endif
6652 #ifdef TARGET_NR_pselect6
6653 case TARGET_NR_pselect6:
6655 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6656 fd_set rfds, wfds, efds;
6657 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6658 struct timespec ts, *ts_ptr;
6661 /* The 6th arg is actually two args smashed together,
6662 * so we cannot use the C library. */
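/* arg6 points at a pair of abi_ulongs: the guest address of the
 * sigset and its size in bytes, unpacked further down. */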
6664 sigset_t set;
6665 struct {
6666 sigset_t *set;
6667 size_t size;
6668 } sig, *sig_ptr;
6670 abi_ulong arg_sigset, arg_sigsize, *arg7;
6671 target_sigset_t *target_sigset;
6673 n = arg1;
6674 rfd_addr = arg2;
6675 wfd_addr = arg3;
6676 efd_addr = arg4;
6677 ts_addr = arg5;
6679 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6680 if (ret) {
6681 goto fail;
6683 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6684 if (ret) {
6685 goto fail;
6687 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6688 if (ret) {
6689 goto fail;
6693 /* This takes a timespec, and not a timeval, so we cannot
6694 * use the do_select() helper ... */
6696 if (ts_addr) {
6697 if (target_to_host_timespec(&ts, ts_addr)) {
6698 goto efault;
6700 ts_ptr = &ts;
6701 } else {
6702 ts_ptr = NULL;
6705 /* Extract the two packed args for the sigset */
6706 if (arg6) {
6707 sig_ptr = &sig;
6708 sig.size = _NSIG / 8;
6710 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6711 if (!arg7) {
6712 goto efault;
6714 arg_sigset = tswapal(arg7[0]);
6715 arg_sigsize = tswapal(arg7[1]);
6716 unlock_user(arg7, arg6, 0);
6718 if (arg_sigset) {
6719 sig.set = &set;
6720 if (arg_sigsize != sizeof(*target_sigset)) {
6721 /* Like the kernel, we enforce correct size sigsets */
6722 ret = -TARGET_EINVAL;
6723 goto fail;
6725 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6726 sizeof(*target_sigset), 1);
6727 if (!target_sigset) {
6728 goto efault;
6730 target_to_host_sigset(&set, target_sigset);
6731 unlock_user(target_sigset, arg_sigset, 0);
6732 } else {
6733 sig.set = NULL;
6735 } else {
6736 sig_ptr = NULL;
6739 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6740 ts_ptr, sig_ptr));
6742 if (!is_error(ret)) {
6743 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6744 goto efault;
6745 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6746 goto efault;
6747 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6748 goto efault;
6750 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6751 goto efault;
6754 break;
6755 #endif
6756 case TARGET_NR_symlink:
6758 void *p2;
6759 p = lock_user_string(arg1);
6760 p2 = lock_user_string(arg2);
6761 if (!p || !p2)
6762 ret = -TARGET_EFAULT;
6763 else
6764 ret = get_errno(symlink(p, p2));
6765 unlock_user(p2, arg2, 0);
6766 unlock_user(p, arg1, 0);
6768 break;
6769 #if defined(TARGET_NR_symlinkat)
6770 case TARGET_NR_symlinkat:
6772 void *p2;
6773 p = lock_user_string(arg1);
6774 p2 = lock_user_string(arg3);
6775 if (!p || !p2)
6776 ret = -TARGET_EFAULT;
6777 else
6778 ret = get_errno(symlinkat(p, arg2, p2));
6779 unlock_user(p2, arg3, 0);
6780 unlock_user(p, arg1, 0);
6782 break;
6783 #endif
6784 #ifdef TARGET_NR_oldlstat
6785 case TARGET_NR_oldlstat:
6786 goto unimplemented;
6787 #endif
6788 case TARGET_NR_readlink:
6790 void *p2;
6791 p = lock_user_string(arg1);
6792 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6793 if (!p || !p2) {
6794 ret = -TARGET_EFAULT;
6795 } else if (!arg3) {
6796 /* Short circuit this for the magic exe check. */
6797 ret = -TARGET_EINVAL;
6798 } else if (is_proc_myself((const char *)p, "exe")) {
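/* The guest is reading its own /proc/self/exe link, so report
 * the path of the emulated binary rather than the QEMU binary. */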
6799 char real[PATH_MAX], *temp;
6800 temp = realpath(exec_path, real);
6801 /* Return value is # of bytes that we wrote to the buffer. */
6802 if (temp == NULL) {
6803 ret = get_errno(-1);
6804 } else {
6805 /* Don't worry about sign mismatch as earlier mapping
6806 * logic would have thrown a bad address error. */
6807 ret = MIN(strlen(real), arg3);
6808 /* We cannot NUL terminate the string. */
6809 memcpy(p2, real, ret);
6811 } else {
6812 ret = get_errno(readlink(path(p), p2, arg3));
6814 unlock_user(p2, arg2, ret);
6815 unlock_user(p, arg1, 0);
6817 break;
6818 #if defined(TARGET_NR_readlinkat)
6819 case TARGET_NR_readlinkat:
6821 void *p2;
6822 p = lock_user_string(arg2);
6823 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6824 if (!p || !p2) {
6825 ret = -TARGET_EFAULT;
6826 } else if (is_proc_myself((const char *)p, "exe")) {
6827 char real[PATH_MAX], *temp;
6828 temp = realpath(exec_path, real);
6829 ret = temp == NULL ? get_errno(-1) : strlen(real);
6830 snprintf((char *)p2, arg4, "%s", real);
6831 } else {
6832 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6834 unlock_user(p2, arg3, ret);
6835 unlock_user(p, arg2, 0);
6837 break;
6838 #endif
6839 #ifdef TARGET_NR_uselib
6840 case TARGET_NR_uselib:
6841 goto unimplemented;
6842 #endif
6843 #ifdef TARGET_NR_swapon
6844 case TARGET_NR_swapon:
6845 if (!(p = lock_user_string(arg1)))
6846 goto efault;
6847 ret = get_errno(swapon(p, arg2));
6848 unlock_user(p, arg1, 0);
6849 break;
6850 #endif
6851 case TARGET_NR_reboot:
6852 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6853 /* arg4 is only used for LINUX_REBOOT_CMD_RESTART2; it must be ignored in all other cases */
6854 p = lock_user_string(arg4);
6855 if (!p) {
6856 goto efault;
6858 ret = get_errno(reboot(arg1, arg2, arg3, p));
6859 unlock_user(p, arg4, 0);
6860 } else {
6861 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6863 break;
6864 #ifdef TARGET_NR_readdir
6865 case TARGET_NR_readdir:
6866 goto unimplemented;
6867 #endif
6868 #ifdef TARGET_NR_mmap
6869 case TARGET_NR_mmap:
6870 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6871 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6872 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6873 || defined(TARGET_S390X)
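/* On these targets the old mmap syscall takes a single guest pointer
 * to a block of six arguments instead of passing them individually,
 * so unpack the block first. */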
6875 abi_ulong *v;
6876 abi_ulong v1, v2, v3, v4, v5, v6;
6877 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6878 goto efault;
6879 v1 = tswapal(v[0]);
6880 v2 = tswapal(v[1]);
6881 v3 = tswapal(v[2]);
6882 v4 = tswapal(v[3]);
6883 v5 = tswapal(v[4]);
6884 v6 = tswapal(v[5]);
6885 unlock_user(v, arg1, 0);
6886 ret = get_errno(target_mmap(v1, v2, v3,
6887 target_to_host_bitmask(v4, mmap_flags_tbl),
6888 v5, v6));
6890 #else
6891 ret = get_errno(target_mmap(arg1, arg2, arg3,
6892 target_to_host_bitmask(arg4, mmap_flags_tbl),
6893 arg5,
6894 arg6));
6895 #endif
6896 break;
6897 #endif
6898 #ifdef TARGET_NR_mmap2
6899 case TARGET_NR_mmap2:
6900 #ifndef MMAP_SHIFT
6901 #define MMAP_SHIFT 12
6902 #endif
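/* mmap2 passes the file offset in units of 1 << MMAP_SHIFT
 * (4 KiB by default), hence the shift before calling target_mmap. */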
6903 ret = get_errno(target_mmap(arg1, arg2, arg3,
6904 target_to_host_bitmask(arg4, mmap_flags_tbl),
6905 arg5,
6906 arg6 << MMAP_SHIFT));
6907 break;
6908 #endif
6909 case TARGET_NR_munmap:
6910 ret = get_errno(target_munmap(arg1, arg2));
6911 break;
6912 case TARGET_NR_mprotect:
6914 TaskState *ts = cpu->opaque;
6915 /* Special hack to detect libc making the stack executable. */
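/* If the request covers the guest stack with PROT_GROWSDOWN set,
 * drop the flag and widen the range down to the stack limit instead. */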
6916 if ((arg3 & PROT_GROWSDOWN)
6917 && arg1 >= ts->info->stack_limit
6918 && arg1 <= ts->info->start_stack) {
6919 arg3 &= ~PROT_GROWSDOWN;
6920 arg2 = arg2 + arg1 - ts->info->stack_limit;
6921 arg1 = ts->info->stack_limit;
6924 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6925 break;
6926 #ifdef TARGET_NR_mremap
6927 case TARGET_NR_mremap:
6928 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6929 break;
6930 #endif
6931 /* ??? msync/mlock/munlock are broken for softmmu. */
6932 #ifdef TARGET_NR_msync
6933 case TARGET_NR_msync:
6934 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6935 break;
6936 #endif
6937 #ifdef TARGET_NR_mlock
6938 case TARGET_NR_mlock:
6939 ret = get_errno(mlock(g2h(arg1), arg2));
6940 break;
6941 #endif
6942 #ifdef TARGET_NR_munlock
6943 case TARGET_NR_munlock:
6944 ret = get_errno(munlock(g2h(arg1), arg2));
6945 break;
6946 #endif
6947 #ifdef TARGET_NR_mlockall
6948 case TARGET_NR_mlockall:
6949 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
6950 break;
6951 #endif
6952 #ifdef TARGET_NR_munlockall
6953 case TARGET_NR_munlockall:
6954 ret = get_errno(munlockall());
6955 break;
6956 #endif
6957 case TARGET_NR_truncate:
6958 if (!(p = lock_user_string(arg1)))
6959 goto efault;
6960 ret = get_errno(truncate(p, arg2));
6961 unlock_user(p, arg1, 0);
6962 break;
6963 case TARGET_NR_ftruncate:
6964 ret = get_errno(ftruncate(arg1, arg2));
6965 break;
6966 case TARGET_NR_fchmod:
6967 ret = get_errno(fchmod(arg1, arg2));
6968 break;
6969 #if defined(TARGET_NR_fchmodat)
6970 case TARGET_NR_fchmodat:
6971 if (!(p = lock_user_string(arg2)))
6972 goto efault;
6973 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6974 unlock_user(p, arg2, 0);
6975 break;
6976 #endif
6977 case TARGET_NR_getpriority:
6978 /* Note that negative values are valid for getpriority, so we must
6979 differentiate based on errno settings. */
6980 errno = 0;
6981 ret = getpriority(arg1, arg2);
6982 if (ret == -1 && errno != 0) {
6983 ret = -host_to_target_errno(errno);
6984 break;
6986 #ifdef TARGET_ALPHA
6987 /* Return value is the unbiased priority. Signal no error. */
6988 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6989 #else
6990 /* Return value is a biased priority to avoid negative numbers. */
6991 ret = 20 - ret;
6992 #endif
6993 break;
6994 case TARGET_NR_setpriority:
6995 ret = get_errno(setpriority(arg1, arg2, arg3));
6996 break;
6997 #ifdef TARGET_NR_profil
6998 case TARGET_NR_profil:
6999 goto unimplemented;
7000 #endif
7001 case TARGET_NR_statfs:
7002 if (!(p = lock_user_string(arg1)))
7003 goto efault;
7004 ret = get_errno(statfs(path(p), &stfs));
7005 unlock_user(p, arg1, 0);
7006 convert_statfs:
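/* Shared tail: TARGET_NR_fstatfs jumps here to reuse the
 * host-to-target statfs conversion below. */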
7007 if (!is_error(ret)) {
7008 struct target_statfs *target_stfs;
7010 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
7011 goto efault;
7012 __put_user(stfs.f_type, &target_stfs->f_type);
7013 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7014 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7015 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7016 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7017 __put_user(stfs.f_files, &target_stfs->f_files);
7018 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7019 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7020 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7021 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7022 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7023 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7024 unlock_user_struct(target_stfs, arg2, 1);
7026 break;
7027 case TARGET_NR_fstatfs:
7028 ret = get_errno(fstatfs(arg1, &stfs));
7029 goto convert_statfs;
7030 #ifdef TARGET_NR_statfs64
7031 case TARGET_NR_statfs64:
7032 if (!(p = lock_user_string(arg1)))
7033 goto efault;
7034 ret = get_errno(statfs(path(p), &stfs));
7035 unlock_user(p, arg1, 0);
7036 convert_statfs64:
7037 if (!is_error(ret)) {
7038 struct target_statfs64 *target_stfs;
7040 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7041 goto efault;
7042 __put_user(stfs.f_type, &target_stfs->f_type);
7043 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7044 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7045 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7046 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7047 __put_user(stfs.f_files, &target_stfs->f_files);
7048 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7049 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7050 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7051 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7052 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7053 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7054 unlock_user_struct(target_stfs, arg3, 1);
7056 break;
7057 case TARGET_NR_fstatfs64:
7058 ret = get_errno(fstatfs(arg1, &stfs));
7059 goto convert_statfs64;
7060 #endif
7061 #ifdef TARGET_NR_ioperm
7062 case TARGET_NR_ioperm:
7063 goto unimplemented;
7064 #endif
7065 #ifdef TARGET_NR_socketcall
7066 case TARGET_NR_socketcall:
7067 ret = do_socketcall(arg1, arg2);
7068 break;
7069 #endif
7070 #ifdef TARGET_NR_accept
7071 case TARGET_NR_accept:
7072 ret = do_accept4(arg1, arg2, arg3, 0);
7073 break;
7074 #endif
7075 #ifdef TARGET_NR_accept4
7076 case TARGET_NR_accept4:
7077 #ifdef CONFIG_ACCEPT4
7078 ret = do_accept4(arg1, arg2, arg3, arg4);
7079 #else
7080 goto unimplemented;
7081 #endif
7082 break;
7083 #endif
7084 #ifdef TARGET_NR_bind
7085 case TARGET_NR_bind:
7086 ret = do_bind(arg1, arg2, arg3);
7087 break;
7088 #endif
7089 #ifdef TARGET_NR_connect
7090 case TARGET_NR_connect:
7091 ret = do_connect(arg1, arg2, arg3);
7092 break;
7093 #endif
7094 #ifdef TARGET_NR_getpeername
7095 case TARGET_NR_getpeername:
7096 ret = do_getpeername(arg1, arg2, arg3);
7097 break;
7098 #endif
7099 #ifdef TARGET_NR_getsockname
7100 case TARGET_NR_getsockname:
7101 ret = do_getsockname(arg1, arg2, arg3);
7102 break;
7103 #endif
7104 #ifdef TARGET_NR_getsockopt
7105 case TARGET_NR_getsockopt:
7106 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
7107 break;
7108 #endif
7109 #ifdef TARGET_NR_listen
7110 case TARGET_NR_listen:
7111 ret = get_errno(listen(arg1, arg2));
7112 break;
7113 #endif
7114 #ifdef TARGET_NR_recv
7115 case TARGET_NR_recv:
7116 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
7117 break;
7118 #endif
7119 #ifdef TARGET_NR_recvfrom
7120 case TARGET_NR_recvfrom:
7121 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
7122 break;
7123 #endif
7124 #ifdef TARGET_NR_recvmsg
7125 case TARGET_NR_recvmsg:
7126 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
7127 break;
7128 #endif
7129 #ifdef TARGET_NR_send
7130 case TARGET_NR_send:
7131 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
7132 break;
7133 #endif
7134 #ifdef TARGET_NR_sendmsg
7135 case TARGET_NR_sendmsg:
7136 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
7137 break;
7138 #endif
7139 #ifdef TARGET_NR_sendmmsg
7140 case TARGET_NR_sendmmsg:
7141 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
7142 break;
7143 case TARGET_NR_recvmmsg:
7144 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
7145 break;
7146 #endif
7147 #ifdef TARGET_NR_sendto
7148 case TARGET_NR_sendto:
7149 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
7150 break;
7151 #endif
7152 #ifdef TARGET_NR_shutdown
7153 case TARGET_NR_shutdown:
7154 ret = get_errno(shutdown(arg1, arg2));
7155 break;
7156 #endif
7157 #ifdef TARGET_NR_socket
7158 case TARGET_NR_socket:
7159 ret = do_socket(arg1, arg2, arg3);
7160 break;
7161 #endif
7162 #ifdef TARGET_NR_socketpair
7163 case TARGET_NR_socketpair:
7164 ret = do_socketpair(arg1, arg2, arg3, arg4);
7165 break;
7166 #endif
7167 #ifdef TARGET_NR_setsockopt
7168 case TARGET_NR_setsockopt:
7169 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7170 break;
7171 #endif
7173 case TARGET_NR_syslog:
7174 if (!(p = lock_user_string(arg2)))
7175 goto efault;
7176 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7177 unlock_user(p, arg2, 0);
7178 break;
7180 case TARGET_NR_setitimer:
7182 struct itimerval value, ovalue, *pvalue;
7184 if (arg2) {
7185 pvalue = &value;
7186 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7187 || copy_from_user_timeval(&pvalue->it_value,
7188 arg2 + sizeof(struct target_timeval)))
7189 goto efault;
7190 } else {
7191 pvalue = NULL;
7193 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7194 if (!is_error(ret) && arg3) {
7195 if (copy_to_user_timeval(arg3,
7196 &ovalue.it_interval)
7197 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7198 &ovalue.it_value))
7199 goto efault;
7202 break;
7203 case TARGET_NR_getitimer:
7205 struct itimerval value;
7207 ret = get_errno(getitimer(arg1, &value));
7208 if (!is_error(ret) && arg2) {
7209 if (copy_to_user_timeval(arg2,
7210 &value.it_interval)
7211 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7212 &value.it_value))
7213 goto efault;
7216 break;
7217 case TARGET_NR_stat:
7218 if (!(p = lock_user_string(arg1)))
7219 goto efault;
7220 ret = get_errno(stat(path(p), &st));
7221 unlock_user(p, arg1, 0);
7222 goto do_stat;
7223 case TARGET_NR_lstat:
7224 if (!(p = lock_user_string(arg1)))
7225 goto efault;
7226 ret = get_errno(lstat(path(p), &st));
7227 unlock_user(p, arg1, 0);
7228 goto do_stat;
7229 case TARGET_NR_fstat:
7231 ret = get_errno(fstat(arg1, &st));
7232 do_stat:
7233 if (!is_error(ret)) {
7234 struct target_stat *target_st;
7236 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7237 goto efault;
7238 memset(target_st, 0, sizeof(*target_st));
7239 __put_user(st.st_dev, &target_st->st_dev);
7240 __put_user(st.st_ino, &target_st->st_ino);
7241 __put_user(st.st_mode, &target_st->st_mode);
7242 __put_user(st.st_uid, &target_st->st_uid);
7243 __put_user(st.st_gid, &target_st->st_gid);
7244 __put_user(st.st_nlink, &target_st->st_nlink);
7245 __put_user(st.st_rdev, &target_st->st_rdev);
7246 __put_user(st.st_size, &target_st->st_size);
7247 __put_user(st.st_blksize, &target_st->st_blksize);
7248 __put_user(st.st_blocks, &target_st->st_blocks);
7249 __put_user(st.st_atime, &target_st->target_st_atime);
7250 __put_user(st.st_mtime, &target_st->target_st_mtime);
7251 __put_user(st.st_ctime, &target_st->target_st_ctime);
7252 unlock_user_struct(target_st, arg2, 1);
7255 break;
7256 #ifdef TARGET_NR_olduname
7257 case TARGET_NR_olduname:
7258 goto unimplemented;
7259 #endif
7260 #ifdef TARGET_NR_iopl
7261 case TARGET_NR_iopl:
7262 goto unimplemented;
7263 #endif
7264 case TARGET_NR_vhangup:
7265 ret = get_errno(vhangup());
7266 break;
7267 #ifdef TARGET_NR_idle
7268 case TARGET_NR_idle:
7269 goto unimplemented;
7270 #endif
7271 #ifdef TARGET_NR_syscall
7272 case TARGET_NR_syscall:
7273 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7274 arg6, arg7, arg8, 0);
7275 break;
7276 #endif
7277 case TARGET_NR_wait4:
7279 int status;
7280 abi_long status_ptr = arg2;
7281 struct rusage rusage, *rusage_ptr;
7282 abi_ulong target_rusage = arg4;
7283 abi_long rusage_err;
7284 if (target_rusage)
7285 rusage_ptr = &rusage;
7286 else
7287 rusage_ptr = NULL;
7288 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7289 if (!is_error(ret)) {
7290 if (status_ptr && ret) {
7291 status = host_to_target_waitstatus(status);
7292 if (put_user_s32(status, status_ptr))
7293 goto efault;
7295 if (target_rusage) {
7296 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7297 if (rusage_err) {
7298 ret = rusage_err;
7303 break;
7304 #ifdef TARGET_NR_swapoff
7305 case TARGET_NR_swapoff:
7306 if (!(p = lock_user_string(arg1)))
7307 goto efault;
7308 ret = get_errno(swapoff(p));
7309 unlock_user(p, arg1, 0);
7310 break;
7311 #endif
7312 case TARGET_NR_sysinfo:
7314 struct target_sysinfo *target_value;
7315 struct sysinfo value;
7316 ret = get_errno(sysinfo(&value));
7317 if (!is_error(ret) && arg1)
7319 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7320 goto efault;
7321 __put_user(value.uptime, &target_value->uptime);
7322 __put_user(value.loads[0], &target_value->loads[0]);
7323 __put_user(value.loads[1], &target_value->loads[1]);
7324 __put_user(value.loads[2], &target_value->loads[2]);
7325 __put_user(value.totalram, &target_value->totalram);
7326 __put_user(value.freeram, &target_value->freeram);
7327 __put_user(value.sharedram, &target_value->sharedram);
7328 __put_user(value.bufferram, &target_value->bufferram);
7329 __put_user(value.totalswap, &target_value->totalswap);
7330 __put_user(value.freeswap, &target_value->freeswap);
7331 __put_user(value.procs, &target_value->procs);
7332 __put_user(value.totalhigh, &target_value->totalhigh);
7333 __put_user(value.freehigh, &target_value->freehigh);
7334 __put_user(value.mem_unit, &target_value->mem_unit);
7335 unlock_user_struct(target_value, arg1, 1);
7338 break;
7339 #ifdef TARGET_NR_ipc
7340 case TARGET_NR_ipc:
7341 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7342 break;
7343 #endif
7344 #ifdef TARGET_NR_semget
7345 case TARGET_NR_semget:
7346 ret = get_errno(semget(arg1, arg2, arg3));
7347 break;
7348 #endif
7349 #ifdef TARGET_NR_semop
7350 case TARGET_NR_semop:
7351 ret = do_semop(arg1, arg2, arg3);
7352 break;
7353 #endif
7354 #ifdef TARGET_NR_semctl
7355 case TARGET_NR_semctl:
7356 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
7357 break;
7358 #endif
7359 #ifdef TARGET_NR_msgctl
7360 case TARGET_NR_msgctl:
7361 ret = do_msgctl(arg1, arg2, arg3);
7362 break;
7363 #endif
7364 #ifdef TARGET_NR_msgget
7365 case TARGET_NR_msgget:
7366 ret = get_errno(msgget(arg1, arg2));
7367 break;
7368 #endif
7369 #ifdef TARGET_NR_msgrcv
7370 case TARGET_NR_msgrcv:
7371 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7372 break;
7373 #endif
7374 #ifdef TARGET_NR_msgsnd
7375 case TARGET_NR_msgsnd:
7376 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7377 break;
7378 #endif
7379 #ifdef TARGET_NR_shmget
7380 case TARGET_NR_shmget:
7381 ret = get_errno(shmget(arg1, arg2, arg3));
7382 break;
7383 #endif
7384 #ifdef TARGET_NR_shmctl
7385 case TARGET_NR_shmctl:
7386 ret = do_shmctl(arg1, arg2, arg3);
7387 break;
7388 #endif
7389 #ifdef TARGET_NR_shmat
7390 case TARGET_NR_shmat:
7391 ret = do_shmat(arg1, arg2, arg3);
7392 break;
7393 #endif
7394 #ifdef TARGET_NR_shmdt
7395 case TARGET_NR_shmdt:
7396 ret = do_shmdt(arg1);
7397 break;
7398 #endif
7399 case TARGET_NR_fsync:
7400 ret = get_errno(fsync(arg1));
7401 break;
7402 case TARGET_NR_clone:
7403 /* Linux manages to have three different orderings for its
7404 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7405 * match the kernel's CONFIG_CLONE_* settings.
7406 * Microblaze is further special in that it uses a sixth
7407 * implicit argument to clone for the TLS pointer. */
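/* The reorderings below map each target's argument convention onto
 * the fixed parameter order expected by do_fork(). */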
7409 #if defined(TARGET_MICROBLAZE)
7410 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7411 #elif defined(TARGET_CLONE_BACKWARDS)
7412 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7413 #elif defined(TARGET_CLONE_BACKWARDS2)
7414 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7415 #else
7416 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7417 #endif
7418 break;
7419 #ifdef __NR_exit_group
7420 /* new thread calls */
7421 case TARGET_NR_exit_group:
7422 #ifdef TARGET_GPROF
7423 _mcleanup();
7424 #endif
7425 gdb_exit(cpu_env, arg1);
7426 ret = get_errno(exit_group(arg1));
7427 break;
7428 #endif
7429 case TARGET_NR_setdomainname:
7430 if (!(p = lock_user_string(arg1)))
7431 goto efault;
7432 ret = get_errno(setdomainname(p, arg2));
7433 unlock_user(p, arg1, 0);
7434 break;
7435 case TARGET_NR_uname:
7436 /* no need to transcode because we use the linux syscall */
7438 struct new_utsname * buf;
7440 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7441 goto efault;
7442 ret = get_errno(sys_uname(buf));
7443 if (!is_error(ret)) {
7444 /* Overwrite the native machine name with whatever is being
7445 emulated. */
7446 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7447 /* Allow the user to override the reported release. */
7448 if (qemu_uname_release && *qemu_uname_release)
7449 strcpy (buf->release, qemu_uname_release);
7451 unlock_user_struct(buf, arg1, 1);
7453 break;
7454 #ifdef TARGET_I386
7455 case TARGET_NR_modify_ldt:
7456 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7457 break;
7458 #if !defined(TARGET_X86_64)
7459 case TARGET_NR_vm86old:
7460 goto unimplemented;
7461 case TARGET_NR_vm86:
7462 ret = do_vm86(cpu_env, arg1, arg2);
7463 break;
7464 #endif
7465 #endif
7466 case TARGET_NR_adjtimex:
7467 goto unimplemented;
7468 #ifdef TARGET_NR_create_module
7469 case TARGET_NR_create_module:
7470 #endif
7471 case TARGET_NR_init_module:
7472 case TARGET_NR_delete_module:
7473 #ifdef TARGET_NR_get_kernel_syms
7474 case TARGET_NR_get_kernel_syms:
7475 #endif
7476 goto unimplemented;
7477 case TARGET_NR_quotactl:
7478 goto unimplemented;
7479 case TARGET_NR_getpgid:
7480 ret = get_errno(getpgid(arg1));
7481 break;
7482 case TARGET_NR_fchdir:
7483 ret = get_errno(fchdir(arg1));
7484 break;
7485 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7486 case TARGET_NR_bdflush:
7487 goto unimplemented;
7488 #endif
7489 #ifdef TARGET_NR_sysfs
7490 case TARGET_NR_sysfs:
7491 goto unimplemented;
7492 #endif
7493 case TARGET_NR_personality:
7494 ret = get_errno(personality(arg1));
7495 break;
7496 #ifdef TARGET_NR_afs_syscall
7497 case TARGET_NR_afs_syscall:
7498 goto unimplemented;
7499 #endif
7500 #ifdef TARGET_NR__llseek /* Not on alpha */
7501 case TARGET_NR__llseek:
7503 int64_t res;
7504 #if !defined(__NR_llseek)
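/* No host llseek: rebuild the 64-bit offset from its two 32-bit
 * halves and fall back to a plain lseek(). */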
7505 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7506 if (res == -1) {
7507 ret = get_errno(res);
7508 } else {
7509 ret = 0;
7511 #else
7512 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7513 #endif
7514 if ((ret == 0) && put_user_s64(res, arg4)) {
7515 goto efault;
7518 break;
7519 #endif
7520 case TARGET_NR_getdents:
7521 #ifdef __NR_getdents
7522 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
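/* Host and target dirent layouts differ here, so read into a bounce
 * buffer and repack every record into the target format. */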
7524 struct target_dirent *target_dirp;
7525 struct linux_dirent *dirp;
7526 abi_long count = arg3;
7528 dirp = malloc(count);
7529 if (!dirp) {
7530 ret = -TARGET_ENOMEM;
7531 goto fail;
7534 ret = get_errno(sys_getdents(arg1, dirp, count));
7535 if (!is_error(ret)) {
7536 struct linux_dirent *de;
7537 struct target_dirent *tde;
7538 int len = ret;
7539 int reclen, treclen;
7540 int count1, tnamelen;
7542 count1 = 0;
7543 de = dirp;
7544 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7545 goto efault;
7546 tde = target_dirp;
7547 while (len > 0) {
7548 reclen = de->d_reclen;
7549 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7550 assert(tnamelen >= 0);
7551 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7552 assert(count1 + treclen <= count);
7553 tde->d_reclen = tswap16(treclen);
7554 tde->d_ino = tswapal(de->d_ino);
7555 tde->d_off = tswapal(de->d_off);
7556 memcpy(tde->d_name, de->d_name, tnamelen);
7557 de = (struct linux_dirent *)((char *)de + reclen);
7558 len -= reclen;
7559 tde = (struct target_dirent *)((char *)tde + treclen);
7560 count1 += treclen;
7562 ret = count1;
7563 unlock_user(target_dirp, arg2, ret);
7565 free(dirp);
7567 #else
7569 struct linux_dirent *dirp;
7570 abi_long count = arg3;
7572 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7573 goto efault;
7574 ret = get_errno(sys_getdents(arg1, dirp, count));
7575 if (!is_error(ret)) {
7576 struct linux_dirent *de;
7577 int len = ret;
7578 int reclen;
7579 de = dirp;
7580 while (len > 0) {
7581 reclen = de->d_reclen;
7582 if (reclen > len)
7583 break;
7584 de->d_reclen = tswap16(reclen);
7585 tswapls(&de->d_ino);
7586 tswapls(&de->d_off);
7587 de = (struct linux_dirent *)((char *)de + reclen);
7588 len -= reclen;
7591 unlock_user(dirp, arg2, ret);
7593 #endif
7594 #else
7595 /* Implement getdents in terms of getdents64 */
7597 struct linux_dirent64 *dirp;
7598 abi_long count = arg3;
7600 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7601 if (!dirp) {
7602 goto efault;
7604 ret = get_errno(sys_getdents64(arg1, dirp, count));
7605 if (!is_error(ret)) {
7606 /* Convert the dirent64 structs to target dirent. We do this
7607 * in-place, since we can guarantee that a target_dirent is no
7608 * larger than a dirent64; however this means we have to be
7609 * careful to read everything before writing in the new format. */
7611 struct linux_dirent64 *de;
7612 struct target_dirent *tde;
7613 int len = ret;
7614 int tlen = 0;
7616 de = dirp;
7617 tde = (struct target_dirent *)dirp;
7618 while (len > 0) {
7619 int namelen, treclen;
7620 int reclen = de->d_reclen;
7621 uint64_t ino = de->d_ino;
7622 int64_t off = de->d_off;
7623 uint8_t type = de->d_type;
7625 namelen = strlen(de->d_name);
7626 treclen = offsetof(struct target_dirent, d_name)
7627 + namelen + 2;
7628 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
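/* The "+ 2" above reserves room for the name's trailing NUL and for
 * the d_type byte stored in the record's final byte below. */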
7630 memmove(tde->d_name, de->d_name, namelen + 1);
7631 tde->d_ino = tswapal(ino);
7632 tde->d_off = tswapal(off);
7633 tde->d_reclen = tswap16(treclen);
7634 /* The target_dirent type is in what was formerly a padding
7635 * byte at the end of the structure: */
7637 *(((char *)tde) + treclen - 1) = type;
7639 de = (struct linux_dirent64 *)((char *)de + reclen);
7640 tde = (struct target_dirent *)((char *)tde + treclen);
7641 len -= reclen;
7642 tlen += treclen;
7644 ret = tlen;
7646 unlock_user(dirp, arg2, ret);
7648 #endif
7649 break;
7650 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7651 case TARGET_NR_getdents64:
7653 struct linux_dirent64 *dirp;
7654 abi_long count = arg3;
7655 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7656 goto efault;
7657 ret = get_errno(sys_getdents64(arg1, dirp, count));
7658 if (!is_error(ret)) {
7659 struct linux_dirent64 *de;
7660 int len = ret;
7661 int reclen;
7662 de = dirp;
7663 while (len > 0) {
7664 reclen = de->d_reclen;
7665 if (reclen > len)
7666 break;
7667 de->d_reclen = tswap16(reclen);
7668 tswap64s((uint64_t *)&de->d_ino);
7669 tswap64s((uint64_t *)&de->d_off);
7670 de = (struct linux_dirent64 *)((char *)de + reclen);
7671 len -= reclen;
7674 unlock_user(dirp, arg2, ret);
7676 break;
7677 #endif /* TARGET_NR_getdents64 */
7678 #if defined(TARGET_NR__newselect)
7679 case TARGET_NR__newselect:
7680 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7681 break;
7682 #endif
7683 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7684 # ifdef TARGET_NR_poll
7685 case TARGET_NR_poll:
7686 # endif
7687 # ifdef TARGET_NR_ppoll
7688 case TARGET_NR_ppoll:
7689 # endif
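/* poll and ppoll share the pollfd marshalling below; ppoll also
 * converts its timespec and signal mask arguments. */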
7691 struct target_pollfd *target_pfd;
7692 unsigned int nfds = arg2;
7693 int timeout = arg3;
7694 struct pollfd *pfd;
7695 unsigned int i;
7697 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7698 if (!target_pfd)
7699 goto efault;
7701 pfd = alloca(sizeof(struct pollfd) * nfds);
7702 for(i = 0; i < nfds; i++) {
7703 pfd[i].fd = tswap32(target_pfd[i].fd);
7704 pfd[i].events = tswap16(target_pfd[i].events);
7707 # ifdef TARGET_NR_ppoll
7708 if (num == TARGET_NR_ppoll) {
7709 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7710 target_sigset_t *target_set;
7711 sigset_t _set, *set = &_set;
7713 if (arg3) {
7714 if (target_to_host_timespec(timeout_ts, arg3)) {
7715 unlock_user(target_pfd, arg1, 0);
7716 goto efault;
7718 } else {
7719 timeout_ts = NULL;
7722 if (arg4) {
7723 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7724 if (!target_set) {
7725 unlock_user(target_pfd, arg1, 0);
7726 goto efault;
7728 target_to_host_sigset(set, target_set);
7729 } else {
7730 set = NULL;
7733 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7735 if (!is_error(ret) && arg3) {
7736 host_to_target_timespec(arg3, timeout_ts);
7738 if (arg4) {
7739 unlock_user(target_set, arg4, 0);
7741 } else
7742 # endif
7743 ret = get_errno(poll(pfd, nfds, timeout));
7745 if (!is_error(ret)) {
7746 for(i = 0; i < nfds; i++) {
7747 target_pfd[i].revents = tswap16(pfd[i].revents);
7750 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7752 break;
7753 #endif
7754 case TARGET_NR_flock:
7755 /* NOTE: the flock constant seems to be the same for every
7756 Linux platform */
7757 ret = get_errno(flock(arg1, arg2));
7758 break;
7759 case TARGET_NR_readv:
7761 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7762 if (vec != NULL) {
7763 ret = get_errno(readv(arg1, vec, arg3));
7764 unlock_iovec(vec, arg2, arg3, 1);
7765 } else {
7766 ret = -host_to_target_errno(errno);
7769 break;
7770 case TARGET_NR_writev:
7772 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7773 if (vec != NULL) {
7774 ret = get_errno(writev(arg1, vec, arg3));
7775 unlock_iovec(vec, arg2, arg3, 0);
7776 } else {
7777 ret = -host_to_target_errno(errno);
7780 break;
7781 case TARGET_NR_getsid:
7782 ret = get_errno(getsid(arg1));
7783 break;
7784 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7785 case TARGET_NR_fdatasync:
7786 ret = get_errno(fdatasync(arg1));
7787 break;
7788 #endif
7789 case TARGET_NR__sysctl:
7790 /* We don't implement this, but ENOTDIR is always a safe
7791 return value. */
7792 ret = -TARGET_ENOTDIR;
7793 break;
7794 case TARGET_NR_sched_getaffinity:
7796 unsigned int mask_size;
7797 unsigned long *mask;
7800 /* sched_getaffinity needs multiples of ulong, so we need to take
7801 * care of mismatches between target ulong and host ulong sizes. */
7803 if (arg2 & (sizeof(abi_ulong) - 1)) {
7804 ret = -TARGET_EINVAL;
7805 break;
7807 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7809 mask = alloca(mask_size);
7810 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7812 if (!is_error(ret)) {
7813 if (ret > arg2) {
7814 /* More data returned than the caller's buffer will fit.
7815 * This only happens if sizeof(abi_long) < sizeof(long)
7816 * and the caller passed us a buffer holding an odd number
7817 * of abi_longs. If the host kernel is actually using the
7818 * extra 4 bytes then fail EINVAL; otherwise we can just
7819 * ignore them and only copy the interesting part. */
7821 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
7822 if (numcpus > arg2 * 8) {
7823 ret = -TARGET_EINVAL;
7824 break;
7826 ret = arg2;
7829 if (copy_to_user(arg3, mask, ret)) {
7830 goto efault;
7834 break;
7835 case TARGET_NR_sched_setaffinity:
7837 unsigned int mask_size;
7838 unsigned long *mask;
7841 /* sched_setaffinity needs multiples of ulong, so we need to take
7842 * care of mismatches between target ulong and host ulong sizes. */
7844 if (arg2 & (sizeof(abi_ulong) - 1)) {
7845 ret = -TARGET_EINVAL;
7846 break;
7848 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7850 mask = alloca(mask_size);
7851 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7852 goto efault;
7854 memcpy(mask, p, arg2);
7855 unlock_user_struct(p, arg3, 0); /* p was locked at arg3 above */
7857 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7859 break;
7860 case TARGET_NR_sched_setparam:
7862 struct sched_param *target_schp;
7863 struct sched_param schp;
7865 if (arg2 == 0) {
7866 return -TARGET_EINVAL;
7868 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7869 goto efault;
7870 schp.sched_priority = tswap32(target_schp->sched_priority);
7871 unlock_user_struct(target_schp, arg2, 0);
7872 ret = get_errno(sched_setparam(arg1, &schp));
7874 break;
7875 case TARGET_NR_sched_getparam:
7877 struct sched_param *target_schp;
7878 struct sched_param schp;
7880 if (arg2 == 0) {
7881 return -TARGET_EINVAL;
7883 ret = get_errno(sched_getparam(arg1, &schp));
7884 if (!is_error(ret)) {
7885 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7886 goto efault;
7887 target_schp->sched_priority = tswap32(schp.sched_priority);
7888 unlock_user_struct(target_schp, arg2, 1);
7891 break;
7892 case TARGET_NR_sched_setscheduler:
7894 struct sched_param *target_schp;
7895 struct sched_param schp;
7896 if (arg3 == 0) {
7897 return -TARGET_EINVAL;
7899 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7900 goto efault;
7901 schp.sched_priority = tswap32(target_schp->sched_priority);
7902 unlock_user_struct(target_schp, arg3, 0);
7903 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7905 break;
7906 case TARGET_NR_sched_getscheduler:
7907 ret = get_errno(sched_getscheduler(arg1));
7908 break;
7909 case TARGET_NR_sched_yield:
7910 ret = get_errno(sched_yield());
7911 break;
7912 case TARGET_NR_sched_get_priority_max:
7913 ret = get_errno(sched_get_priority_max(arg1));
7914 break;
7915 case TARGET_NR_sched_get_priority_min:
7916 ret = get_errno(sched_get_priority_min(arg1));
7917 break;
7918 case TARGET_NR_sched_rr_get_interval:
7920 struct timespec ts;
7921 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7922 if (!is_error(ret)) {
7923 ret = host_to_target_timespec(arg2, &ts);
7926 break;
7927 case TARGET_NR_nanosleep:
7929 struct timespec req, rem;
7930 target_to_host_timespec(&req, arg1);
7931 ret = get_errno(nanosleep(&req, &rem));
7932 if (is_error(ret) && arg2) {
7933 host_to_target_timespec(arg2, &rem);
7936 break;
7937 #ifdef TARGET_NR_query_module
7938 case TARGET_NR_query_module:
7939 goto unimplemented;
7940 #endif
7941 #ifdef TARGET_NR_nfsservctl
7942 case TARGET_NR_nfsservctl:
7943 goto unimplemented;
7944 #endif
7945 case TARGET_NR_prctl:
7946 switch (arg1) {
7947 case PR_GET_PDEATHSIG:
7949 int deathsig;
7950 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7951 if (!is_error(ret) && arg2
7952 && put_user_ual(deathsig, arg2)) {
7953 goto efault;
7955 break;
7957 #ifdef PR_GET_NAME
7958 case PR_GET_NAME:
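/* The task name buffer is a fixed 16 bytes (the kernel's
 * TASK_COMM_LEN), hence the constant sizes passed to lock_user. */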
7960 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7961 if (!name) {
7962 goto efault;
7964 ret = get_errno(prctl(arg1, (unsigned long)name,
7965 arg3, arg4, arg5));
7966 unlock_user(name, arg2, 16);
7967 break;
7969 case PR_SET_NAME:
7971 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7972 if (!name) {
7973 goto efault;
7975 ret = get_errno(prctl(arg1, (unsigned long)name,
7976 arg3, arg4, arg5));
7977 unlock_user(name, arg2, 0);
7978 break;
7980 #endif
7981 default:
7982 /* Most prctl options have no pointer arguments */
7983 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7984 break;
7986 break;
7987 #ifdef TARGET_NR_arch_prctl
7988 case TARGET_NR_arch_prctl:
7989 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7990 ret = do_arch_prctl(cpu_env, arg1, arg2);
7991 break;
7992 #else
7993 goto unimplemented;
7994 #endif
7995 #endif
7996 #ifdef TARGET_NR_pread64
7997 case TARGET_NR_pread64:
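/* ABIs that align 64-bit values to even register pairs insert a
 * padding argument, so shift the offset halves up by one slot. */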
7998 if (regpairs_aligned(cpu_env)) {
7999 arg4 = arg5;
8000 arg5 = arg6;
8002 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8003 goto efault;
8004 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
8005 unlock_user(p, arg2, ret);
8006 break;
8007 case TARGET_NR_pwrite64:
8008 if (regpairs_aligned(cpu_env)) {
8009 arg4 = arg5;
8010 arg5 = arg6;
8012 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8013 goto efault;
8014 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
8015 unlock_user(p, arg2, 0);
8016 break;
8017 #endif
8018 case TARGET_NR_getcwd:
8019 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
8020 goto efault;
8021 ret = get_errno(sys_getcwd1(p, arg2));
8022 unlock_user(p, arg1, ret);
8023 break;
8024 case TARGET_NR_capget:
8025 case TARGET_NR_capset:
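/* capget and capset share the marshalling below; 'num' selects
 * whether the data buffer is read from or written to the guest. */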
8027 struct target_user_cap_header *target_header;
8028 struct target_user_cap_data *target_data = NULL;
8029 struct __user_cap_header_struct header;
8030 struct __user_cap_data_struct data[2];
8031 struct __user_cap_data_struct *dataptr = NULL;
8032 int i, target_datalen;
8033 int data_items = 1;
8035 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
8036 goto efault;
8038 header.version = tswap32(target_header->version);
8039 header.pid = tswap32(target_header->pid);
8041 if (header.version != _LINUX_CAPABILITY_VERSION) {
8042 /* Versions 2 and up take a pointer to two user_data structs */
8043 data_items = 2;
8046 target_datalen = sizeof(*target_data) * data_items;
8048 if (arg2) {
8049 if (num == TARGET_NR_capget) {
8050 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8051 } else {
8052 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8054 if (!target_data) {
8055 unlock_user_struct(target_header, arg1, 0);
8056 goto efault;
8059 if (num == TARGET_NR_capset) {
8060 for (i = 0; i < data_items; i++) {
8061 data[i].effective = tswap32(target_data[i].effective);
8062 data[i].permitted = tswap32(target_data[i].permitted);
8063 data[i].inheritable = tswap32(target_data[i].inheritable);
8067 dataptr = data;
8070 if (num == TARGET_NR_capget) {
8071 ret = get_errno(capget(&header, dataptr));
8072 } else {
8073 ret = get_errno(capset(&header, dataptr));
8076 /* The kernel always updates version for both capget and capset */
8077 target_header->version = tswap32(header.version);
8078 unlock_user_struct(target_header, arg1, 1);
8080 if (arg2) {
8081 if (num == TARGET_NR_capget) {
8082 for (i = 0; i < data_items; i++) {
8083 target_data[i].effective = tswap32(data[i].effective);
8084 target_data[i].permitted = tswap32(data[i].permitted);
8085 target_data[i].inheritable = tswap32(data[i].inheritable);
8087 unlock_user(target_data, arg2, target_datalen);
8088 } else {
8089 unlock_user(target_data, arg2, 0);
8092 break;
8094 case TARGET_NR_sigaltstack:
8095 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
8096 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
8097 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
8098 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8099 break;
8100 #else
8101 goto unimplemented;
8102 #endif
8104 #ifdef CONFIG_SENDFILE
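/* sendfile/sendfile64: the optional offset is read from guest memory
 * (an abi-long for sendfile, a 64-bit value for sendfile64), passed to the
 * host call by pointer, and the updated offset is written back on success. */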
8105 case TARGET_NR_sendfile:
8107 off_t *offp = NULL;
8108 off_t off;
8109 if (arg3) {
8110 ret = get_user_sal(off, arg3);
8111 if (is_error(ret)) {
8112 break;
8114 offp = &off;
8116 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8117 if (!is_error(ret) && arg3) {
8118 abi_long ret2 = put_user_sal(off, arg3);
8119 if (is_error(ret2)) {
8120 ret = ret2;
8123 break;
8125 #ifdef TARGET_NR_sendfile64
8126 case TARGET_NR_sendfile64:
8128 off_t *offp = NULL;
8129 off_t off;
8130 if (arg3) {
8131 ret = get_user_s64(off, arg3);
8132 if (is_error(ret)) {
8133 break;
8135 offp = &off;
8137 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8138 if (!is_error(ret) && arg3) {
8139 abi_long ret2 = put_user_s64(off, arg3);
8140 if (is_error(ret2)) {
8141 ret = ret2;
8144 break;
8146 #endif
8147 #else
8148 case TARGET_NR_sendfile:
8149 #ifdef TARGET_NR_sendfile64
8150 case TARGET_NR_sendfile64:
8151 #endif
8152 goto unimplemented;
8153 #endif
8155 #ifdef TARGET_NR_getpmsg
8156 case TARGET_NR_getpmsg:
8157 goto unimplemented;
8158 #endif
8159 #ifdef TARGET_NR_putpmsg
8160 case TARGET_NR_putpmsg:
8161 goto unimplemented;
8162 #endif
8163 #ifdef TARGET_NR_vfork
8164 case TARGET_NR_vfork:
8165 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8166 0, 0, 0, 0));
8167 break;
8168 #endif
8169 #ifdef TARGET_NR_ugetrlimit
8170 case TARGET_NR_ugetrlimit:
8172 struct rlimit rlim;
8173 int resource = target_to_host_resource(arg1);
8174 ret = get_errno(getrlimit(resource, &rlim));
8175 if (!is_error(ret)) {
8176 struct target_rlimit *target_rlim;
8177 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8178 goto efault;
8179 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8180 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8181 unlock_user_struct(target_rlim, arg2, 1);
8183 break;
8185 #endif
8186 #ifdef TARGET_NR_truncate64
8187 case TARGET_NR_truncate64:
8188 if (!(p = lock_user_string(arg1)))
8189 goto efault;
8190 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8191 unlock_user(p, arg1, 0);
8192 break;
8193 #endif
8194 #ifdef TARGET_NR_ftruncate64
8195 case TARGET_NR_ftruncate64:
8196 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8197 break;
8198 #endif
8199 #ifdef TARGET_NR_stat64
8200 case TARGET_NR_stat64:
8201 if (!(p = lock_user_string(arg1)))
8202 goto efault;
8203 ret = get_errno(stat(path(p), &st));
8204 unlock_user(p, arg1, 0);
8205 if (!is_error(ret))
8206 ret = host_to_target_stat64(cpu_env, arg2, &st);
8207 break;
8208 #endif
8209 #ifdef TARGET_NR_lstat64
8210 case TARGET_NR_lstat64:
8211 if (!(p = lock_user_string(arg1)))
8212 goto efault;
8213 ret = get_errno(lstat(path(p), &st));
8214 unlock_user(p, arg1, 0);
8215 if (!is_error(ret))
8216 ret = host_to_target_stat64(cpu_env, arg2, &st);
8217 break;
8218 #endif
8219 #ifdef TARGET_NR_fstat64
8220 case TARGET_NR_fstat64:
8221 ret = get_errno(fstat(arg1, &st));
8222 if (!is_error(ret))
8223 ret = host_to_target_stat64(cpu_env, arg2, &st);
8224 break;
8225 #endif
8226 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8227 #ifdef TARGET_NR_fstatat64
8228 case TARGET_NR_fstatat64:
8229 #endif
8230 #ifdef TARGET_NR_newfstatat
8231 case TARGET_NR_newfstatat:
8232 #endif
8233 if (!(p = lock_user_string(arg2)))
8234 goto efault;
8235 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8236 if (!is_error(ret))
8237 ret = host_to_target_stat64(cpu_env, arg3, &st);
8238 break;
8239 #endif
8240 case TARGET_NR_lchown:
8241 if (!(p = lock_user_string(arg1)))
8242 goto efault;
8243 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8244 unlock_user(p, arg1, 0);
8245 break;
8246 #ifdef TARGET_NR_getuid
8247 case TARGET_NR_getuid:
8248 ret = get_errno(high2lowuid(getuid()));
8249 break;
8250 #endif
8251 #ifdef TARGET_NR_getgid
8252 case TARGET_NR_getgid:
8253 ret = get_errno(high2lowgid(getgid()));
8254 break;
8255 #endif
8256 #ifdef TARGET_NR_geteuid
8257 case TARGET_NR_geteuid:
8258 ret = get_errno(high2lowuid(geteuid()));
8259 break;
8260 #endif
8261 #ifdef TARGET_NR_getegid
8262 case TARGET_NR_getegid:
8263 ret = get_errno(high2lowgid(getegid()));
8264 break;
8265 #endif
8266 case TARGET_NR_setreuid:
8267 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8268 break;
8269 case TARGET_NR_setregid:
8270 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8271 break;
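/* getgroups/setgroups: the group list is converted element by element
 * between the target's target_id layout (byte-swapped, with the 16/32-bit
 * gid mapping done by high2lowgid/low2highgid) and a host gid_t array
 * built on the stack. */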
8272 case TARGET_NR_getgroups:
8274 int gidsetsize = arg1;
8275 target_id *target_grouplist;
8276 gid_t *grouplist;
8277 int i;
8279 grouplist = alloca(gidsetsize * sizeof(gid_t));
8280 ret = get_errno(getgroups(gidsetsize, grouplist));
8281 if (gidsetsize == 0)
8282 break;
8283 if (!is_error(ret)) {
8284 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8285 if (!target_grouplist)
8286 goto efault;
8287 for(i = 0;i < ret; i++)
8288 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8289 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8292 break;
8293 case TARGET_NR_setgroups:
8295 int gidsetsize = arg1;
8296 target_id *target_grouplist;
8297 gid_t *grouplist = NULL;
8298 int i;
8299 if (gidsetsize) {
8300 grouplist = alloca(gidsetsize * sizeof(gid_t));
8301 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8302 if (!target_grouplist) {
8303 ret = -TARGET_EFAULT;
8304 goto fail;
8306 for (i = 0; i < gidsetsize; i++) {
8307 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8309 unlock_user(target_grouplist, arg2, 0);
8311 ret = get_errno(setgroups(gidsetsize, grouplist));
8313 break;
8314 case TARGET_NR_fchown:
8315 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8316 break;
8317 #if defined(TARGET_NR_fchownat)
8318 case TARGET_NR_fchownat:
8319 if (!(p = lock_user_string(arg2)))
8320 goto efault;
8321 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8322 low2highgid(arg4), arg5));
8323 unlock_user(p, arg2, 0);
8324 break;
8325 #endif
8326 #ifdef TARGET_NR_setresuid
8327 case TARGET_NR_setresuid:
8328 ret = get_errno(setresuid(low2highuid(arg1),
8329 low2highuid(arg2),
8330 low2highuid(arg3)));
8331 break;
8332 #endif
8333 #ifdef TARGET_NR_getresuid
8334 case TARGET_NR_getresuid:
8336 uid_t ruid, euid, suid;
8337 ret = get_errno(getresuid(&ruid, &euid, &suid));
8338 if (!is_error(ret)) {
8339 if (put_user_id(high2lowuid(ruid), arg1)
8340 || put_user_id(high2lowuid(euid), arg2)
8341 || put_user_id(high2lowuid(suid), arg3))
8342 goto efault;
8345 break;
8346 #endif
8347 #ifdef TARGET_NR_getresgid
8348 case TARGET_NR_setresgid:
8349 ret = get_errno(setresgid(low2highgid(arg1),
8350 low2highgid(arg2),
8351 low2highgid(arg3)));
8352 break;
8353 #endif
8354 #ifdef TARGET_NR_getresgid
8355 case TARGET_NR_getresgid:
8357 gid_t rgid, egid, sgid;
8358 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8359 if (!is_error(ret)) {
8360 if (put_user_id(high2lowgid(rgid), arg1)
8361 || put_user_id(high2lowgid(egid), arg2)
8362 || put_user_id(high2lowgid(sgid), arg3))
8363 goto efault;
8366 break;
8367 #endif
8368 case TARGET_NR_chown:
8369 if (!(p = lock_user_string(arg1)))
8370 goto efault;
8371 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8372 unlock_user(p, arg1, 0);
8373 break;
8374 case TARGET_NR_setuid:
8375 ret = get_errno(setuid(low2highuid(arg1)));
8376 break;
8377 case TARGET_NR_setgid:
8378 ret = get_errno(setgid(low2highgid(arg1)));
8379 break;
8380 case TARGET_NR_setfsuid:
8381 ret = get_errno(setfsuid(arg1));
8382 break;
8383 case TARGET_NR_setfsgid:
8384 ret = get_errno(setfsgid(arg1));
8385 break;
8387 #ifdef TARGET_NR_lchown32
8388 case TARGET_NR_lchown32:
8389 if (!(p = lock_user_string(arg1)))
8390 goto efault;
8391 ret = get_errno(lchown(p, arg2, arg3));
8392 unlock_user(p, arg1, 0);
8393 break;
8394 #endif
8395 #ifdef TARGET_NR_getuid32
8396 case TARGET_NR_getuid32:
8397 ret = get_errno(getuid());
8398 break;
8399 #endif
8401 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8402 /* Alpha specific */
8403 case TARGET_NR_getxuid:
8405 uid_t euid;
8406 euid=geteuid();
8407 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8409 ret = get_errno(getuid());
8410 break;
8411 #endif
8412 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8413 /* Alpha specific */
8414 case TARGET_NR_getxgid:
8416 uid_t egid;
8417 egid=getegid();
8418 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8420 ret = get_errno(getgid());
8421 break;
8422 #endif
8423 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8424 /* Alpha specific */
8425 case TARGET_NR_osf_getsysinfo:
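/* GSI_IEEE_FP_CONTROL repacks the Alpha hardware FPCR into the OSF
 * software completion control word (SWCR) layout, mirroring the kernel's
 * ieee_fpcr_to_swcr() as noted below. */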
8426 ret = -TARGET_EOPNOTSUPP;
8427 switch (arg1) {
8428 case TARGET_GSI_IEEE_FP_CONTROL:
8430 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8432 /* Copied from linux ieee_fpcr_to_swcr. */
8433 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8434 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8435 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8436 | SWCR_TRAP_ENABLE_DZE
8437 | SWCR_TRAP_ENABLE_OVF);
8438 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8439 | SWCR_TRAP_ENABLE_INE);
8440 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8441 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8443 if (put_user_u64 (swcr, arg2))
8444 goto efault;
8445 ret = 0;
8447 break;
8449 /* case GSI_IEEE_STATE_AT_SIGNAL:
8450 -- Not implemented in linux kernel.
8451 case GSI_UACPROC:
8452 -- Retrieves current unaligned access state; not much used.
8453 case GSI_PROC_TYPE:
8454 -- Retrieves implver information; surely not used.
8455 case GSI_GET_HWRPB:
8456 -- Grabs a copy of the HWRPB; surely not used. */
8459 break;
8460 #endif
8461 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8462 /* Alpha specific */
8463 case TARGET_NR_osf_setsysinfo:
8464 ret = -TARGET_EOPNOTSUPP;
8465 switch (arg1) {
8466 case TARGET_SSI_IEEE_FP_CONTROL:
8468 uint64_t swcr, fpcr, orig_fpcr;
8470 if (get_user_u64 (swcr, arg2)) {
8471 goto efault;
8473 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8474 fpcr = orig_fpcr & FPCR_DYN_MASK;
8476 /* Copied from linux ieee_swcr_to_fpcr. */
8477 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8478 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8479 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8480 | SWCR_TRAP_ENABLE_DZE
8481 | SWCR_TRAP_ENABLE_OVF)) << 48;
8482 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8483 | SWCR_TRAP_ENABLE_INE)) << 57;
8484 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8485 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8487 cpu_alpha_store_fpcr(cpu_env, fpcr);
8488 ret = 0;
8490 break;
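/* SSI_IEEE_RAISE_EXCEPTION folds the requested exception bits into the
 * FPCR status and, if any newly raised exception is unmasked, queues a
 * SIGFPE for the guest with the matching si_code. */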
8492 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8494 uint64_t exc, fpcr, orig_fpcr;
8495 int si_code;
8497 if (get_user_u64(exc, arg2)) {
8498 goto efault;
8501 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8503 /* We only add to the exception status here. */
8504 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8506 cpu_alpha_store_fpcr(cpu_env, fpcr);
8507 ret = 0;
8509 /* Old exceptions are not signaled. */
8510 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8512 /* If any exceptions were set by this call
8513 and are unmasked, send a signal. */
8514 si_code = 0;
8515 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8516 si_code = TARGET_FPE_FLTRES;
8518 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8519 si_code = TARGET_FPE_FLTUND;
8521 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8522 si_code = TARGET_FPE_FLTOVF;
8524 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8525 si_code = TARGET_FPE_FLTDIV;
8527 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8528 si_code = TARGET_FPE_FLTINV;
8530 if (si_code != 0) {
8531 target_siginfo_t info;
8532 info.si_signo = SIGFPE;
8533 info.si_errno = 0;
8534 info.si_code = si_code;
8535 info._sifields._sigfault._addr
8536 = ((CPUArchState *)cpu_env)->pc;
8537 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8540 break;
8542 /* case SSI_NVPAIRS:
8543 -- Used with SSIN_UACPROC to enable unaligned accesses.
8544 case SSI_IEEE_STATE_AT_SIGNAL:
8545 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8546 -- Not implemented in linux kernel */
8549 break;
8550 #endif
8551 #ifdef TARGET_NR_osf_sigprocmask
8552 /* Alpha specific. */
8553 case TARGET_NR_osf_sigprocmask:
8555 abi_ulong mask;
8556 int how;
8557 sigset_t set, oldset;
8559 switch(arg1) {
8560 case TARGET_SIG_BLOCK:
8561 how = SIG_BLOCK;
8562 break;
8563 case TARGET_SIG_UNBLOCK:
8564 how = SIG_UNBLOCK;
8565 break;
8566 case TARGET_SIG_SETMASK:
8567 how = SIG_SETMASK;
8568 break;
8569 default:
8570 ret = -TARGET_EINVAL;
8571 goto fail;
8573 mask = arg2;
8574 target_to_host_old_sigset(&set, &mask);
8575 do_sigprocmask(how, &set, &oldset);
8576 host_to_target_old_sigset(&mask, &oldset);
8577 ret = mask;
8579 break;
8580 #endif
8582 #ifdef TARGET_NR_getgid32
8583 case TARGET_NR_getgid32:
8584 ret = get_errno(getgid());
8585 break;
8586 #endif
8587 #ifdef TARGET_NR_geteuid32
8588 case TARGET_NR_geteuid32:
8589 ret = get_errno(geteuid());
8590 break;
8591 #endif
8592 #ifdef TARGET_NR_getegid32
8593 case TARGET_NR_getegid32:
8594 ret = get_errno(getegid());
8595 break;
8596 #endif
8597 #ifdef TARGET_NR_setreuid32
8598 case TARGET_NR_setreuid32:
8599 ret = get_errno(setreuid(arg1, arg2));
8600 break;
8601 #endif
8602 #ifdef TARGET_NR_setregid32
8603 case TARGET_NR_setregid32:
8604 ret = get_errno(setregid(arg1, arg2));
8605 break;
8606 #endif
8607 #ifdef TARGET_NR_getgroups32
8608 case TARGET_NR_getgroups32:
8610 int gidsetsize = arg1;
8611 uint32_t *target_grouplist;
8612 gid_t *grouplist;
8613 int i;
8615 grouplist = alloca(gidsetsize * sizeof(gid_t));
8616 ret = get_errno(getgroups(gidsetsize, grouplist));
8617 if (gidsetsize == 0)
8618 break;
8619 if (!is_error(ret)) {
8620 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8621 if (!target_grouplist) {
8622 ret = -TARGET_EFAULT;
8623 goto fail;
8625 for(i = 0;i < ret; i++)
8626 target_grouplist[i] = tswap32(grouplist[i]);
8627 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8630 break;
8631 #endif
8632 #ifdef TARGET_NR_setgroups32
8633 case TARGET_NR_setgroups32:
8635 int gidsetsize = arg1;
8636 uint32_t *target_grouplist;
8637 gid_t *grouplist;
8638 int i;
8640 grouplist = alloca(gidsetsize * sizeof(gid_t));
8641 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8642 if (!target_grouplist) {
8643 ret = -TARGET_EFAULT;
8644 goto fail;
8646 for(i = 0;i < gidsetsize; i++)
8647 grouplist[i] = tswap32(target_grouplist[i]);
8648 unlock_user(target_grouplist, arg2, 0);
8649 ret = get_errno(setgroups(gidsetsize, grouplist));
8651 break;
8652 #endif
8653 #ifdef TARGET_NR_fchown32
8654 case TARGET_NR_fchown32:
8655 ret = get_errno(fchown(arg1, arg2, arg3));
8656 break;
8657 #endif
8658 #ifdef TARGET_NR_setresuid32
8659 case TARGET_NR_setresuid32:
8660 ret = get_errno(setresuid(arg1, arg2, arg3));
8661 break;
8662 #endif
8663 #ifdef TARGET_NR_getresuid32
8664 case TARGET_NR_getresuid32:
8666 uid_t ruid, euid, suid;
8667 ret = get_errno(getresuid(&ruid, &euid, &suid));
8668 if (!is_error(ret)) {
8669 if (put_user_u32(ruid, arg1)
8670 || put_user_u32(euid, arg2)
8671 || put_user_u32(suid, arg3))
8672 goto efault;
8675 break;
8676 #endif
8677 #ifdef TARGET_NR_setresgid32
8678 case TARGET_NR_setresgid32:
8679 ret = get_errno(setresgid(arg1, arg2, arg3));
8680 break;
8681 #endif
8682 #ifdef TARGET_NR_getresgid32
8683 case TARGET_NR_getresgid32:
8685 gid_t rgid, egid, sgid;
8686 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8687 if (!is_error(ret)) {
8688 if (put_user_u32(rgid, arg1)
8689 || put_user_u32(egid, arg2)
8690 || put_user_u32(sgid, arg3))
8691 goto efault;
8694 break;
8695 #endif
8696 #ifdef TARGET_NR_chown32
8697 case TARGET_NR_chown32:
8698 if (!(p = lock_user_string(arg1)))
8699 goto efault;
8700 ret = get_errno(chown(p, arg2, arg3));
8701 unlock_user(p, arg1, 0);
8702 break;
8703 #endif
8704 #ifdef TARGET_NR_setuid32
8705 case TARGET_NR_setuid32:
8706 ret = get_errno(setuid(arg1));
8707 break;
8708 #endif
8709 #ifdef TARGET_NR_setgid32
8710 case TARGET_NR_setgid32:
8711 ret = get_errno(setgid(arg1));
8712 break;
8713 #endif
8714 #ifdef TARGET_NR_setfsuid32
8715 case TARGET_NR_setfsuid32:
8716 ret = get_errno(setfsuid(arg1));
8717 break;
8718 #endif
8719 #ifdef TARGET_NR_setfsgid32
8720 case TARGET_NR_setfsgid32:
8721 ret = get_errno(setfsgid(arg1));
8722 break;
8723 #endif
8725 case TARGET_NR_pivot_root:
8726 goto unimplemented;
8727 #ifdef TARGET_NR_mincore
8728 case TARGET_NR_mincore:
8730 void *a;
8731 ret = -TARGET_EFAULT;
8732 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8733 goto efault;
8734 if (!(p = lock_user_string(arg3)))
8735 goto mincore_fail;
8736 ret = get_errno(mincore(a, arg2, p));
8737 unlock_user(p, arg3, ret);
8738 mincore_fail:
8739 unlock_user(a, arg1, 0);
8741 break;
8742 #endif
8743 #ifdef TARGET_NR_arm_fadvise64_64
8744 case TARGET_NR_arm_fadvise64_64:
8747 /* arm_fadvise64_64 looks like fadvise64_64 but
8748 * with different argument order */
8750 abi_long temp;
8751 temp = arg3;
8752 arg3 = arg4;
8753 arg4 = temp;
8755 #endif
8756 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8757 #ifdef TARGET_NR_fadvise64_64
8758 case TARGET_NR_fadvise64_64:
8759 #endif
8760 #ifdef TARGET_NR_fadvise64
8761 case TARGET_NR_fadvise64:
8762 #endif
8763 #ifdef TARGET_S390X
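/* s390x numbers POSIX_FADV_DONTNEED/NOREUSE differently from the generic
 * ABI, so remap the advice value: target values 6/7 become the host
 * DONTNEED/NOREUSE, while 4/5 are turned into deliberately invalid values. */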
8764 switch (arg4) {
8765 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8766 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8767 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8768 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8769 default: break;
8771 #endif
8772 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8773 break;
8774 #endif
8775 #ifdef TARGET_NR_madvise
8776 case TARGET_NR_madvise:
8777 /* A straight passthrough may not be safe because qemu sometimes
8778 turns private file-backed mappings into anonymous mappings.
8779 This will break MADV_DONTNEED.
8780 This is a hint, so ignoring and returning success is ok. */
8781 ret = get_errno(0);
8782 break;
8783 #endif
8784 #if TARGET_ABI_BITS == 32
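/* fcntl64 (32-bit ABIs only): F_GETLK64/F_SETLK64/F_SETLKW64 convert
 * struct flock64 between the guest layout (with a separate ARM EABI
 * variant) and the host layout in both directions; all other commands are
 * handled by the generic do_fcntl() path. */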
8785 case TARGET_NR_fcntl64:
8787 int cmd;
8788 struct flock64 fl;
8789 struct target_flock64 *target_fl;
8790 #ifdef TARGET_ARM
8791 struct target_eabi_flock64 *target_efl;
8792 #endif
8794 cmd = target_to_host_fcntl_cmd(arg2);
8795 if (cmd == -TARGET_EINVAL) {
8796 ret = cmd;
8797 break;
8800 switch(arg2) {
8801 case TARGET_F_GETLK64:
8802 #ifdef TARGET_ARM
8803 if (((CPUARMState *)cpu_env)->eabi) {
8804 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8805 goto efault;
8806 fl.l_type = tswap16(target_efl->l_type);
8807 fl.l_whence = tswap16(target_efl->l_whence);
8808 fl.l_start = tswap64(target_efl->l_start);
8809 fl.l_len = tswap64(target_efl->l_len);
8810 fl.l_pid = tswap32(target_efl->l_pid);
8811 unlock_user_struct(target_efl, arg3, 0);
8812 } else
8813 #endif
8815 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8816 goto efault;
8817 fl.l_type = tswap16(target_fl->l_type);
8818 fl.l_whence = tswap16(target_fl->l_whence);
8819 fl.l_start = tswap64(target_fl->l_start);
8820 fl.l_len = tswap64(target_fl->l_len);
8821 fl.l_pid = tswap32(target_fl->l_pid);
8822 unlock_user_struct(target_fl, arg3, 0);
8824 ret = get_errno(fcntl(arg1, cmd, &fl));
8825 if (ret == 0) {
8826 #ifdef TARGET_ARM
8827 if (((CPUARMState *)cpu_env)->eabi) {
8828 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8829 goto efault;
8830 target_efl->l_type = tswap16(fl.l_type);
8831 target_efl->l_whence = tswap16(fl.l_whence);
8832 target_efl->l_start = tswap64(fl.l_start);
8833 target_efl->l_len = tswap64(fl.l_len);
8834 target_efl->l_pid = tswap32(fl.l_pid);
8835 unlock_user_struct(target_efl, arg3, 1);
8836 } else
8837 #endif
8839 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8840 goto efault;
8841 target_fl->l_type = tswap16(fl.l_type);
8842 target_fl->l_whence = tswap16(fl.l_whence);
8843 target_fl->l_start = tswap64(fl.l_start);
8844 target_fl->l_len = tswap64(fl.l_len);
8845 target_fl->l_pid = tswap32(fl.l_pid);
8846 unlock_user_struct(target_fl, arg3, 1);
8849 break;
8851 case TARGET_F_SETLK64:
8852 case TARGET_F_SETLKW64:
8853 #ifdef TARGET_ARM
8854 if (((CPUARMState *)cpu_env)->eabi) {
8855 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8856 goto efault;
8857 fl.l_type = tswap16(target_efl->l_type);
8858 fl.l_whence = tswap16(target_efl->l_whence);
8859 fl.l_start = tswap64(target_efl->l_start);
8860 fl.l_len = tswap64(target_efl->l_len);
8861 fl.l_pid = tswap32(target_efl->l_pid);
8862 unlock_user_struct(target_efl, arg3, 0);
8863 } else
8864 #endif
8866 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8867 goto efault;
8868 fl.l_type = tswap16(target_fl->l_type);
8869 fl.l_whence = tswap16(target_fl->l_whence);
8870 fl.l_start = tswap64(target_fl->l_start);
8871 fl.l_len = tswap64(target_fl->l_len);
8872 fl.l_pid = tswap32(target_fl->l_pid);
8873 unlock_user_struct(target_fl, arg3, 0);
8875 ret = get_errno(fcntl(arg1, cmd, &fl));
8876 break;
8877 default:
8878 ret = do_fcntl(arg1, arg2, arg3);
8879 break;
8881 break;
8883 #endif
8884 #ifdef TARGET_NR_cacheflush
8885 case TARGET_NR_cacheflush:
8886 /* self-modifying code is handled automatically, so nothing needed */
8887 ret = 0;
8888 break;
8889 #endif
8890 #ifdef TARGET_NR_security
8891 case TARGET_NR_security:
8892 goto unimplemented;
8893 #endif
8894 #ifdef TARGET_NR_getpagesize
8895 case TARGET_NR_getpagesize:
8896 ret = TARGET_PAGE_SIZE;
8897 break;
8898 #endif
8899 case TARGET_NR_gettid:
8900 ret = get_errno(gettid());
8901 break;
8902 #ifdef TARGET_NR_readahead
8903 case TARGET_NR_readahead:
8904 #if TARGET_ABI_BITS == 32
8905 if (regpairs_aligned(cpu_env)) {
8906 arg2 = arg3;
8907 arg3 = arg4;
8908 arg4 = arg5;
8910 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8911 #else
8912 ret = get_errno(readahead(arg1, arg2, arg3));
8913 #endif
8914 break;
8915 #endif
8916 #ifdef CONFIG_ATTR
8917 #ifdef TARGET_NR_setxattr
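/* xattr family: each wrapper locks the guest name/value buffers in guest
 * memory, calls the corresponding host *xattr() function, and unlocks the
 * buffers afterwards (writable value buffers are copied back to the guest). */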
8918 case TARGET_NR_listxattr:
8919 case TARGET_NR_llistxattr:
8921 void *p, *b = 0;
8922 if (arg2) {
8923 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8924 if (!b) {
8925 ret = -TARGET_EFAULT;
8926 break;
8929 p = lock_user_string(arg1);
8930 if (p) {
8931 if (num == TARGET_NR_listxattr) {
8932 ret = get_errno(listxattr(p, b, arg3));
8933 } else {
8934 ret = get_errno(llistxattr(p, b, arg3));
8936 } else {
8937 ret = -TARGET_EFAULT;
8939 unlock_user(p, arg1, 0);
8940 unlock_user(b, arg2, arg3);
8941 break;
8943 case TARGET_NR_flistxattr:
8945 void *b = 0;
8946 if (arg2) {
8947 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8948 if (!b) {
8949 ret = -TARGET_EFAULT;
8950 break;
8953 ret = get_errno(flistxattr(arg1, b, arg3));
8954 unlock_user(b, arg2, arg3);
8955 break;
8957 case TARGET_NR_setxattr:
8958 case TARGET_NR_lsetxattr:
8960 void *p, *n, *v = 0;
8961 if (arg3) {
8962 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8963 if (!v) {
8964 ret = -TARGET_EFAULT;
8965 break;
8968 p = lock_user_string(arg1);
8969 n = lock_user_string(arg2);
8970 if (p && n) {
8971 if (num == TARGET_NR_setxattr) {
8972 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8973 } else {
8974 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8976 } else {
8977 ret = -TARGET_EFAULT;
8979 unlock_user(p, arg1, 0);
8980 unlock_user(n, arg2, 0);
8981 unlock_user(v, arg3, 0);
8983 break;
8984 case TARGET_NR_fsetxattr:
8986 void *n, *v = 0;
8987 if (arg3) {
8988 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8989 if (!v) {
8990 ret = -TARGET_EFAULT;
8991 break;
8994 n = lock_user_string(arg2);
8995 if (n) {
8996 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8997 } else {
8998 ret = -TARGET_EFAULT;
9000 unlock_user(n, arg2, 0);
9001 unlock_user(v, arg3, 0);
9003 break;
9004 case TARGET_NR_getxattr:
9005 case TARGET_NR_lgetxattr:
9007 void *p, *n, *v = 0;
9008 if (arg3) {
9009 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9010 if (!v) {
9011 ret = -TARGET_EFAULT;
9012 break;
9015 p = lock_user_string(arg1);
9016 n = lock_user_string(arg2);
9017 if (p && n) {
9018 if (num == TARGET_NR_getxattr) {
9019 ret = get_errno(getxattr(p, n, v, arg4));
9020 } else {
9021 ret = get_errno(lgetxattr(p, n, v, arg4));
9023 } else {
9024 ret = -TARGET_EFAULT;
9026 unlock_user(p, arg1, 0);
9027 unlock_user(n, arg2, 0);
9028 unlock_user(v, arg3, arg4);
9030 break;
9031 case TARGET_NR_fgetxattr:
9033 void *n, *v = 0;
9034 if (arg3) {
9035 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9036 if (!v) {
9037 ret = -TARGET_EFAULT;
9038 break;
9041 n = lock_user_string(arg2);
9042 if (n) {
9043 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9044 } else {
9045 ret = -TARGET_EFAULT;
9047 unlock_user(n, arg2, 0);
9048 unlock_user(v, arg3, arg4);
9050 break;
9051 case TARGET_NR_removexattr:
9052 case TARGET_NR_lremovexattr:
9054 void *p, *n;
9055 p = lock_user_string(arg1);
9056 n = lock_user_string(arg2);
9057 if (p && n) {
9058 if (num == TARGET_NR_removexattr) {
9059 ret = get_errno(removexattr(p, n));
9060 } else {
9061 ret = get_errno(lremovexattr(p, n));
9063 } else {
9064 ret = -TARGET_EFAULT;
9066 unlock_user(p, arg1, 0);
9067 unlock_user(n, arg2, 0);
9069 break;
9070 case TARGET_NR_fremovexattr:
9072 void *n;
9073 n = lock_user_string(arg2);
9074 if (n) {
9075 ret = get_errno(fremovexattr(arg1, n));
9076 } else {
9077 ret = -TARGET_EFAULT;
9079 unlock_user(n, arg2, 0);
9081 break;
9082 #endif
9083 #endif /* CONFIG_ATTR */
9084 #ifdef TARGET_NR_set_thread_area
9085 case TARGET_NR_set_thread_area:
9086 #if defined(TARGET_MIPS)
9087 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9088 ret = 0;
9089 break;
9090 #elif defined(TARGET_CRIS)
9091 if (arg1 & 0xff)
9092 ret = -TARGET_EINVAL;
9093 else {
9094 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9095 ret = 0;
9097 break;
9098 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9099 ret = do_set_thread_area(cpu_env, arg1);
9100 break;
9101 #elif defined(TARGET_M68K)
9103 TaskState *ts = cpu->opaque;
9104 ts->tp_value = arg1;
9105 ret = 0;
9106 break;
9108 #else
9109 goto unimplemented_nowarn;
9110 #endif
9111 #endif
9112 #ifdef TARGET_NR_get_thread_area
9113 case TARGET_NR_get_thread_area:
9114 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9115 ret = do_get_thread_area(cpu_env, arg1);
9116 break;
9117 #elif defined(TARGET_M68K)
9119 TaskState *ts = cpu->opaque;
9120 ret = ts->tp_value;
9121 break;
9123 #else
9124 goto unimplemented_nowarn;
9125 #endif
9126 #endif
9127 #ifdef TARGET_NR_getdomainname
9128 case TARGET_NR_getdomainname:
9129 goto unimplemented_nowarn;
9130 #endif
9132 #ifdef TARGET_NR_clock_gettime
9133 case TARGET_NR_clock_gettime:
9135 struct timespec ts;
9136 ret = get_errno(clock_gettime(arg1, &ts));
9137 if (!is_error(ret)) {
9138 host_to_target_timespec(arg2, &ts);
9140 break;
9142 #endif
9143 #ifdef TARGET_NR_clock_getres
9144 case TARGET_NR_clock_getres:
9146 struct timespec ts;
9147 ret = get_errno(clock_getres(arg1, &ts));
9148 if (!is_error(ret)) {
9149 host_to_target_timespec(arg2, &ts);
9151 break;
9153 #endif
9154 #ifdef TARGET_NR_clock_nanosleep
9155 case TARGET_NR_clock_nanosleep:
9157 struct timespec ts;
9158 target_to_host_timespec(&ts, arg3);
9159 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
9160 if (arg4)
9161 host_to_target_timespec(arg4, &ts);
9163 #if defined(TARGET_PPC)
9164 /* clock_nanosleep is odd in that it returns positive errno values.
9165 * On PPC, CR0 bit 3 should be set in such a situation. */
9166 if (ret) {
9167 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
9169 #endif
9170 break;
9172 #endif
9174 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9175 case TARGET_NR_set_tid_address:
9176 ret = get_errno(set_tid_address((int *)g2h(arg1)));
9177 break;
9178 #endif
9180 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9181 case TARGET_NR_tkill:
9182 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9183 break;
9184 #endif
9186 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9187 case TARGET_NR_tgkill:
9188 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9189 target_to_host_signal(arg3)));
9190 break;
9191 #endif
9193 #ifdef TARGET_NR_set_robust_list
9194 case TARGET_NR_set_robust_list:
9195 case TARGET_NR_get_robust_list:
9196 /* The ABI for supporting robust futexes has userspace pass
9197 * the kernel a pointer to a linked list which is updated by
9198 * userspace after the syscall; the list is walked by the kernel
9199 * when the thread exits. Since the linked list in QEMU guest
9200 * memory isn't a valid linked list for the host and we have
9201 * no way to reliably intercept the thread-death event, we can't
9202 * support these. Silently return ENOSYS so that guest userspace
9203 * falls back to a non-robust futex implementation (which should
9204 * be OK except in the corner case of the guest crashing while
9205 * holding a mutex that is shared with another process via
9206 * shared memory). */
9208 goto unimplemented_nowarn;
9209 #endif
9211 #if defined(TARGET_NR_utimensat)
9212 case TARGET_NR_utimensat:
9214 struct timespec *tsp, ts[2];
9215 if (!arg3) {
9216 tsp = NULL;
9217 } else {
9218 target_to_host_timespec(ts, arg3);
9219 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9220 tsp = ts;
9222 if (!arg2)
9223 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9224 else {
9225 if (!(p = lock_user_string(arg2))) {
9226 ret = -TARGET_EFAULT;
9227 goto fail;
9229 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9230 unlock_user(p, arg2, 0);
9233 break;
9234 #endif
9235 case TARGET_NR_futex:
9236 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9237 break;
9238 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9239 case TARGET_NR_inotify_init:
9240 ret = get_errno(sys_inotify_init());
9241 break;
9242 #endif
9243 #ifdef CONFIG_INOTIFY1
9244 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9245 case TARGET_NR_inotify_init1:
9246 ret = get_errno(sys_inotify_init1(arg1));
9247 break;
9248 #endif
9249 #endif
9250 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9251 case TARGET_NR_inotify_add_watch:
9252 p = lock_user_string(arg2);
9253 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9254 unlock_user(p, arg2, 0);
9255 break;
9256 #endif
9257 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9258 case TARGET_NR_inotify_rm_watch:
9259 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9260 break;
9261 #endif
9263 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9264 case TARGET_NR_mq_open:
9266 struct mq_attr posix_mq_attr, *attrp;
9268 p = lock_user_string(arg1 - 1);
9269 if (arg4 != 0) {
9270 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9271 attrp = &posix_mq_attr;
9272 } else {
9273 attrp = 0;
9275 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9276 unlock_user (p, arg1, 0);
9278 break;
9280 case TARGET_NR_mq_unlink:
9281 p = lock_user_string(arg1 - 1);
9282 ret = get_errno(mq_unlink(p));
9283 unlock_user (p, arg1, 0);
9284 break;
9286 case TARGET_NR_mq_timedsend:
9288 struct timespec ts;
9290 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9291 if (arg5 != 0) {
9292 target_to_host_timespec(&ts, arg5);
9293 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9294 host_to_target_timespec(arg5, &ts);
9296 else
9297 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9298 unlock_user (p, arg2, arg3);
9300 break;
9302 case TARGET_NR_mq_timedreceive:
9304 struct timespec ts;
9305 unsigned int prio;
9307 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9308 if (arg5 != 0) {
9309 target_to_host_timespec(&ts, arg5);
9310 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9311 host_to_target_timespec(arg5, &ts);
9313 else
9314 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9315 unlock_user (p, arg2, arg3);
9316 if (arg4 != 0)
9317 put_user_u32(prio, arg4);
9319 break;
9321 /* Not implemented for now... */
9322 /* case TARGET_NR_mq_notify: */
9323 /* break; */
9325 case TARGET_NR_mq_getsetattr:
9327 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9328 ret = 0;
9329 if (arg3 != 0) {
9330 ret = mq_getattr(arg1, &posix_mq_attr_out);
9331 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9333 if (arg2 != 0) {
9334 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9335 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9339 break;
9340 #endif
9342 #ifdef CONFIG_SPLICE
9343 #ifdef TARGET_NR_tee
9344 case TARGET_NR_tee:
9346 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9348 break;
9349 #endif
9350 #ifdef TARGET_NR_splice
9351 case TARGET_NR_splice:
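/* splice: the optional 64-bit in/out offsets are read from guest memory,
 * handed to the host by pointer so the kernel can advance them, and the
 * updated values are written back afterwards. */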
9353 loff_t loff_in, loff_out;
9354 loff_t *ploff_in = NULL, *ploff_out = NULL;
9355 if (arg2) {
9356 if (get_user_u64(loff_in, arg2)) {
9357 goto efault;
9359 ploff_in = &loff_in;
9361 if (arg4) {
9362 if (get_user_u64(loff_out, arg4)) {
9363 goto efault;
9365 ploff_out = &loff_out;
9367 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9368 if (arg2) {
9369 if (put_user_u64(loff_in, arg2)) {
9370 goto efault;
9373 if (arg4) {
9374 if (put_user_u64(loff_out, arg4)) {
9375 goto efault;
9379 break;
9380 #endif
9381 #ifdef TARGET_NR_vmsplice
9382 case TARGET_NR_vmsplice:
9384 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9385 if (vec != NULL) {
9386 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9387 unlock_iovec(vec, arg2, arg3, 0);
9388 } else {
9389 ret = -host_to_target_errno(errno);
9392 break;
9393 #endif
9394 #endif /* CONFIG_SPLICE */
9395 #ifdef CONFIG_EVENTFD
9396 #if defined(TARGET_NR_eventfd)
9397 case TARGET_NR_eventfd:
9398 ret = get_errno(eventfd(arg1, 0));
9399 break;
9400 #endif
9401 #if defined(TARGET_NR_eventfd2)
9402 case TARGET_NR_eventfd2:
9404 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9405 if (arg2 & TARGET_O_NONBLOCK) {
9406 host_flags |= O_NONBLOCK;
9408 if (arg2 & TARGET_O_CLOEXEC) {
9409 host_flags |= O_CLOEXEC;
9411 ret = get_errno(eventfd(arg1, host_flags));
9412 break;
9414 #endif
9415 #endif /* CONFIG_EVENTFD */
9416 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9417 case TARGET_NR_fallocate:
9418 #if TARGET_ABI_BITS == 32
9419 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9420 target_offset64(arg5, arg6)));
9421 #else
9422 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9423 #endif
9424 break;
9425 #endif
9426 #if defined(CONFIG_SYNC_FILE_RANGE)
9427 #if defined(TARGET_NR_sync_file_range)
9428 case TARGET_NR_sync_file_range:
9429 #if TARGET_ABI_BITS == 32
9430 #if defined(TARGET_MIPS)
9431 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9432 target_offset64(arg5, arg6), arg7));
9433 #else
9434 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9435 target_offset64(arg4, arg5), arg6));
9436 #endif /* !TARGET_MIPS */
9437 #else
9438 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9439 #endif
9440 break;
9441 #endif
9442 #if defined(TARGET_NR_sync_file_range2)
9443 case TARGET_NR_sync_file_range2:
9444 /* This is like sync_file_range but the arguments are reordered */
9445 #if TARGET_ABI_BITS == 32
9446 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9447 target_offset64(arg5, arg6), arg2));
9448 #else
9449 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9450 #endif
9451 break;
9452 #endif
9453 #endif
9454 #if defined(CONFIG_EPOLL)
9455 #if defined(TARGET_NR_epoll_create)
9456 case TARGET_NR_epoll_create:
9457 ret = get_errno(epoll_create(arg1));
9458 break;
9459 #endif
9460 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9461 case TARGET_NR_epoll_create1:
9462 ret = get_errno(epoll_create1(arg1));
9463 break;
9464 #endif
9465 #if defined(TARGET_NR_epoll_ctl)
9466 case TARGET_NR_epoll_ctl:
9468 struct epoll_event ep;
9469 struct epoll_event *epp = 0;
9470 if (arg4) {
9471 struct target_epoll_event *target_ep;
9472 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9473 goto efault;
9475 ep.events = tswap32(target_ep->events);
9476 /* The epoll_data_t union is just opaque data to the kernel,
9477 * so we transfer all 64 bits across and need not worry what
9478 * actual data type it is. */
9480 ep.data.u64 = tswap64(target_ep->data.u64);
9481 unlock_user_struct(target_ep, arg4, 0);
9482 epp = &ep;
9484 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9485 break;
9487 #endif
9489 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9490 #define IMPLEMENT_EPOLL_PWAIT
9491 #endif
9492 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9493 #if defined(TARGET_NR_epoll_wait)
9494 case TARGET_NR_epoll_wait:
9495 #endif
9496 #if defined(IMPLEMENT_EPOLL_PWAIT)
9497 case TARGET_NR_epoll_pwait:
9498 #endif
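/* epoll_wait/epoll_pwait: events are collected into a host epoll_event
 * array on the stack and byte-swapped back into the guest's
 * target_epoll_event buffer; for epoll_pwait the optional signal mask is
 * converted from the target sigset first. */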
9500 struct target_epoll_event *target_ep;
9501 struct epoll_event *ep;
9502 int epfd = arg1;
9503 int maxevents = arg3;
9504 int timeout = arg4;
9506 target_ep = lock_user(VERIFY_WRITE, arg2,
9507 maxevents * sizeof(struct target_epoll_event), 1);
9508 if (!target_ep) {
9509 goto efault;
9512 ep = alloca(maxevents * sizeof(struct epoll_event));
9514 switch (num) {
9515 #if defined(IMPLEMENT_EPOLL_PWAIT)
9516 case TARGET_NR_epoll_pwait:
9518 target_sigset_t *target_set;
9519 sigset_t _set, *set = &_set;
9521 if (arg5) {
9522 target_set = lock_user(VERIFY_READ, arg5,
9523 sizeof(target_sigset_t), 1);
9524 if (!target_set) {
9525 unlock_user(target_ep, arg2, 0);
9526 goto efault;
9528 target_to_host_sigset(set, target_set);
9529 unlock_user(target_set, arg5, 0);
9530 } else {
9531 set = NULL;
9534 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9535 break;
9537 #endif
9538 #if defined(TARGET_NR_epoll_wait)
9539 case TARGET_NR_epoll_wait:
9540 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9541 break;
9542 #endif
9543 default:
9544 ret = -TARGET_ENOSYS;
9546 if (!is_error(ret)) {
9547 int i;
9548 for (i = 0; i < ret; i++) {
9549 target_ep[i].events = tswap32(ep[i].events);
9550 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9553 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9554 break;
9556 #endif
9557 #endif
9558 #ifdef TARGET_NR_prlimit64
9559 case TARGET_NR_prlimit64:
9561 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9562 struct target_rlimit64 *target_rnew, *target_rold;
9563 struct host_rlimit64 rnew, rold, *rnewp = 0;
9564 int resource = target_to_host_resource(arg2);
9565 if (arg3) {
9566 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9567 goto efault;
9569 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9570 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9571 unlock_user_struct(target_rnew, arg3, 0);
9572 rnewp = &rnew;
9575 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
9576 if (!is_error(ret) && arg4) {
9577 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9578 goto efault;
9580 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9581 target_rold->rlim_max = tswap64(rold.rlim_max);
9582 unlock_user_struct(target_rold, arg4, 1);
9584 break;
9586 #endif
9587 #ifdef TARGET_NR_gethostname
9588 case TARGET_NR_gethostname:
9590 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9591 if (name) {
9592 ret = get_errno(gethostname(name, arg2));
9593 unlock_user(name, arg1, arg2);
9594 } else {
9595 ret = -TARGET_EFAULT;
9597 break;
9599 #endif
9600 #ifdef TARGET_NR_atomic_cmpxchg_32
9601 case TARGET_NR_atomic_cmpxchg_32:
9603 /* should use start_exclusive from main.c */
9604 abi_ulong mem_value;
9605 if (get_user_u32(mem_value, arg6)) {
9606 target_siginfo_t info;
9607 info.si_signo = SIGSEGV;
9608 info.si_errno = 0;
9609 info.si_code = TARGET_SEGV_MAPERR;
9610 info._sifields._sigfault._addr = arg6;
9611 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9612 ret = 0xdeadbeef;
9615 if (mem_value == arg2)
9616 put_user_u32(arg1, arg6);
9617 ret = mem_value;
9618 break;
9620 #endif
9621 #ifdef TARGET_NR_atomic_barrier
9622 case TARGET_NR_atomic_barrier:
9624 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9625 ret = 0;
9626 break;
9628 #endif
9630 #ifdef TARGET_NR_timer_create
9631 case TARGET_NR_timer_create:
9633 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
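/* Host timers are tracked in the g_posix_timers table; the value handed
 * back to the guest is the table index tagged with TIMER_MAGIC, which
 * get_timer_id() converts back to a table index in the other timer_*
 * handlers below. */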
9635 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
9637 int clkid = arg1;
9638 int timer_index = next_free_host_timer();
9640 if (timer_index < 0) {
9641 ret = -TARGET_EAGAIN;
9642 } else {
9643 timer_t *phtimer = g_posix_timers + timer_index;
9645 if (arg2) {
9646 phost_sevp = &host_sevp;
9647 ret = target_to_host_sigevent(phost_sevp, arg2);
9648 if (ret != 0) {
9649 break;
9653 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
9654 if (ret) {
9655 phtimer = NULL;
9656 } else {
9657 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
9658 goto efault;
9662 break;
9664 #endif
9666 #ifdef TARGET_NR_timer_settime
9667 case TARGET_NR_timer_settime:
9669 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9670 * struct itimerspec * old_value */
9671 target_timer_t timerid = get_timer_id(arg1);
9673 if (timerid < 0) {
9674 ret = timerid;
9675 } else if (arg3 == 0) {
9676 ret = -TARGET_EINVAL;
9677 } else {
9678 timer_t htimer = g_posix_timers[timerid];
9679 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
9681 target_to_host_itimerspec(&hspec_new, arg3);
9682 ret = get_errno(
9683 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
9684 host_to_target_itimerspec(arg4, &hspec_old);
9686 break;
9688 #endif
9690 #ifdef TARGET_NR_timer_gettime
9691 case TARGET_NR_timer_gettime:
9693 /* args: timer_t timerid, struct itimerspec *curr_value */
9694 target_timer_t timerid = get_timer_id(arg1);
9696 if (timerid < 0) {
9697 ret = timerid;
9698 } else if (!arg2) {
9699 ret = -TARGET_EFAULT;
9700 } else {
9701 timer_t htimer = g_posix_timers[timerid];
9702 struct itimerspec hspec;
9703 ret = get_errno(timer_gettime(htimer, &hspec));
9705 if (host_to_target_itimerspec(arg2, &hspec)) {
9706 ret = -TARGET_EFAULT;
9709 break;
9711 #endif
9713 #ifdef TARGET_NR_timer_getoverrun
9714 case TARGET_NR_timer_getoverrun:
9716 /* args: timer_t timerid */
9717 target_timer_t timerid = get_timer_id(arg1);
9719 if (timerid < 0) {
9720 ret = timerid;
9721 } else {
9722 timer_t htimer = g_posix_timers[timerid];
9723 ret = get_errno(timer_getoverrun(htimer));
9725 break;
9727 #endif
9729 #ifdef TARGET_NR_timer_delete
9730 case TARGET_NR_timer_delete:
9732 /* args: timer_t timerid */
9733 target_timer_t timerid = get_timer_id(arg1);
9735 if (timerid < 0) {
9736 ret = timerid;
9737 } else {
9738 timer_t htimer = g_posix_timers[timerid];
9739 ret = get_errno(timer_delete(htimer));
9740 g_posix_timers[timerid] = 0;
9742 break;
9744 #endif
9746 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
9747 case TARGET_NR_timerfd_create:
9748 ret = get_errno(timerfd_create(arg1,
9749 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
9750 break;
9751 #endif
9753 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
9754 case TARGET_NR_timerfd_gettime:
9756 struct itimerspec its_curr;
9758 ret = get_errno(timerfd_gettime(arg1, &its_curr));
9760 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
9761 goto efault;
9764 break;
9765 #endif
9767 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
9768 case TARGET_NR_timerfd_settime:
9770 struct itimerspec its_new, its_old, *p_new;
9772 if (arg3) {
9773 if (target_to_host_itimerspec(&its_new, arg3)) {
9774 goto efault;
9776 p_new = &its_new;
9777 } else {
9778 p_new = NULL;
9781 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
9783 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
9784 goto efault;
9787 break;
9788 #endif
9790 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
9791 case TARGET_NR_ioprio_get:
9792 ret = get_errno(ioprio_get(arg1, arg2));
9793 break;
9794 #endif
9796 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
9797 case TARGET_NR_ioprio_set:
9798 ret = get_errno(ioprio_set(arg1, arg2, arg3));
9799 break;
9800 #endif
9802 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
9803 case TARGET_NR_setns:
9804 ret = get_errno(setns(arg1, arg2));
9805 break;
9806 #endif
9807 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
9808 case TARGET_NR_unshare:
9809 ret = get_errno(unshare(arg1));
9810 break;
9811 #endif
9813 default:
9814 unimplemented:
9815 gemu_log("qemu: Unsupported syscall: %d\n", num);
9816 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9817 unimplemented_nowarn:
9818 #endif
9819 ret = -TARGET_ENOSYS;
9820 break;
9822 fail:
9823 #ifdef DEBUG
9824 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9825 #endif
9826 if(do_strace)
9827 print_syscall_ret(num, ret);
9828 return ret;
9829 efault:
9830 ret = -TARGET_EFAULT;
9831 goto fail;