/*
 * (scrape residue, preserved for provenance)
 * add macro file for coccinelle
 * [qemu.git] / linux-user / syscall.c
 * blob 732936f0b969b1ce05ce575082077d76b632bdab
 */
/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <linux/capability.h>
47 #include <signal.h>
48 #include <sched.h>
49 #ifdef __ia64__
50 int __clone2(int (*fn)(void *), void *child_stack_base,
51 size_t stack_size, int flags, void *arg, ...);
52 #endif
53 #include <sys/socket.h>
54 #include <sys/un.h>
55 #include <sys/uio.h>
56 #include <sys/poll.h>
57 #include <sys/times.h>
58 #include <sys/shm.h>
59 #include <sys/sem.h>
60 #include <sys/statfs.h>
61 #include <utime.h>
62 #include <sys/sysinfo.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef CONFIG_TIMERFD
70 #include <sys/timerfd.h>
71 #endif
72 #ifdef TARGET_GPROF
73 #include <sys/gmon.h>
74 #endif
75 #ifdef CONFIG_EVENTFD
76 #include <sys/eventfd.h>
77 #endif
78 #ifdef CONFIG_EPOLL
79 #include <sys/epoll.h>
80 #endif
81 #ifdef CONFIG_ATTR
82 #include "qemu/xattr.h"
83 #endif
84 #ifdef CONFIG_SENDFILE
85 #include <sys/sendfile.h>
86 #endif
88 #define termios host_termios
89 #define winsize host_winsize
90 #define termio host_termio
91 #define sgttyb host_sgttyb /* same as target */
92 #define tchars host_tchars /* same as target */
93 #define ltchars host_ltchars /* same as target */
95 #include <linux/termios.h>
96 #include <linux/unistd.h>
97 #include <linux/cdrom.h>
98 #include <linux/hdreg.h>
99 #include <linux/soundcard.h>
100 #include <linux/kd.h>
101 #include <linux/mtio.h>
102 #include <linux/fs.h>
103 #if defined(CONFIG_FIEMAP)
104 #include <linux/fiemap.h>
105 #endif
106 #include <linux/fb.h>
107 #include <linux/vt.h>
108 #include <linux/dm-ioctl.h>
109 #include <linux/reboot.h>
110 #include <linux/route.h>
111 #include <linux/filter.h>
112 #include <linux/blkpg.h>
113 #include "linux_loop.h"
114 #include "uname.h"
116 #include "qemu.h"
/* Thread-related clone flags that the NPTL emulation must honour. */
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)

//#define DEBUG

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

/* Drop any libc-provided _syscallN macros; we define our own below. */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6
/*
 * Generate a static wrapper function `name` that invokes the raw host
 * syscall __NR_<name> via syscall(2).  The wrappers return the host
 * return value unchanged (and set the host errno on failure).
 */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
/* Map our internal sys_* wrapper names onto the host syscall numbers. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

/* These 64-bit hosts only provide lseek; alias _llseek to it. */
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
216 #ifdef __NR_getdents
217 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
218 #endif
219 #if !defined(__NR_getdents) || \
220 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
221 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
222 #endif
223 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
224 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
225 loff_t *, res, uint, wh);
226 #endif
227 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
228 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
229 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
230 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
231 #endif
232 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
233 _syscall2(int,sys_tkill,int,tid,int,sig)
234 #endif
235 #ifdef __NR_exit_group
236 _syscall1(int,exit_group,int,error_code)
237 #endif
238 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
239 _syscall1(int,set_tid_address,int *,tidptr)
240 #endif
241 #if defined(TARGET_NR_futex) && defined(__NR_futex)
242 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
243 const struct timespec *,timeout,int *,uaddr2,int,val3)
244 #endif
245 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
246 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
247 unsigned long *, user_mask_ptr);
248 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
249 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
250 unsigned long *, user_mask_ptr);
251 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
252 void *, arg);
253 _syscall2(int, capget, struct __user_cap_header_struct *, header,
254 struct __user_cap_data_struct *, data);
255 _syscall2(int, capset, struct __user_cap_header_struct *, header,
256 struct __user_cap_data_struct *, data);
257 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
258 _syscall2(int, ioprio_get, int, which, int, who)
259 #endif
260 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
261 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
262 #endif
264 static bitmask_transtbl fcntl_flags_tbl[] = {
265 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
266 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
267 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
268 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
269 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
270 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
271 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
272 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
273 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
274 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
275 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
276 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
277 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
278 #if defined(O_DIRECT)
279 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
280 #endif
281 #if defined(O_NOATIME)
282 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
283 #endif
284 #if defined(O_CLOEXEC)
285 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
286 #endif
287 #if defined(O_PATH)
288 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
289 #endif
290 /* Don't terminate the list prematurely on 64-bit host+guest. */
291 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
292 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
293 #endif
294 { 0, 0, 0, 0 }
/* getcwd(2)-like wrapper matching the kernel syscall convention:
 * on success returns the string length including the trailing NUL,
 * on failure returns -1 with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
306 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
309 * open(2) has extra parameter 'mode' when called with
310 * flag O_CREAT.
312 if ((flags & O_CREAT) != 0) {
313 return (openat(dirfd, pathname, flags, mode));
315 return (openat(dirfd, pathname, flags));
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Use the libc utimensat()/futimens(); a NULL pathname means
 * "operate on dirfd itself", as the kernel syscall does. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc support: call the raw host syscall. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: fail with ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers over the libc inotify API, each only compiled when both
 * the target and the host provide the corresponding syscall. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
/* Raw ppoll(2) wrapper; the explicit sigsetsize matches the kernel ABI. */
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const sigset_t *, sigmask,
          size_t, sigsetsize)
#endif
#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
/* Raw pselect6(2) wrapper; 'sig' is the kernel's {sigset, size} pair. */
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers and return its index,
 * or -1 if all slots are in use.  The slot is marked busy by storing
 * a dummy non-zero handle until the real timer is created. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
448 #define ERRNO_TABLE_SIZE 1200
450 /* target_to_host_errno_table[] is initialized from
451 * host_to_target_errno_table[] in syscall_init(). */
452 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
456 * This list is the union of errno values overridden in asm-<arch>/errno.h
457 * minus the errnos that are not actually generic to all archs.
459 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
460 [EIDRM] = TARGET_EIDRM,
461 [ECHRNG] = TARGET_ECHRNG,
462 [EL2NSYNC] = TARGET_EL2NSYNC,
463 [EL3HLT] = TARGET_EL3HLT,
464 [EL3RST] = TARGET_EL3RST,
465 [ELNRNG] = TARGET_ELNRNG,
466 [EUNATCH] = TARGET_EUNATCH,
467 [ENOCSI] = TARGET_ENOCSI,
468 [EL2HLT] = TARGET_EL2HLT,
469 [EDEADLK] = TARGET_EDEADLK,
470 [ENOLCK] = TARGET_ENOLCK,
471 [EBADE] = TARGET_EBADE,
472 [EBADR] = TARGET_EBADR,
473 [EXFULL] = TARGET_EXFULL,
474 [ENOANO] = TARGET_ENOANO,
475 [EBADRQC] = TARGET_EBADRQC,
476 [EBADSLT] = TARGET_EBADSLT,
477 [EBFONT] = TARGET_EBFONT,
478 [ENOSTR] = TARGET_ENOSTR,
479 [ENODATA] = TARGET_ENODATA,
480 [ETIME] = TARGET_ETIME,
481 [ENOSR] = TARGET_ENOSR,
482 [ENONET] = TARGET_ENONET,
483 [ENOPKG] = TARGET_ENOPKG,
484 [EREMOTE] = TARGET_EREMOTE,
485 [ENOLINK] = TARGET_ENOLINK,
486 [EADV] = TARGET_EADV,
487 [ESRMNT] = TARGET_ESRMNT,
488 [ECOMM] = TARGET_ECOMM,
489 [EPROTO] = TARGET_EPROTO,
490 [EDOTDOT] = TARGET_EDOTDOT,
491 [EMULTIHOP] = TARGET_EMULTIHOP,
492 [EBADMSG] = TARGET_EBADMSG,
493 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
494 [EOVERFLOW] = TARGET_EOVERFLOW,
495 [ENOTUNIQ] = TARGET_ENOTUNIQ,
496 [EBADFD] = TARGET_EBADFD,
497 [EREMCHG] = TARGET_EREMCHG,
498 [ELIBACC] = TARGET_ELIBACC,
499 [ELIBBAD] = TARGET_ELIBBAD,
500 [ELIBSCN] = TARGET_ELIBSCN,
501 [ELIBMAX] = TARGET_ELIBMAX,
502 [ELIBEXEC] = TARGET_ELIBEXEC,
503 [EILSEQ] = TARGET_EILSEQ,
504 [ENOSYS] = TARGET_ENOSYS,
505 [ELOOP] = TARGET_ELOOP,
506 [ERESTART] = TARGET_ERESTART,
507 [ESTRPIPE] = TARGET_ESTRPIPE,
508 [ENOTEMPTY] = TARGET_ENOTEMPTY,
509 [EUSERS] = TARGET_EUSERS,
510 [ENOTSOCK] = TARGET_ENOTSOCK,
511 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
512 [EMSGSIZE] = TARGET_EMSGSIZE,
513 [EPROTOTYPE] = TARGET_EPROTOTYPE,
514 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
515 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
516 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
517 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
518 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
519 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
520 [EADDRINUSE] = TARGET_EADDRINUSE,
521 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
522 [ENETDOWN] = TARGET_ENETDOWN,
523 [ENETUNREACH] = TARGET_ENETUNREACH,
524 [ENETRESET] = TARGET_ENETRESET,
525 [ECONNABORTED] = TARGET_ECONNABORTED,
526 [ECONNRESET] = TARGET_ECONNRESET,
527 [ENOBUFS] = TARGET_ENOBUFS,
528 [EISCONN] = TARGET_EISCONN,
529 [ENOTCONN] = TARGET_ENOTCONN,
530 [EUCLEAN] = TARGET_EUCLEAN,
531 [ENOTNAM] = TARGET_ENOTNAM,
532 [ENAVAIL] = TARGET_ENAVAIL,
533 [EISNAM] = TARGET_EISNAM,
534 [EREMOTEIO] = TARGET_EREMOTEIO,
535 [ESHUTDOWN] = TARGET_ESHUTDOWN,
536 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
537 [ETIMEDOUT] = TARGET_ETIMEDOUT,
538 [ECONNREFUSED] = TARGET_ECONNREFUSED,
539 [EHOSTDOWN] = TARGET_EHOSTDOWN,
540 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
541 [EALREADY] = TARGET_EALREADY,
542 [EINPROGRESS] = TARGET_EINPROGRESS,
543 [ESTALE] = TARGET_ESTALE,
544 [ECANCELED] = TARGET_ECANCELED,
545 [ENOMEDIUM] = TARGET_ENOMEDIUM,
546 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
547 #ifdef ENOKEY
548 [ENOKEY] = TARGET_ENOKEY,
549 #endif
550 #ifdef EKEYEXPIRED
551 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
552 #endif
553 #ifdef EKEYREVOKED
554 [EKEYREVOKED] = TARGET_EKEYREVOKED,
555 #endif
556 #ifdef EKEYREJECTED
557 [EKEYREJECTED] = TARGET_EKEYREJECTED,
558 #endif
559 #ifdef EOWNERDEAD
560 [EOWNERDEAD] = TARGET_EOWNERDEAD,
561 #endif
562 #ifdef ENOTRECOVERABLE
563 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
564 #endif
567 static inline int host_to_target_errno(int err)
569 if(host_to_target_errno_table[err])
570 return host_to_target_errno_table[err];
571 return err;
574 static inline int target_to_host_errno(int err)
576 if (target_to_host_errno_table[err])
577 return target_to_host_errno_table[err];
578 return err;
581 static inline abi_long get_errno(abi_long ret)
583 if (ret == -1)
584 return -host_to_target_errno(errno);
585 else
586 return ret;
589 static inline int is_error(abi_long ret)
591 return (abi_ulong)ret >= (abi_ulong)(-4096);
594 char *target_strerror(int err)
596 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
597 return NULL;
599 return strerror(target_to_host_errno(err));
602 static inline int host_to_target_sock_type(int host_type)
604 int target_type;
606 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
607 case SOCK_DGRAM:
608 target_type = TARGET_SOCK_DGRAM;
609 break;
610 case SOCK_STREAM:
611 target_type = TARGET_SOCK_STREAM;
612 break;
613 default:
614 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
615 break;
618 #if defined(SOCK_CLOEXEC)
619 if (host_type & SOCK_CLOEXEC) {
620 target_type |= TARGET_SOCK_CLOEXEC;
622 #endif
624 #if defined(SOCK_NONBLOCK)
625 if (host_type & SOCK_NONBLOCK) {
626 target_type |= TARGET_SOCK_NONBLOCK;
628 #endif
630 return target_type;
633 static abi_ulong target_brk;
634 static abi_ulong target_original_brk;
635 static abi_ulong brk_page;
637 void target_set_brk(abi_ulong new_brk)
639 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
640 brk_page = HOST_PAGE_ALIGN(target_brk);
643 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
644 #define DEBUGF_BRK(message, args...)
646 /* do_brk() must return target values and target errnos. */
647 abi_long do_brk(abi_ulong new_brk)
649 abi_long mapped_addr;
650 int new_alloc_size;
652 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
654 if (!new_brk) {
655 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
656 return target_brk;
658 if (new_brk < target_original_brk) {
659 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
660 target_brk);
661 return target_brk;
664 /* If the new brk is less than the highest page reserved to the
665 * target heap allocation, set it and we're almost done... */
666 if (new_brk <= brk_page) {
667 /* Heap contents are initialized to zero, as for anonymous
668 * mapped pages. */
669 if (new_brk > target_brk) {
670 memset(g2h(target_brk), 0, new_brk - target_brk);
672 target_brk = new_brk;
673 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
674 return target_brk;
677 /* We need to allocate more memory after the brk... Note that
678 * we don't use MAP_FIXED because that will map over the top of
679 * any existing mapping (like the one with the host libc or qemu
680 * itself); instead we treat "mapped but at wrong address" as
681 * a failure and unmap again.
683 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
684 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
685 PROT_READ|PROT_WRITE,
686 MAP_ANON|MAP_PRIVATE, 0, 0));
688 if (mapped_addr == brk_page) {
689 /* Heap contents are initialized to zero, as for anonymous
690 * mapped pages. Technically the new pages are already
691 * initialized to zero since they *are* anonymous mapped
692 * pages, however we have to take care with the contents that
693 * come from the remaining part of the previous page: it may
694 * contains garbage data due to a previous heap usage (grown
695 * then shrunken). */
696 memset(g2h(target_brk), 0, brk_page - target_brk);
698 target_brk = new_brk;
699 brk_page = HOST_PAGE_ALIGN(target_brk);
700 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
701 target_brk);
702 return target_brk;
703 } else if (mapped_addr != -1) {
704 /* Mapped but at wrong address, meaning there wasn't actually
705 * enough space for this brk.
707 target_munmap(mapped_addr, new_alloc_size);
708 mapped_addr = -1;
709 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
711 else {
712 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
715 #if defined(TARGET_ALPHA)
716 /* We (partially) emulate OSF/1 on Alpha, which requires we
717 return a proper errno, not an unchanged brk value. */
718 return -TARGET_ENOMEM;
719 #endif
720 /* For everything else, return the previous break. */
721 return target_brk;
724 static inline abi_long copy_from_user_fdset(fd_set *fds,
725 abi_ulong target_fds_addr,
726 int n)
728 int i, nw, j, k;
729 abi_ulong b, *target_fds;
731 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
732 if (!(target_fds = lock_user(VERIFY_READ,
733 target_fds_addr,
734 sizeof(abi_ulong) * nw,
735 1)))
736 return -TARGET_EFAULT;
738 FD_ZERO(fds);
739 k = 0;
740 for (i = 0; i < nw; i++) {
741 /* grab the abi_ulong */
742 __get_user(b, &target_fds[i]);
743 for (j = 0; j < TARGET_ABI_BITS; j++) {
744 /* check the bit inside the abi_ulong */
745 if ((b >> j) & 1)
746 FD_SET(k, fds);
747 k++;
751 unlock_user(target_fds, target_fds_addr, 0);
753 return 0;
756 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
757 abi_ulong target_fds_addr,
758 int n)
760 if (target_fds_addr) {
761 if (copy_from_user_fdset(fds, target_fds_addr, n))
762 return -TARGET_EFAULT;
763 *fds_ptr = fds;
764 } else {
765 *fds_ptr = NULL;
767 return 0;
770 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
771 const fd_set *fds,
772 int n)
774 int i, nw, j, k;
775 abi_long v;
776 abi_ulong *target_fds;
778 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
779 if (!(target_fds = lock_user(VERIFY_WRITE,
780 target_fds_addr,
781 sizeof(abi_ulong) * nw,
782 0)))
783 return -TARGET_EFAULT;
785 k = 0;
786 for (i = 0; i < nw; i++) {
787 v = 0;
788 for (j = 0; j < TARGET_ABI_BITS; j++) {
789 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
790 k++;
792 __put_user(v, &target_fds[i]);
795 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
797 return 0;
800 #if defined(__alpha__)
801 #define HOST_HZ 1024
802 #else
803 #define HOST_HZ 100
804 #endif
806 static inline abi_long host_to_target_clock_t(long ticks)
808 #if HOST_HZ == TARGET_HZ
809 return ticks;
810 #else
811 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
812 #endif
815 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
816 const struct rusage *rusage)
818 struct target_rusage *target_rusage;
820 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
821 return -TARGET_EFAULT;
822 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
823 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
824 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
825 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
826 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
827 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
828 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
829 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
830 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
831 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
832 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
833 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
834 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
835 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
836 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
837 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
838 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
839 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
840 unlock_user_struct(target_rusage, target_addr, 1);
842 return 0;
845 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
847 abi_ulong target_rlim_swap;
848 rlim_t result;
850 target_rlim_swap = tswapal(target_rlim);
851 if (target_rlim_swap == TARGET_RLIM_INFINITY)
852 return RLIM_INFINITY;
854 result = target_rlim_swap;
855 if (target_rlim_swap != (rlim_t)result)
856 return RLIM_INFINITY;
858 return result;
861 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
863 abi_ulong target_rlim_swap;
864 abi_ulong result;
866 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
867 target_rlim_swap = TARGET_RLIM_INFINITY;
868 else
869 target_rlim_swap = rlim;
870 result = tswapal(target_rlim_swap);
872 return result;
875 static inline int target_to_host_resource(int code)
877 switch (code) {
878 case TARGET_RLIMIT_AS:
879 return RLIMIT_AS;
880 case TARGET_RLIMIT_CORE:
881 return RLIMIT_CORE;
882 case TARGET_RLIMIT_CPU:
883 return RLIMIT_CPU;
884 case TARGET_RLIMIT_DATA:
885 return RLIMIT_DATA;
886 case TARGET_RLIMIT_FSIZE:
887 return RLIMIT_FSIZE;
888 case TARGET_RLIMIT_LOCKS:
889 return RLIMIT_LOCKS;
890 case TARGET_RLIMIT_MEMLOCK:
891 return RLIMIT_MEMLOCK;
892 case TARGET_RLIMIT_MSGQUEUE:
893 return RLIMIT_MSGQUEUE;
894 case TARGET_RLIMIT_NICE:
895 return RLIMIT_NICE;
896 case TARGET_RLIMIT_NOFILE:
897 return RLIMIT_NOFILE;
898 case TARGET_RLIMIT_NPROC:
899 return RLIMIT_NPROC;
900 case TARGET_RLIMIT_RSS:
901 return RLIMIT_RSS;
902 case TARGET_RLIMIT_RTPRIO:
903 return RLIMIT_RTPRIO;
904 case TARGET_RLIMIT_SIGPENDING:
905 return RLIMIT_SIGPENDING;
906 case TARGET_RLIMIT_STACK:
907 return RLIMIT_STACK;
908 default:
909 return code;
913 static inline abi_long copy_from_user_timeval(struct timeval *tv,
914 abi_ulong target_tv_addr)
916 struct target_timeval *target_tv;
918 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
919 return -TARGET_EFAULT;
921 __get_user(tv->tv_sec, &target_tv->tv_sec);
922 __get_user(tv->tv_usec, &target_tv->tv_usec);
924 unlock_user_struct(target_tv, target_tv_addr, 0);
926 return 0;
929 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
930 const struct timeval *tv)
932 struct target_timeval *target_tv;
934 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
935 return -TARGET_EFAULT;
937 __put_user(tv->tv_sec, &target_tv->tv_sec);
938 __put_user(tv->tv_usec, &target_tv->tv_usec);
940 unlock_user_struct(target_tv, target_tv_addr, 1);
942 return 0;
945 static inline abi_long copy_from_user_timezone(struct timezone *tz,
946 abi_ulong target_tz_addr)
948 struct target_timezone *target_tz;
950 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
951 return -TARGET_EFAULT;
954 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
955 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
957 unlock_user_struct(target_tz, target_tz_addr, 0);
959 return 0;
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a struct mq_attr from guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a struct mq_attr into guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    /* Pull the three (possibly NULL) fd sets in from the guest. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* A zero timeout address means "block indefinitely". */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        /* select() mutates the sets and (on Linux) the timeout;
         * propagate the results back to the guest. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif
1054 static abi_long do_pipe2(int host_pipe[], int flags)
1056 #ifdef CONFIG_PIPE2
1057 return pipe2(host_pipe, flags);
1058 #else
1059 return -ENOSYS;
1060 #endif
/* Emulate pipe()/pipe2() for the guest.  'flags' non-zero selects the
 * pipe2() path; 'is_pipe2' tells us which guest syscall was invoked so
 * the legacy per-target return conventions below only apply to pipe(). */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd is returned in a CPU register, first fd as the
         * syscall result, instead of writing both through pipedes. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: store both fds into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Convert a guest ip_mreq/ip_mreqn at target_addr into the host struct
 * pointed to by mreqn.  'len' distinguishes the two layouts: imr_ifindex
 * exists (and is byte-swapped) only for the full ip_mreqn form.
 * Returns 0 on success or -TARGET_EFAULT. */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    /* Addresses are copied without swapping (kept as-is in both views). */
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Copy a guest sockaddr of 'len' bytes into the host buffer 'addr',
 * fixing up byte order of sa_family (and, for AF_PACKET, the ll fields)
 * and repairing a common AF_UNIX length mistake.  The caller must size
 * 'addr' with at least len + 1 bytes to leave room for the possible
 * extra NUL (see the len++ below).  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Last byte non-NUL but the next one is: include it. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_PACKET) {
        /* sockaddr_ll carries multi-byte fields beyond sa_family that
         * also need swapping to host order. */
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1162 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1163 struct sockaddr *addr,
1164 socklen_t len)
1166 struct target_sockaddr *target_saddr;
1168 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1169 if (!target_saddr)
1170 return -TARGET_EFAULT;
1171 memcpy(target_saddr, addr, len);
1172 target_saddr->sa_family = tswap16(addr->sa_family);
1173 unlock_user(target_saddr, target_addr, len);
1175 return 0;
/* Convert the ancillary-data (control message) chain of a guest msghdr
 * into the host msghdr for sendmsg().  Walks both cmsg lists in lock
 * step, translating level/type/len and the payloads we understand
 * (SCM_RIGHTS fd arrays, SCM_CREDENTIALS); anything else is copied raw
 * with a warning.  On return msgh->msg_controllen holds the space
 * actually used.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = guest cmsg_len minus the guest header size. */
        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: swap each int individually. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: pass the bytes through unmodified. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the host control-message chain produced by recvmsg() back into
 * the guest msghdr.  Mirrors target_to_host_cmsg() but must also cope
 * with the guest buffer being smaller than the host data: headers are
 * never split, payloads may be truncated, and truncation is reported to
 * the guest via MSG_CTRUNC.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                /* struct timeval differs in size between host and target. */
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                /* Only as many fds as fit in the (possibly truncated)
                 * destination are copied. */
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                /* A truncated timeval cannot be converted field by
                 * field; fall back to the raw-copy path. */
                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding if the target
             * needs more bytes than the host supplied. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1400 /* do_setsockopt() Must return target values and target errnos. */
1401 static abi_long do_setsockopt(int sockfd, int level, int optname,
1402 abi_ulong optval_addr, socklen_t optlen)
1404 abi_long ret;
1405 int val;
1406 struct ip_mreqn *ip_mreq;
1407 struct ip_mreq_source *ip_mreq_source;
1409 switch(level) {
1410 case SOL_TCP:
1411 /* TCP options all take an 'int' value. */
1412 if (optlen < sizeof(uint32_t))
1413 return -TARGET_EINVAL;
1415 if (get_user_u32(val, optval_addr))
1416 return -TARGET_EFAULT;
1417 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1418 break;
1419 case SOL_IP:
1420 switch(optname) {
1421 case IP_TOS:
1422 case IP_TTL:
1423 case IP_HDRINCL:
1424 case IP_ROUTER_ALERT:
1425 case IP_RECVOPTS:
1426 case IP_RETOPTS:
1427 case IP_PKTINFO:
1428 case IP_MTU_DISCOVER:
1429 case IP_RECVERR:
1430 case IP_RECVTOS:
1431 #ifdef IP_FREEBIND
1432 case IP_FREEBIND:
1433 #endif
1434 case IP_MULTICAST_TTL:
1435 case IP_MULTICAST_LOOP:
1436 val = 0;
1437 if (optlen >= sizeof(uint32_t)) {
1438 if (get_user_u32(val, optval_addr))
1439 return -TARGET_EFAULT;
1440 } else if (optlen >= 1) {
1441 if (get_user_u8(val, optval_addr))
1442 return -TARGET_EFAULT;
1444 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1445 break;
1446 case IP_ADD_MEMBERSHIP:
1447 case IP_DROP_MEMBERSHIP:
1448 if (optlen < sizeof (struct target_ip_mreq) ||
1449 optlen > sizeof (struct target_ip_mreqn))
1450 return -TARGET_EINVAL;
1452 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1453 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1454 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1455 break;
1457 case IP_BLOCK_SOURCE:
1458 case IP_UNBLOCK_SOURCE:
1459 case IP_ADD_SOURCE_MEMBERSHIP:
1460 case IP_DROP_SOURCE_MEMBERSHIP:
1461 if (optlen != sizeof (struct target_ip_mreq_source))
1462 return -TARGET_EINVAL;
1464 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1465 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1466 unlock_user (ip_mreq_source, optval_addr, 0);
1467 break;
1469 default:
1470 goto unimplemented;
1472 break;
1473 case SOL_IPV6:
1474 switch (optname) {
1475 case IPV6_MTU_DISCOVER:
1476 case IPV6_MTU:
1477 case IPV6_V6ONLY:
1478 case IPV6_RECVPKTINFO:
1479 val = 0;
1480 if (optlen < sizeof(uint32_t)) {
1481 return -TARGET_EINVAL;
1483 if (get_user_u32(val, optval_addr)) {
1484 return -TARGET_EFAULT;
1486 ret = get_errno(setsockopt(sockfd, level, optname,
1487 &val, sizeof(val)));
1488 break;
1489 default:
1490 goto unimplemented;
1492 break;
1493 case SOL_RAW:
1494 switch (optname) {
1495 case ICMP_FILTER:
1496 /* struct icmp_filter takes an u32 value */
1497 if (optlen < sizeof(uint32_t)) {
1498 return -TARGET_EINVAL;
1501 if (get_user_u32(val, optval_addr)) {
1502 return -TARGET_EFAULT;
1504 ret = get_errno(setsockopt(sockfd, level, optname,
1505 &val, sizeof(val)));
1506 break;
1508 default:
1509 goto unimplemented;
1511 break;
1512 case TARGET_SOL_SOCKET:
1513 switch (optname) {
1514 case TARGET_SO_RCVTIMEO:
1516 struct timeval tv;
1518 optname = SO_RCVTIMEO;
1520 set_timeout:
1521 if (optlen != sizeof(struct target_timeval)) {
1522 return -TARGET_EINVAL;
1525 if (copy_from_user_timeval(&tv, optval_addr)) {
1526 return -TARGET_EFAULT;
1529 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1530 &tv, sizeof(tv)));
1531 return ret;
1533 case TARGET_SO_SNDTIMEO:
1534 optname = SO_SNDTIMEO;
1535 goto set_timeout;
1536 case TARGET_SO_ATTACH_FILTER:
1538 struct target_sock_fprog *tfprog;
1539 struct target_sock_filter *tfilter;
1540 struct sock_fprog fprog;
1541 struct sock_filter *filter;
1542 int i;
1544 if (optlen != sizeof(*tfprog)) {
1545 return -TARGET_EINVAL;
1547 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1548 return -TARGET_EFAULT;
1550 if (!lock_user_struct(VERIFY_READ, tfilter,
1551 tswapal(tfprog->filter), 0)) {
1552 unlock_user_struct(tfprog, optval_addr, 1);
1553 return -TARGET_EFAULT;
1556 fprog.len = tswap16(tfprog->len);
1557 filter = malloc(fprog.len * sizeof(*filter));
1558 if (filter == NULL) {
1559 unlock_user_struct(tfilter, tfprog->filter, 1);
1560 unlock_user_struct(tfprog, optval_addr, 1);
1561 return -TARGET_ENOMEM;
1563 for (i = 0; i < fprog.len; i++) {
1564 filter[i].code = tswap16(tfilter[i].code);
1565 filter[i].jt = tfilter[i].jt;
1566 filter[i].jf = tfilter[i].jf;
1567 filter[i].k = tswap32(tfilter[i].k);
1569 fprog.filter = filter;
1571 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1572 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1573 free(filter);
1575 unlock_user_struct(tfilter, tfprog->filter, 1);
1576 unlock_user_struct(tfprog, optval_addr, 1);
1577 return ret;
1579 case TARGET_SO_BINDTODEVICE:
1581 char *dev_ifname, *addr_ifname;
1583 if (optlen > IFNAMSIZ - 1) {
1584 optlen = IFNAMSIZ - 1;
1586 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1587 if (!dev_ifname) {
1588 return -TARGET_EFAULT;
1590 optname = SO_BINDTODEVICE;
1591 addr_ifname = alloca(IFNAMSIZ);
1592 memcpy(addr_ifname, dev_ifname, optlen);
1593 addr_ifname[optlen] = 0;
1594 ret = get_errno(setsockopt(sockfd, level, optname, addr_ifname, optlen));
1595 unlock_user (dev_ifname, optval_addr, 0);
1596 return ret;
1598 /* Options with 'int' argument. */
1599 case TARGET_SO_DEBUG:
1600 optname = SO_DEBUG;
1601 break;
1602 case TARGET_SO_REUSEADDR:
1603 optname = SO_REUSEADDR;
1604 break;
1605 case TARGET_SO_TYPE:
1606 optname = SO_TYPE;
1607 break;
1608 case TARGET_SO_ERROR:
1609 optname = SO_ERROR;
1610 break;
1611 case TARGET_SO_DONTROUTE:
1612 optname = SO_DONTROUTE;
1613 break;
1614 case TARGET_SO_BROADCAST:
1615 optname = SO_BROADCAST;
1616 break;
1617 case TARGET_SO_SNDBUF:
1618 optname = SO_SNDBUF;
1619 break;
1620 case TARGET_SO_SNDBUFFORCE:
1621 optname = SO_SNDBUFFORCE;
1622 break;
1623 case TARGET_SO_RCVBUF:
1624 optname = SO_RCVBUF;
1625 break;
1626 case TARGET_SO_RCVBUFFORCE:
1627 optname = SO_RCVBUFFORCE;
1628 break;
1629 case TARGET_SO_KEEPALIVE:
1630 optname = SO_KEEPALIVE;
1631 break;
1632 case TARGET_SO_OOBINLINE:
1633 optname = SO_OOBINLINE;
1634 break;
1635 case TARGET_SO_NO_CHECK:
1636 optname = SO_NO_CHECK;
1637 break;
1638 case TARGET_SO_PRIORITY:
1639 optname = SO_PRIORITY;
1640 break;
1641 #ifdef SO_BSDCOMPAT
1642 case TARGET_SO_BSDCOMPAT:
1643 optname = SO_BSDCOMPAT;
1644 break;
1645 #endif
1646 case TARGET_SO_PASSCRED:
1647 optname = SO_PASSCRED;
1648 break;
1649 case TARGET_SO_PASSSEC:
1650 optname = SO_PASSSEC;
1651 break;
1652 case TARGET_SO_TIMESTAMP:
1653 optname = SO_TIMESTAMP;
1654 break;
1655 case TARGET_SO_RCVLOWAT:
1656 optname = SO_RCVLOWAT;
1657 break;
1658 break;
1659 default:
1660 goto unimplemented;
1662 if (optlen < sizeof(uint32_t))
1663 return -TARGET_EINVAL;
1665 if (get_user_u32(val, optval_addr))
1666 return -TARGET_EFAULT;
1667 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1668 break;
1669 default:
1670 unimplemented:
1671 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1672 ret = -TARGET_ENOPROTOOPT;
1674 return ret;
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates guest (level, optname) into the host call, then writes the
 * value and updated length back to guest memory.  Most SOL_SOCKET and
 * all SOL_TCP options funnel into the shared int_case path; SO_PEERCRED
 * and the SOL_IP byte-or-int options get bespoke handling. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Never report more bytes than the guest asked for. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): looks like this was meant to be sizeof(val);
         * benign since socklen_t and int are both 4 bytes on Linux,
         * but worth confirming/cleaning up. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            /* NOTE(review): same sizeof(lv)-vs-sizeof(val) remark as
             * in int_case above. */
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Guest asked for fewer than 4 bytes: return a single byte
             * when the value fits, mimicking the kernel's behaviour. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Build a host iovec array from a guest iovec array at target_addr,
 * locking each guest buffer into host memory ('type'/'copy' are passed
 * through to lock_user).  Returns the malloc'd array, or NULL with errno
 * set (errno == 0 means count was 0).  On success the caller must release
 * it with unlock_iovec().  Bad buffers after the first become zero-length
 * entries so a partial transfer can still happen. */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = calloc(count, sizeof(struct iovec));
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the accumulated length never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Undo the locks taken for entries before the failing one. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    free(vec);
    errno = err;
    return NULL;
}
1953 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1954 int count, int copy)
1956 struct target_iovec *target_vec;
1957 int i;
1959 target_vec = lock_user(VERIFY_READ, target_addr,
1960 count * sizeof(struct target_iovec), 1);
1961 if (target_vec) {
1962 for (i = 0; i < count; i++) {
1963 abi_ulong base = tswapal(target_vec[i].iov_base);
1964 abi_long len = tswapal(target_vec[i].iov_len);
1965 if (len < 0) {
1966 break;
1968 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1970 unlock_user(target_vec, target_addr, 0);
1973 free(vec);
/* Convert a guest socket type (base type plus TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flags) to the host encoding in place.
 * Returns 0 on success, or -TARGET_EINVAL when a requested flag cannot
 * be represented on this host at all. */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        /* Other types share the same numbering on host and target. */
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        /* Without SOCK_NONBLOCK, sock_flags_fixup() emulates via
         * O_NONBLOCK; only fail if that is unavailable too. */
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
/* Try to emulate socket type flags after socket creation.  */
/* On hosts lacking SOCK_NONBLOCK, apply O_NONBLOCK via fcntl after the
 * fact.  Returns the fd on success; on fcntl failure the fd is closed
 * and -TARGET_EINVAL returned. */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2025 /* do_socket() Must return target values and target errnos. */
2026 static abi_long do_socket(int domain, int type, int protocol)
2028 int target_type = type;
2029 int ret;
2031 ret = target_to_host_sock_type(&type);
2032 if (ret) {
2033 return ret;
2036 if (domain == PF_NETLINK)
2037 return -TARGET_EAFNOSUPPORT;
2038 ret = get_errno(socket(domain, type, protocol));
2039 if (ret >= 0) {
2040 ret = sock_flags_fixup(ret, target_type);
2042 return ret;
2045 /* do_bind() Must return target values and target errnos. */
2046 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2047 socklen_t addrlen)
2049 void *addr;
2050 abi_long ret;
2052 if ((int)addrlen < 0) {
2053 return -TARGET_EINVAL;
2056 addr = alloca(addrlen+1);
2058 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2059 if (ret)
2060 return ret;
2062 return get_errno(bind(sockfd, addr, addrlen));
2065 /* do_connect() Must return target values and target errnos. */
2066 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2067 socklen_t addrlen)
2069 void *addr;
2070 abi_long ret;
2072 if ((int)addrlen < 0) {
2073 return -TARGET_EINVAL;
2076 addr = alloca(addrlen+1);
2078 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2079 if (ret)
2080 return ret;
2082 return get_errno(connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
/* Core of sendmsg/recvmsg emulation, operating on an already-locked
 * guest msghdr.  Converts name, iovec and control data to host form,
 * performs the host call, and (for receive) converts results back.
 * 'send' selects sendmsg() vs recvmsg(). */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        /* +1 for the NUL target_to_host_sockaddr() may append. */
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
                                msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Twice the guest size: host cmsg headers/alignment can be larger
     * (see the allocation-strategy comment in target_to_host_cmsg). */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count: the conversions below reuse ret. */
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
2152 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2153 int flags, int send)
2155 abi_long ret;
2156 struct target_msghdr *msgp;
2158 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2159 msgp,
2160 target_msg,
2161 send ? 1 : 0)) {
2162 return -TARGET_EFAULT;
2164 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2165 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2166 return ret;
#ifdef TARGET_NR_sendmmsg
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked()
 * for each entry of the guest mmsghdr vector, storing each per-message
 * byte count into msg_len.  Returns the number of messages processed
 * if any succeeded, otherwise the first error. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Only the i entries actually processed need copying back. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
#endif
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    /* Callers guarantee flags == 0 here (see comment above). */
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* NULL peer-address pointer: the guest doesn't want the address. */
    if (target_addr == 0) {
        return get_errno(accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
2268 /* do_getpeername() Must return target values and target errnos. */
2269 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2270 abi_ulong target_addrlen_addr)
2272 socklen_t addrlen;
2273 void *addr;
2274 abi_long ret;
2276 if (get_user_u32(addrlen, target_addrlen_addr))
2277 return -TARGET_EFAULT;
2279 if ((int)addrlen < 0) {
2280 return -TARGET_EINVAL;
2283 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2284 return -TARGET_EFAULT;
2286 addr = alloca(addrlen);
2288 ret = get_errno(getpeername(fd, addr, &addrlen));
2289 if (!is_error(ret)) {
2290 host_to_target_sockaddr(target_addr, addr, addrlen);
2291 if (put_user_u32(addrlen, target_addrlen_addr))
2292 ret = -TARGET_EFAULT;
2294 return ret;
2297 /* do_getsockname() Must return target values and target errnos. */
2298 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2299 abi_ulong target_addrlen_addr)
2301 socklen_t addrlen;
2302 void *addr;
2303 abi_long ret;
2305 if (get_user_u32(addrlen, target_addrlen_addr))
2306 return -TARGET_EFAULT;
2308 if ((int)addrlen < 0) {
2309 return -TARGET_EINVAL;
2312 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2313 return -TARGET_EFAULT;
2315 addr = alloca(addrlen);
2317 ret = get_errno(getsockname(fd, addr, &addrlen));
2318 if (!is_error(ret)) {
2319 host_to_target_sockaddr(target_addr, addr, addrlen);
2320 if (put_user_u32(addrlen, target_addrlen_addr))
2321 ret = -TARGET_EFAULT;
2323 return ret;
2326 /* do_socketpair() Must return target values and target errnos. */
2327 static abi_long do_socketpair(int domain, int type, int protocol,
2328 abi_ulong target_tab_addr)
2330 int tab[2];
2331 abi_long ret;
2333 target_to_host_sock_type(&type);
2335 ret = get_errno(socketpair(domain, type, protocol, tab));
2336 if (!is_error(ret)) {
2337 if (put_user_s32(tab[0], target_tab_addr)
2338 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2339 ret = -TARGET_EFAULT;
2341 return ret;
/* do_sendto() Must return target values and target errnos.
 *
 * Emulates both sendto(2) (target_addr != 0) and send(2)
 * (target_addr == 0).  The message payload is locked in guest memory
 * for the duration of the host call and released unmodified.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    /* socklen_t is unsigned; a "negative" length means the guest passed
     * a bogus value, which Linux rejects with EINVAL. */
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        /* +1 byte: room for NUL-terminating AF_UNIX paths — TODO confirm */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            /* Conversion failed: drop the lock without writing back. */
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    /* Payload was only read, so write back zero bytes. */
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos.
 *
 * Emulates both recvfrom(2) (target_addr != 0) and recv(2)
 * (target_addr == 0).  On success the received byte count (ret) is
 * written back to the guest buffer; on any failure the buffer lock is
 * released without copying data (the goto-fail path).
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        /* Caller wants the source address: fetch and validate addrlen. */
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            /* Copy the sender's address and its final length back. */
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: commit up to len bytes of payload to the guest. */
        unlock_user(host_msg, msg, len);
    } else {
    fail:
        /* Error: release the buffer without writing anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Demultiplexer for the socketcall(2) super-syscall used on targets
 * (e.g. i386) that funnel all socket operations through one entry.
 * 'num' selects the operation and 'vptr' points at a guest array of
 * abi_long arguments; ac[] gives the argument count per operation.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        /* plain accept is accept4 with flags == 0 */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        /* send/recv are sendto/recvfrom with a NULL address */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
/* Bookkeeping for guest shmat() mappings so that shmdt() can clear the
 * corresponding page flags.  A slot with start == 0 is free. */
#define N_SHM_REGIONS	32

static struct shm_region {
    abi_ulong		start;  /* guest address of the attached segment */
    abi_ulong		size;   /* segment size in bytes */
} shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of struct semid_ds.  On 32-bit targets (other than
 * PPC64) the kernel pads each time_t with an unused word; the field
 * order and padding here must match the target kernel ABI exactly. */
struct target_semid_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;      /* last semop time */
#if !defined(TARGET_PPC64)
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;      /* last change time */
#if !defined(TARGET_PPC64)
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;      /* number of semaphores in the set */
  abi_ulong __unused3;
  abi_ulong __unused4;
};
/* Convert a guest struct ipc_perm (embedded at the head of a guest
 * semid_ds at target_addr) into host format.  Returns 0 on success or
 * -TARGET_EFAULT if the guest struct cannot be locked.
 *
 * Note the per-target field widths: mode and __seq are 32-bit on some
 * ABIs (Alpha/MIPS/PPC) and 16-bit elsewhere, so the swap width differs.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Convert a host struct ipc_perm into the guest semid_ds at target_addr.
 * Mirror of target_to_host_ipc_perm(); returns 0 on success or
 * -TARGET_EFAULT if the guest struct cannot be locked for writing. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    /* these targets use a 32-bit mode/seq in the ipc_perm ABI */
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
2579 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2580 abi_ulong target_addr)
2582 struct target_semid_ds *target_sd;
2584 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2585 return -TARGET_EFAULT;
2586 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2587 return -TARGET_EFAULT;
2588 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2589 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2590 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2591 unlock_user_struct(target_sd, target_addr, 0);
2592 return 0;
2595 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2596 struct semid_ds *host_sd)
2598 struct target_semid_ds *target_sd;
2600 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2601 return -TARGET_EFAULT;
2602 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2603 return -TARGET_EFAULT;
2604 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2605 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2606 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2607 unlock_user_struct(target_sd, target_addr, 1);
2608 return 0;
/* Guest-ABI layout of struct seminfo (semctl IPC_INFO/SEM_INFO result).
 * All fields are plain ints, matching the Linux seminfo layout. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
/* Copy a host struct seminfo into guest memory at target_addr,
 * byte-swapping each field via __put_user.  Returns 0 on success or
 * -TARGET_EFAULT if the guest struct cannot be locked for writing. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side semctl() argument union (glibc requires callers to define it). */
union semun {
	int val;                    /* SETVAL/GETVAL */
	struct semid_ds *buf;       /* IPC_STAT/IPC_SET */
	unsigned short *array;      /* GETALL/SETALL */
	struct seminfo *__buf;      /* IPC_INFO/SEM_INFO */
};

/* Guest-side view of the same union: the pointer members are guest
 * addresses (abi_ulong), not host pointers. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
/* Read the SETALL value array from guest memory into a freshly
 * malloc'd host array (*host_array).  The semaphore count is obtained
 * from the kernel via semctl(IPC_STAT) rather than trusted from the
 * guest.  On success the caller owns *host_array and must free it
 * (host_to_target_semarray() does so).  Returns 0, a negative host
 * errno from semctl, or -TARGET_ENOMEM / -TARGET_EFAULT. */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
2694 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2695 unsigned short **host_array)
2697 int nsems;
2698 unsigned short *array;
2699 union semun semun;
2700 struct semid_ds semid_ds;
2701 int i, ret;
2703 semun.buf = &semid_ds;
2705 ret = semctl(semid, 0, IPC_STAT, semun);
2706 if (ret == -1)
2707 return get_errno(ret);
2709 nsems = semid_ds.sem_nsems;
2711 array = lock_user(VERIFY_WRITE, target_addr,
2712 nsems*sizeof(unsigned short), 0);
2713 if (!array)
2714 return -TARGET_EFAULT;
2716 for(i=0; i<nsems; i++) {
2717 __put_user((*host_array)[i], &array[i]);
2719 free(*host_array);
2720 unlock_user(array, target_addr, 1);
2722 return 0;
/* Emulate semctl(2).  target_su is the guest's semun union passed by
 * value; which member is live depends on cmd.  Returns the host result
 * converted to target errnos, or -TARGET_EINVAL for unknown commands.
 * NOTE(review): on GETALL/SETALL the helpers own/free the temp array;
 * error returns from them propagate the helper's errno unchanged. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;   /* strip IPC_64 and other modifier bits */

    switch( cmd ) {
	case GETVAL:
	case SETVAL:
            /* In 64 bit cross-endian situations, we will erroneously pick up
             * the wrong half of the union for the "val" element.  To rectify
             * this, the entire 8-byte structure is byteswapped, followed by
	     * a swap of the 4 byte val field. In other cases, the data is
	     * already in proper host byte order. */
	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
		target_su.buf = tswapal(target_su.buf);
		arg.val = tswap32(target_su.val);
	    } else {
		arg.val = target_su.val;
	    }
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            break;
	case GETALL:
	case SETALL:
            /* copy the guest value array in, run the op, copy it back */
            err = target_to_host_semarray(semid, &array, target_su.array);
            if (err)
                return err;
            arg.array = array;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semarray(semid, target_su.array, &array);
            if (err)
                return err;
            break;
	case IPC_STAT:
	case IPC_SET:
	case SEM_STAT:
            err = target_to_host_semid_ds(&dsarg, target_su.buf);
            if (err)
                return err;
            arg.buf = &dsarg;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semid_ds(target_su.buf, &dsarg);
            if (err)
                return err;
            break;
	case IPC_INFO:
	case SEM_INFO:
            arg.__buf = &seminfo;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_seminfo(target_su.__buf, &seminfo);
            if (err)
                return err;
            break;
	case IPC_RMID:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
            /* these commands take no semun argument */
            ret = get_errno(semctl(semid, semnum, cmd, NULL));
            break;
    }

    return ret;
}
/* Guest-ABI layout of struct sembuf (one semop operation). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation: <0 wait, 0 test, >0 post */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO */
};
/* Copy an array of nsops guest sembufs at target_addr into the
 * caller-provided host array, byte-swapping each field.  Returns 0 or
 * -TARGET_EFAULT if the guest array cannot be locked. */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
/* Emulate semop(2): convert the guest op array and forward to the host.
 * NOTE(review): sops is a VLA sized by the guest-controlled nsops, so a
 * huge nsops could overflow the host stack before the kernel gets a
 * chance to reject it — consider bounding nsops (kernel caps it at
 * SEMOPM); verify against upstream. */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(semop(semid, sops, nsops));
}
/* Guest-ABI layout of struct msqid_ds.  32-bit ABIs pad each time_t
 * with an unused word; field order must match the target kernel ABI. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;     /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;     /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;     /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;  /* current bytes in queue */
    abi_ulong msg_qnum;      /* messages in queue */
    abi_ulong msg_qbytes;    /* max bytes allowed in queue */
    abi_ulong msg_lspid;     /* pid of last msgsnd */
    abi_ulong msg_lrpid;     /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
2857 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2858 abi_ulong target_addr)
2860 struct target_msqid_ds *target_md;
2862 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2863 return -TARGET_EFAULT;
2864 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2865 return -TARGET_EFAULT;
2866 host_md->msg_stime = tswapal(target_md->msg_stime);
2867 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2868 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2869 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2870 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2871 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2872 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2873 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2874 unlock_user_struct(target_md, target_addr, 0);
2875 return 0;
2878 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2879 struct msqid_ds *host_md)
2881 struct target_msqid_ds *target_md;
2883 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2884 return -TARGET_EFAULT;
2885 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2886 return -TARGET_EFAULT;
2887 target_md->msg_stime = tswapal(host_md->msg_stime);
2888 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2889 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2890 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2891 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2892 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2893 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2894 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2895 unlock_user_struct(target_md, target_addr, 1);
2896 return 0;
/* Guest-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO result). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
/* Copy a host struct msginfo into guest memory at target_addr,
 * byte-swapping each field via __put_user.  Returns 0 on success or
 * -TARGET_EFAULT if the guest struct cannot be locked for writing. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/* Emulate msgctl(2).  ptr is the guest address of the command-specific
 * struct (msqid_ds or msginfo).  Returns target errnos; unknown
 * commands fall through with the initial -TARGET_EINVAL. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;   /* strip IPC_64 and other modifier bits */

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* convert in, run the op, convert the (possibly updated) struct out */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* kernel writes a struct msginfo through the msqid_ds pointer */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct msgbuf: the type word followed by the
 * variable-length text (declared [1], accessed up to msgsz bytes). */
struct target_msgbuf {
    abi_long mtype;
    char	mtext[1];
};
/* Emulate msgsnd(2): copy the guest msgbuf into a host-format buffer
 * (host 'long' mtype followed by msgsz payload bytes) and send it.
 * Returns target errnos; msgsz < 0 is rejected up front with EINVAL.
 * NOTE(review): uses malloc/free while do_msgrcv uses g_malloc/g_free —
 * inconsistent allocator choice; consider unifying. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* +sizeof(long): room for the host-format mtype word */
    host_mb = malloc(msgsz+sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a host-format buffer, then copy the
 * payload and byte-swapped mtype back into the guest msgbuf at msgp.
 * On success ret is the number of payload bytes received.
 * NOTE(review): the 'if (target_mb)' guard at 'end' is redundant —
 * target_mb is always non-NULL there because the EFAULT path returns
 * early; harmless but could be dropped. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* +sizeof(long): room for the host-format mtype word;
     * g_malloc aborts on OOM rather than returning NULL */
    host_mb = g_malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* lock exactly the received payload region for writing */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
3027 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3028 abi_ulong target_addr)
3030 struct target_shmid_ds *target_sd;
3032 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3033 return -TARGET_EFAULT;
3034 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3035 return -TARGET_EFAULT;
3036 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3037 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3038 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3039 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3040 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3041 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3042 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3043 unlock_user_struct(target_sd, target_addr, 0);
3044 return 0;
3047 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3048 struct shmid_ds *host_sd)
3050 struct target_shmid_ds *target_sd;
3052 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3053 return -TARGET_EFAULT;
3054 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3055 return -TARGET_EFAULT;
3056 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3057 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3058 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3059 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3060 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3061 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3062 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3063 unlock_user_struct(target_sd, target_addr, 1);
3064 return 0;
/* Guest-ABI layout of struct shminfo (shmctl IPC_INFO result). */
struct  target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};
/* Copy a host struct shminfo into guest memory at target_addr,
 * byte-swapping each field via __put_user.  Returns 0 on success or
 * -TARGET_EFAULT if the guest struct cannot be locked for writing. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/* Guest-ABI layout of struct shm_info (shmctl SHM_INFO result). */
struct target_shm_info {
    int used_ids;              /* number of existing segments */
    abi_ulong shm_tot;         /* total allocated shm (pages) */
    abi_ulong shm_rss;         /* resident shm (pages) */
    abi_ulong shm_swp;         /* swapped shm (pages) */
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
/* Copy a host struct shm_info into guest memory at target_addr,
 * byte-swapping each field via __put_user.  Returns 0 on success or
 * -TARGET_EFAULT if the guest struct cannot be locked for writing. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/* Emulate shmctl(2).  buf is the guest address of the command-specific
 * struct (shmid_ds, shminfo or shm_info).  Returns target errnos;
 * unknown commands fall through with the initial -TARGET_EINVAL. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;   /* strip IPC_64 and other modifier bits */

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* convert in, run the op, convert the (possibly updated) struct out */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* kernel writes a struct shminfo through the shmid_ds pointer */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* likewise, a struct shm_info */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
/* Emulate shmat(2): attach the segment either at the guest-requested
 * address or at a free spot found by mmap_find_vma(), then record the
 * mapping in shm_regions[] (so do_shmdt can undo the page flags) and
 * mark the guest pages valid/readable (+writable unless SHM_RDONLY).
 * Returns the guest address of the mapping or a target errno.
 * NOTE(review): if shm_regions[] is full the attach still succeeds but
 * is left untracked — shmdt then won't clear its page flags. */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    /* hold the mmap lock across vma search + attach + flag update */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: the vma we found may already be reserved */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* remember the mapping in the first free bookkeeping slot */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
3207 static inline abi_long do_shmdt(abi_ulong shmaddr)
3209 int i;
3211 for (i = 0; i < N_SHM_REGIONS; ++i) {
3212 if (shm_regions[i].start == shmaddr) {
3213 shm_regions[i].start = 0;
3214 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3215 break;
3219 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * Demultiplexer for the ipc(2) super-syscall used on targets that
 * funnel all SysV IPC operations through one entry.  The low 16 bits
 * of 'call' select the operation; the high 16 bits carry a version
 * number that changes the argument layout for msgrcv and shmat.
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third,
                        (union target_semun) atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old-style (version 0) layout: ptr points at a kludge
                 * struct bundling the msgbuf pointer and the msgtyp */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* store the result address through the 'third' pointer */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
	break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

/* First pass over syscall_types.h: generate an enum tag STRUCT_<name>
 * for every structure description. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: generate a thunk type-description array
 * struct_<name>_def[] for every non-special structure. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

/* Custom handler signature for ioctls too irregular for the generic
 * thunk-based argument conversion. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;          /* ioctl number as seen by the guest */
    unsigned int host_cmd;   /* corresponding host ioctl number */
    const char *name;        /* for tracing/logging */
    int access;              /* IOC_R / IOC_W / IOC_RW: data direction */
    do_ioctl_fn *do_ioctl;   /* optional custom handler (NULL = generic) */
    const argtype arg_type[5]; /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* size of the scratch buffer used for generic ioctl argument conversion */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Custom ioctl handler for FS_IOC_FIEMAP.
 *
 * The argument is a struct fiemap followed by a variable-length array
 * of struct fiemap_extent sized by fm_extent_count, so the generic
 * fixed-size conversion cannot handle it: the header is converted in,
 * a large-enough host buffer is chosen (heap-allocated if the fixed
 * scratch buffer is too small), and on success the header plus each
 * returned extent is converted back out individually. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;   /* set when fm was heap-allocated below */

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* reject guest-supplied counts whose size computation would overflow */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        free(fm);
    }
    return ret;
}
#endif
3455 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3456 int fd, int cmd, abi_long arg)
3458 const argtype *arg_type = ie->arg_type;
3459 int target_size;
3460 void *argptr;
3461 int ret;
3462 struct ifconf *host_ifconf;
3463 uint32_t outbufsz;
3464 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3465 int target_ifreq_size;
3466 int nb_ifreq;
3467 int free_buf = 0;
3468 int i;
3469 int target_ifc_len;
3470 abi_long target_ifc_buf;
3471 int host_ifc_len;
3472 char *host_ifc_buf;
3474 assert(arg_type[0] == TYPE_PTR);
3475 assert(ie->access == IOC_RW);
3477 arg_type++;
3478 target_size = thunk_type_size(arg_type, 0);
3480 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3481 if (!argptr)
3482 return -TARGET_EFAULT;
3483 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3484 unlock_user(argptr, arg, 0);
3486 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3487 target_ifc_len = host_ifconf->ifc_len;
3488 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3490 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3491 nb_ifreq = target_ifc_len / target_ifreq_size;
3492 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3494 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3495 if (outbufsz > MAX_STRUCT_SIZE) {
3496 /* We can't fit all the extents into the fixed size buffer.
3497 * Allocate one that is large enough and use it instead.
3499 host_ifconf = malloc(outbufsz);
3500 if (!host_ifconf) {
3501 return -TARGET_ENOMEM;
3503 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3504 free_buf = 1;
3506 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3508 host_ifconf->ifc_len = host_ifc_len;
3509 host_ifconf->ifc_buf = host_ifc_buf;
3511 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3512 if (!is_error(ret)) {
3513 /* convert host ifc_len to target ifc_len */
3515 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3516 target_ifc_len = nb_ifreq * target_ifreq_size;
3517 host_ifconf->ifc_len = target_ifc_len;
3519 /* restore target ifc_buf */
3521 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3523 /* copy struct ifconf to target user */
3525 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3526 if (!argptr)
3527 return -TARGET_EFAULT;
3528 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3529 unlock_user(argptr, arg, target_size);
3531 /* copy ifreq[] to target user */
3533 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3534 for (i = 0; i < nb_ifreq ; i++) {
3535 thunk_convert(argptr + i * target_ifreq_size,
3536 host_ifc_buf + i * sizeof(struct ifreq),
3537 ifreq_arg_type, THUNK_TARGET);
3539 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3542 if (free_buf) {
3543 free(host_ifconf);
3546 return ret;
/* Special handler for device-mapper ioctls (DM_*).
 *
 * A dm_ioctl request is a fixed header followed by a variable payload
 * whose layout depends on the command; both header and payload must be
 * byte-swapped between target and host.  The header's data_start /
 * data_size fields locate the payload within the overall buffer.
 */
3549 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3550 int cmd, abi_long arg)
3552 void *argptr;
3553 struct dm_ioctl *host_dm;
3554 abi_long guest_data;
3555 uint32_t guest_data_size;
3556 int target_size;
3557 const argtype *arg_type = ie->arg_type;
3558 abi_long ret;
3559 void *big_buf = NULL;
3560 char *host_data;
3562 arg_type++;
3563 target_size = thunk_type_size(arg_type, 0);
3564 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3565 if (!argptr) {
3566 ret = -TARGET_EFAULT;
3567 goto out;
3569 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3570 unlock_user(argptr, arg, 0);
3572 /* buf_temp is too small, so fetch things into a bigger buffer */
/* NOTE(review): data_size comes from the guest; the *2 sizing is a
 * heuristic headroom for host-side expansion — confirm it is always
 * sufficient for the commands handled below.
 */
3573 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3574 memcpy(big_buf, buf_temp, target_size);
3575 buf_temp = big_buf;
3576 host_dm = big_buf;
3578 guest_data = arg + host_dm->data_start;
3579 if ((guest_data - arg) < 0) {
3580 ret = -EINVAL;
3581 goto out;
3583 guest_data_size = host_dm->data_size - host_dm->data_start;
3584 host_data = (char*)host_dm + host_dm->data_start;
/* NOTE(review): argptr is not checked for NULL here before the
 * memcpy/thunk_convert uses below — a bad guest pointer would be
 * dereferenced.  TODO: add an EFAULT check.
 */
3586 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
/* Inbound payload conversion, per command. */
3587 switch (ie->host_cmd) {
3588 case DM_REMOVE_ALL:
3589 case DM_LIST_DEVICES:
3590 case DM_DEV_CREATE:
3591 case DM_DEV_REMOVE:
3592 case DM_DEV_SUSPEND:
3593 case DM_DEV_STATUS:
3594 case DM_DEV_WAIT:
3595 case DM_TABLE_STATUS:
3596 case DM_TABLE_CLEAR:
3597 case DM_TABLE_DEPS:
3598 case DM_LIST_VERSIONS:
3599 /* no input data */
3600 break;
3601 case DM_DEV_RENAME:
3602 case DM_DEV_SET_GEOMETRY:
3603 /* data contains only strings */
3604 memcpy(host_data, argptr, guest_data_size);
3605 break;
3606 case DM_TARGET_MSG:
3607 memcpy(host_data, argptr, guest_data_size);
/* payload starts with a 64-bit sector number that needs swapping */
3608 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3609 break;
3610 case DM_TABLE_LOAD:
/* payload is a chain of dm_target_spec structs, each followed by a
 * NUL-terminated params string; spec->next links to the following one.
 */
3612 void *gspec = argptr;
3613 void *cur_data = host_data;
3614 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3615 int spec_size = thunk_type_size(arg_type, 0);
3616 int i;
3618 for (i = 0; i < host_dm->target_count; i++) {
3619 struct dm_target_spec *spec = cur_data;
3620 uint32_t next;
3621 int slen;
3623 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3624 slen = strlen((char*)gspec + spec_size) + 1;
3625 next = spec->next;
3626 spec->next = sizeof(*spec) + slen;
3627 strcpy((char*)&spec[1], gspec + spec_size);
3628 gspec += next;
3629 cur_data += spec->next;
3631 break;
3633 default:
3634 ret = -TARGET_EINVAL;
3635 unlock_user(argptr, guest_data, 0);
3636 goto out;
3638 unlock_user(argptr, guest_data, 0);
3640 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3641 if (!is_error(ret)) {
3642 guest_data = arg + host_dm->data_start;
3643 guest_data_size = host_dm->data_size - host_dm->data_start;
/* NOTE(review): this lock_user result is also used unchecked below. */
3644 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
/* Outbound payload conversion, per command. */
3645 switch (ie->host_cmd) {
3646 case DM_REMOVE_ALL:
3647 case DM_DEV_CREATE:
3648 case DM_DEV_REMOVE:
3649 case DM_DEV_RENAME:
3650 case DM_DEV_SUSPEND:
3651 case DM_DEV_STATUS:
3652 case DM_TABLE_LOAD:
3653 case DM_TABLE_CLEAR:
3654 case DM_TARGET_MSG:
3655 case DM_DEV_SET_GEOMETRY:
3656 /* no return data */
3657 break;
3658 case DM_LIST_DEVICES:
/* chain of dm_name_list entries; nl->next links to the following one,
 * zero terminates.  Set DM_BUFFER_FULL_FLAG if the guest buffer is
 * too small, matching kernel behaviour.
 */
3660 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3661 uint32_t remaining_data = guest_data_size;
3662 void *cur_data = argptr;
3663 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3664 int nl_size = 12; /* can't use thunk_size due to alignment */
3666 while (1) {
3667 uint32_t next = nl->next;
3668 if (next) {
3669 nl->next = nl_size + (strlen(nl->name) + 1);
3671 if (remaining_data < nl->next) {
3672 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3673 break;
3675 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3676 strcpy(cur_data + nl_size, nl->name);
3677 cur_data += nl->next;
3678 remaining_data -= nl->next;
3679 if (!next) {
3680 break;
3682 nl = (void*)nl + next;
3684 break;
3686 case DM_DEV_WAIT:
3687 case DM_TABLE_STATUS:
/* target_count dm_target_spec entries, each followed by a string. */
3689 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3690 void *cur_data = argptr;
3691 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3692 int spec_size = thunk_type_size(arg_type, 0);
3693 int i;
3695 for (i = 0; i < host_dm->target_count; i++) {
3696 uint32_t next = spec->next;
3697 int slen = strlen((char*)&spec[1]) + 1;
3698 spec->next = (cur_data - argptr) + spec_size + slen;
3699 if (guest_data_size < spec->next) {
3700 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3701 break;
3703 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3704 strcpy(cur_data + spec_size, (char*)&spec[1]);
3705 cur_data = argptr + spec->next;
3706 spec = (void*)host_dm + host_dm->data_start + next;
3708 break;
3710 case DM_TABLE_DEPS:
/* payload: 32-bit count (+padding) then count 64-bit dev_t values */
3712 void *hdata = (void*)host_dm + host_dm->data_start;
3713 int count = *(uint32_t*)hdata;
3714 uint64_t *hdev = hdata + 8;
3715 uint64_t *gdev = argptr + 8;
3716 int i;
3718 *(uint32_t*)argptr = tswap32(count);
3719 for (i = 0; i < count; i++) {
3720 *gdev = tswap64(*hdev);
3721 gdev++;
3722 hdev++;
3724 break;
3726 case DM_LIST_VERSIONS:
/* chain of dm_target_versions entries, same linking scheme as
 * DM_LIST_DEVICES above.
 */
3728 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3729 uint32_t remaining_data = guest_data_size;
3730 void *cur_data = argptr;
3731 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3732 int vers_size = thunk_type_size(arg_type, 0);
3734 while (1) {
3735 uint32_t next = vers->next;
3736 if (next) {
3737 vers->next = vers_size + (strlen(vers->name) + 1);
3739 if (remaining_data < vers->next) {
3740 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3741 break;
3743 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3744 strcpy(cur_data + vers_size, vers->name);
3745 cur_data += vers->next;
3746 remaining_data -= vers->next;
3747 if (!next) {
3748 break;
3750 vers = (void*)vers + next;
3752 break;
3754 default:
3755 unlock_user(argptr, guest_data, 0);
3756 ret = -TARGET_EINVAL;
3757 goto out;
3759 unlock_user(argptr, guest_data, guest_data_size);
/* Finally convert the (possibly flag-updated) header back to the guest. */
3761 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3762 if (!argptr) {
3763 ret = -TARGET_EFAULT;
3764 goto out;
3766 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3767 unlock_user(argptr, arg, target_size);
3769 out:
3770 g_free(big_buf);
3771 return ret;
3774 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3775 int cmd, abi_long arg)
3777 void *argptr;
3778 int target_size;
3779 const argtype *arg_type = ie->arg_type;
3780 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
3781 abi_long ret;
3783 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
3784 struct blkpg_partition host_part;
3786 /* Read and convert blkpg */
3787 arg_type++;
3788 target_size = thunk_type_size(arg_type, 0);
3789 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3790 if (!argptr) {
3791 ret = -TARGET_EFAULT;
3792 goto out;
3794 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3795 unlock_user(argptr, arg, 0);
3797 switch (host_blkpg->op) {
3798 case BLKPG_ADD_PARTITION:
3799 case BLKPG_DEL_PARTITION:
3800 /* payload is struct blkpg_partition */
3801 break;
3802 default:
3803 /* Unknown opcode */
3804 ret = -TARGET_EINVAL;
3805 goto out;
3808 /* Read and convert blkpg->data */
3809 arg = (abi_long)(uintptr_t)host_blkpg->data;
3810 target_size = thunk_type_size(part_arg_type, 0);
3811 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3812 if (!argptr) {
3813 ret = -TARGET_EFAULT;
3814 goto out;
3816 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
3817 unlock_user(argptr, arg, 0);
3819 /* Swizzle the data pointer to our local copy and call! */
3820 host_blkpg->data = &host_part;
3821 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
3823 out:
3824 return ret;
/* Special handler for routing-table ioctls taking a struct rtentry
 * (e.g. SIOCADDRT/SIOCDELRT).  The struct is converted field by field
 * so that the rt_dev member — a pointer to a device-name string in
 * guest memory — can be replaced by a locked host pointer before the
 * ioctl, and unlocked afterwards.
 */
3827 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3828 int fd, int cmd, abi_long arg)
3830 const argtype *arg_type = ie->arg_type;
3831 const StructEntry *se;
3832 const argtype *field_types;
3833 const int *dst_offsets, *src_offsets;
3834 int target_size;
3835 void *argptr;
3836 abi_ulong *target_rt_dev_ptr;
3837 unsigned long *host_rt_dev_ptr;
3838 abi_long ret;
3839 int i;
3841 assert(ie->access == IOC_W);
3842 assert(*arg_type == TYPE_PTR);
3843 arg_type++;
3844 assert(*arg_type == TYPE_STRUCT);
3845 target_size = thunk_type_size(arg_type, 0);
3846 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3847 if (!argptr) {
3848 return -TARGET_EFAULT;
3850 arg_type++;
3851 assert(*arg_type == (int)STRUCT_rtentry);
3852 se = struct_entries + *arg_type++;
3853 assert(se->convert[0] == NULL);
3854 /* convert struct here to be able to catch rt_dev string */
3855 field_types = se->field_types;
3856 dst_offsets = se->field_offsets[THUNK_HOST];
3857 src_offsets = se->field_offsets[THUNK_TARGET];
3858 for (i = 0; i < se->nb_fields; i++) {
3859 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3860 assert(*field_types == TYPE_PTRVOID);
3861 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3862 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3863 if (*target_rt_dev_ptr != 0) {
/* lock the guest device-name string into host memory */
3864 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3865 tswapal(*target_rt_dev_ptr));
3866 if (!*host_rt_dev_ptr) {
3867 unlock_user(argptr, arg, 0);
3868 return -TARGET_EFAULT;
3870 } else {
3871 *host_rt_dev_ptr = 0;
3873 field_types++;
3874 continue;
3876 field_types = thunk_convert(buf_temp + dst_offsets[i],
3877 argptr + src_offsets[i],
3878 field_types, THUNK_HOST);
3880 unlock_user(argptr, arg, 0);
3882 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* NOTE(review): host_rt_dev_ptr/target_rt_dev_ptr are only assigned
 * inside the rt_dev branch of the loop above; this use relies on
 * struct rtentry always containing an rt_dev field — confirm, or
 * initialise the pointers to NULL/0 defensively.
 */
3883 if (*host_rt_dev_ptr != 0) {
3884 unlock_user((void *)*host_rt_dev_ptr,
3885 *target_rt_dev_ptr, 0);
3887 return ret;
3890 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
3891 int fd, int cmd, abi_long arg)
3893 int sig = target_to_host_signal(arg);
3894 return get_errno(ioctl(fd, ie->host_cmd, sig));
/* The ioctl dispatch table, generated from ioctls.h.  IOCTL() rows use
 * the generic thunk-driven conversion in do_ioctl(); IOCTL_SPECIAL()
 * rows name one of the do_ioctl_* handlers above.  A zero target_cmd
 * terminates the table.
 */
3897 static IOCTLEntry ioctl_entries[] = {
3898 #define IOCTL(cmd, access, ...) \
3899 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3900 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3901 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3902 #include "ioctls.h"
3903 { 0, 0, },
3906 /* ??? Implement proper locking for ioctls. */
3907 /* do_ioctl() Must return target values and target errnos. */
/* Generic ioctl dispatcher: linear-search ioctl_entries for the target
 * command, delegate to a special handler if one is registered, else
 * convert the argument according to the table's argtype description
 * (direction given by ie->access) around the host ioctl call.
 */
3908 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
3910 const IOCTLEntry *ie;
3911 const argtype *arg_type;
3912 abi_long ret;
3913 uint8_t buf_temp[MAX_STRUCT_SIZE];
3914 int target_size;
3915 void *argptr;
3917 ie = ioctl_entries;
3918 for(;;) {
3919 if (ie->target_cmd == 0) {
3920 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3921 return -TARGET_ENOSYS;
3923 if (ie->target_cmd == cmd)
3924 break;
3925 ie++;
3927 arg_type = ie->arg_type;
3928 #if defined(DEBUG)
3929 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3930 #endif
3931 if (ie->do_ioctl) {
3932 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3935 switch(arg_type[0]) {
3936 case TYPE_NULL:
3937 /* no argument */
3938 ret = get_errno(ioctl(fd, ie->host_cmd));
3939 break;
3940 case TYPE_PTRVOID:
3941 case TYPE_INT:
3942 /* int argment */
3943 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3944 break;
3945 case TYPE_PTR:
/* pointer argument: thunk-convert through buf_temp in the
 * direction(s) given by ie->access
 */
3946 arg_type++;
3947 target_size = thunk_type_size(arg_type, 0);
3948 switch(ie->access) {
3949 case IOC_R:
3950 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3951 if (!is_error(ret)) {
3952 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3953 if (!argptr)
3954 return -TARGET_EFAULT;
3955 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3956 unlock_user(argptr, arg, target_size);
3958 break;
3959 case IOC_W:
3960 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3961 if (!argptr)
3962 return -TARGET_EFAULT;
3963 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3964 unlock_user(argptr, arg, 0);
3965 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3966 break;
3967 default:
3968 case IOC_RW:
3969 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3970 if (!argptr)
3971 return -TARGET_EFAULT;
3972 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3973 unlock_user(argptr, arg, 0);
3974 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3975 if (!is_error(ret)) {
3976 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3977 if (!argptr)
3978 return -TARGET_EFAULT;
3979 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3980 unlock_user(argptr, arg, target_size);
3982 break;
3984 break;
3985 default:
3986 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3987 (long)cmd, arg_type[0]);
3988 ret = -TARGET_ENOSYS;
3989 break;
3991 return ret;
/* termios c_iflag target<->host bit translation table
 * (columns: target mask, target bits, host mask, host bits).
 */
3994 static const bitmask_transtbl iflag_tbl[] = {
3995 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3996 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3997 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3998 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3999 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4000 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4001 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4002 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4003 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4004 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4005 { TARGET_IXON, TARGET_IXON, IXON, IXON },
4006 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4007 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4008 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4009 { 0, 0, 0, 0 }
/* termios c_oflag translation table; multi-bit delay fields (NLDLY,
 * CRDLY, TABDLY, ...) get one row per possible value under a shared mask.
 */
4012 static const bitmask_transtbl oflag_tbl[] = {
4013 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4014 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4015 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4016 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4017 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4018 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4019 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4020 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4021 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4022 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4023 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4024 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4025 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4026 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4027 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4028 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4029 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4030 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4031 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4032 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4033 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4034 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4035 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4036 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4037 { 0, 0, 0, 0 }
/* termios c_cflag translation table; CBAUD and CSIZE are multi-bit
 * fields, hence one row per baud rate / character size.
 */
4040 static const bitmask_transtbl cflag_tbl[] = {
4041 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4042 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4043 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4044 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4045 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4046 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4047 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4048 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4049 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4050 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4051 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4052 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4053 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4054 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4055 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4056 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4057 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4058 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4059 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4060 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4061 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4062 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4063 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4064 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4065 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4066 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4067 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4068 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4069 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4070 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4071 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4072 { 0, 0, 0, 0 }
/* termios c_lflag target<->host bit translation table. */
4075 static const bitmask_transtbl lflag_tbl[] = {
4076 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4077 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4078 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4079 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4080 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4081 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4082 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4083 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4084 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4085 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4086 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4087 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4088 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4089 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4090 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4091 { 0, 0, 0, 0 }
/* Convert a target struct termios (byte-swapped, target flag encoding,
 * target c_cc indices) to the host representation, using the four
 * bitmask tables above for the flag words.
 */
4094 static void target_to_host_termios (void *dst, const void *src)
4096 struct host_termios *host = dst;
4097 const struct target_termios *target = src;
4099 host->c_iflag =
4100 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4101 host->c_oflag =
4102 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4103 host->c_cflag =
4104 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4105 host->c_lflag =
4106 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4107 host->c_line = target->c_line;
/* control characters: host indices (VINTR...) may differ from the
 * target's (TARGET_VINTR...), so copy each one explicitly
 */
4109 memset(host->c_cc, 0, sizeof(host->c_cc));
4110 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4111 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4112 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4113 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4114 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4115 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4116 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4117 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4118 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4119 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4120 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4121 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4122 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4123 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4124 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4125 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4126 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Inverse of target_to_host_termios: convert a host struct termios back
 * to the target representation.
 */
4129 static void host_to_target_termios (void *dst, const void *src)
4131 struct target_termios *target = dst;
4132 const struct host_termios *host = src;
4134 target->c_iflag =
4135 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4136 target->c_oflag =
4137 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4138 target->c_cflag =
4139 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4140 target->c_lflag =
4141 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4142 target->c_line = host->c_line;
/* per-index copy mirrors target_to_host_termios above */
4144 memset(target->c_cc, 0, sizeof(target->c_cc));
4145 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4146 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4147 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4148 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4149 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4150 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4151 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4152 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4153 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4154 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4155 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4156 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4157 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4158 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4159 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4160 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4161 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* StructEntry registering the two hand-written termios converters with
 * the thunk layer (convert[0] = to target, convert[1] = to host).
 */
4164 static const StructEntry struct_termios_def = {
4165 .convert = { host_to_target_termios, target_to_host_termios },
4166 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4167 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* mmap() flags target<->host translation table. */
4170 static bitmask_transtbl mmap_flags_tbl[] = {
4171 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4172 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4173 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4174 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4175 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4176 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4177 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4178 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4179 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4180 MAP_NORESERVE },
4181 { 0, 0, 0, 0 }
4184 #if defined(TARGET_I386)
4186 /* NOTE: there is really one LDT for all the threads */
4187 static uint8_t *ldt_table;
/* modify_ldt(func=0): copy up to bytecount bytes of the emulated LDT to
 * guest memory at ptr.  Returns the number of bytes copied, 0 if no LDT
 * has been allocated yet, or -TARGET_EFAULT.
 */
4189 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4191 int size;
4192 void *p;
4194 if (!ldt_table)
4195 return 0;
4196 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
/* NOTE(review): signed int compared against unsigned long bytecount —
 * fine for sane bytecount values, but relies on implicit conversion.
 */
4197 if (size > bytecount)
4198 size = bytecount;
4199 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4200 if (!p)
4201 return -TARGET_EFAULT;
4202 /* ??? Should this by byteswapped? */
4203 memcpy(p, ldt_table, size);
4204 unlock_user(p, ptr, size);
4205 return size;
4208 /* XXX: add locking support */
/* modify_ldt(func=1 or 0x11): install one LDT descriptor from the guest's
 * struct user_desc at ptr.  oldmode selects the legacy modify_ldt
 * semantics (no 'useable' bit, contents==3 forbidden).  The descriptor
 * packing below mirrors the Linux kernel's fill_ldt().
 */
4209 static abi_long write_ldt(CPUX86State *env,
4210 abi_ulong ptr, unsigned long bytecount, int oldmode)
4212 struct target_modify_ldt_ldt_s ldt_info;
4213 struct target_modify_ldt_ldt_s *target_ldt_info;
4214 int seg_32bit, contents, read_exec_only, limit_in_pages;
4215 int seg_not_present, useable, lm;
4216 uint32_t *lp, entry_1, entry_2;
4218 if (bytecount != sizeof(ldt_info))
4219 return -TARGET_EINVAL;
4220 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4221 return -TARGET_EFAULT;
4222 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4223 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4224 ldt_info.limit = tswap32(target_ldt_info->limit);
4225 ldt_info.flags = tswap32(target_ldt_info->flags);
4226 unlock_user_struct(target_ldt_info, ptr, 0);
4228 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4229 return -TARGET_EINVAL;
/* unpack the user_desc flag bits */
4230 seg_32bit = ldt_info.flags & 1;
4231 contents = (ldt_info.flags >> 1) & 3;
4232 read_exec_only = (ldt_info.flags >> 3) & 1;
4233 limit_in_pages = (ldt_info.flags >> 4) & 1;
4234 seg_not_present = (ldt_info.flags >> 5) & 1;
4235 useable = (ldt_info.flags >> 6) & 1;
4236 #ifdef TARGET_ABI32
4237 lm = 0;
4238 #else
4239 lm = (ldt_info.flags >> 7) & 1;
4240 #endif
4241 if (contents == 3) {
4242 if (oldmode)
4243 return -TARGET_EINVAL;
4244 if (seg_not_present == 0)
4245 return -TARGET_EINVAL;
4247 /* allocate the LDT */
/* lazily mmap guest memory for the shared LDT on first write */
4248 if (!ldt_table) {
4249 env->ldt.base = target_mmap(0,
4250 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4251 PROT_READ|PROT_WRITE,
4252 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4253 if (env->ldt.base == -1)
4254 return -TARGET_ENOMEM;
4255 memset(g2h(env->ldt.base), 0,
4256 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4257 env->ldt.limit = 0xffff;
4258 ldt_table = g2h(env->ldt.base);
4261 /* NOTE: same code as Linux kernel */
4262 /* Allow LDTs to be cleared by the user. */
4263 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4264 if (oldmode ||
4265 (contents == 0 &&
4266 read_exec_only == 1 &&
4267 seg_32bit == 0 &&
4268 limit_in_pages == 0 &&
4269 seg_not_present == 1 &&
4270 useable == 0 )) {
4271 entry_1 = 0;
4272 entry_2 = 0;
4273 goto install;
/* pack the two 32-bit halves of the segment descriptor */
4277 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4278 (ldt_info.limit & 0x0ffff);
4279 entry_2 = (ldt_info.base_addr & 0xff000000) |
4280 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4281 (ldt_info.limit & 0xf0000) |
4282 ((read_exec_only ^ 1) << 9) |
4283 (contents << 10) |
4284 ((seg_not_present ^ 1) << 15) |
4285 (seg_32bit << 22) |
4286 (limit_in_pages << 23) |
4287 (lm << 21) |
4288 0x7000;
4289 if (!oldmode)
4290 entry_2 |= (useable << 20);
4292 /* Install the new entry ... */
4293 install:
4294 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4295 lp[0] = tswap32(entry_1);
4296 lp[1] = tswap32(entry_2);
4297 return 0;
4300 /* specific and weird i386 syscalls */
4301 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4302 unsigned long bytecount)
4304 abi_long ret;
4306 switch (func) {
4307 case 0:
4308 ret = read_ldt(ptr, bytecount);
4309 break;
4310 case 1:
4311 ret = write_ldt(env, ptr, bytecount, 1);
4312 break;
4313 case 0x11:
4314 ret = write_ldt(env, ptr, bytecount, 0);
4315 break;
4316 default:
4317 ret = -TARGET_ENOSYS;
4318 break;
4320 return ret;
4323 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area emulation: install a TLS descriptor into the emulated
 * GDT from the guest's struct user_desc at ptr.  An entry_number of -1
 * asks us to pick the first free TLS slot and report it back to the
 * guest.  Descriptor packing mirrors write_ldt() above / the kernel.
 */
4324 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4326 uint64_t *gdt_table = g2h(env->gdt.base);
4327 struct target_modify_ldt_ldt_s ldt_info;
4328 struct target_modify_ldt_ldt_s *target_ldt_info;
4329 int seg_32bit, contents, read_exec_only, limit_in_pages;
4330 int seg_not_present, useable, lm;
4331 uint32_t *lp, entry_1, entry_2;
4332 int i;
4334 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4335 if (!target_ldt_info)
4336 return -TARGET_EFAULT;
4337 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4338 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4339 ldt_info.limit = tswap32(target_ldt_info->limit);
4340 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 means "allocate a free TLS slot"; write the chosen index back.
 * If no slot is free, entry_number stays -1 and the range check below
 * returns -TARGET_EINVAL.
 */
4341 if (ldt_info.entry_number == -1) {
4342 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4343 if (gdt_table[i] == 0) {
4344 ldt_info.entry_number = i;
4345 target_ldt_info->entry_number = tswap32(i);
4346 break;
4350 unlock_user_struct(target_ldt_info, ptr, 1);
4352 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4353 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4354 return -TARGET_EINVAL;
/* unpack the user_desc flag bits (same layout as write_ldt) */
4355 seg_32bit = ldt_info.flags & 1;
4356 contents = (ldt_info.flags >> 1) & 3;
4357 read_exec_only = (ldt_info.flags >> 3) & 1;
4358 limit_in_pages = (ldt_info.flags >> 4) & 1;
4359 seg_not_present = (ldt_info.flags >> 5) & 1;
4360 useable = (ldt_info.flags >> 6) & 1;
4361 #ifdef TARGET_ABI32
4362 lm = 0;
4363 #else
4364 lm = (ldt_info.flags >> 7) & 1;
4365 #endif
4367 if (contents == 3) {
4368 if (seg_not_present == 0)
4369 return -TARGET_EINVAL;
4372 /* NOTE: same code as Linux kernel */
4373 /* Allow LDTs to be cleared by the user. */
4374 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4375 if ((contents == 0 &&
4376 read_exec_only == 1 &&
4377 seg_32bit == 0 &&
4378 limit_in_pages == 0 &&
4379 seg_not_present == 1 &&
4380 useable == 0 )) {
4381 entry_1 = 0;
4382 entry_2 = 0;
4383 goto install;
/* pack the two 32-bit halves of the segment descriptor */
4387 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4388 (ldt_info.limit & 0x0ffff);
4389 entry_2 = (ldt_info.base_addr & 0xff000000) |
4390 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4391 (ldt_info.limit & 0xf0000) |
4392 ((read_exec_only ^ 1) << 9) |
4393 (contents << 10) |
4394 ((seg_not_present ^ 1) << 15) |
4395 (seg_32bit << 22) |
4396 (limit_in_pages << 23) |
4397 (useable << 20) |
4398 (lm << 21) |
4399 0x7000;
4401 /* Install the new entry ... */
4402 install:
4403 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4404 lp[0] = tswap32(entry_1);
4405 lp[1] = tswap32(entry_2);
4406 return 0;
/*
 * Emulate get_thread_area(2): fetch the GDT descriptor selected by
 * user_desc.entry_number, decode it back into base/limit/flags form,
 * and copy the result out to the guest structure at 'ptr'.
 * Returns 0 on success, -TARGET_EFAULT for a bad guest pointer,
 * -TARGET_EINVAL if the entry is outside the TLS descriptor range.
 */
4409 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4411 struct target_modify_ldt_ldt_s *target_ldt_info;
4412 uint64_t *gdt_table = g2h(env->gdt.base);
4413 uint32_t base_addr, limit, flags;
4414 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4415 int seg_not_present, useable, lm;
4416 uint32_t *lp, entry_1, entry_2;
4418 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4419 if (!target_ldt_info)
4420 return -TARGET_EFAULT;
4421 idx = tswap32(target_ldt_info->entry_number);
4422 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4423 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4424 unlock_user_struct(target_ldt_info, ptr, 1);
4425 return -TARGET_EINVAL;
/* Read the two 32-bit words of the selected descriptor. */
4427 lp = (uint32_t *)(gdt_table + idx);
4428 entry_1 = tswap32(lp[0]);
4429 entry_2 = tswap32(lp[1]);
/* Decode descriptor bits: exact inverse of do_set_thread_area's encoding. */
4431 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4432 contents = (entry_2 >> 10) & 3;
4433 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4434 seg_32bit = (entry_2 >> 22) & 1;
4435 limit_in_pages = (entry_2 >> 23) & 1;
4436 useable = (entry_2 >> 20) & 1;
4437 #ifdef TARGET_ABI32
/* No long-mode bit on 32-bit ABIs. */
4438 lm = 0;
4439 #else
4440 lm = (entry_2 >> 21) & 1;
4441 #endif
/* Reassemble the user_desc flags word. */
4442 flags = (seg_32bit << 0) | (contents << 1) |
4443 (read_exec_only << 3) | (limit_in_pages << 4) |
4444 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4445 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
/* Base address is scattered across both descriptor words. */
4446 base_addr = (entry_1 >> 16) |
4447 (entry_2 & 0xff000000) |
4448 ((entry_2 & 0xff) << 16);
4449 target_ldt_info->base_addr = tswapal(base_addr);
4450 target_ldt_info->limit = tswap32(limit);
4451 target_ldt_info->flags = tswap32(flags);
4452 unlock_user_struct(target_ldt_info, ptr, 1);
4453 return 0;
4455 #endif /* TARGET_I386 && TARGET_ABI32 */
4457 #ifndef TARGET_ABI32
4458 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4460 abi_long ret = 0;
4461 abi_ulong val;
4462 int idx;
4464 switch(code) {
4465 case TARGET_ARCH_SET_GS:
4466 case TARGET_ARCH_SET_FS:
4467 if (code == TARGET_ARCH_SET_GS)
4468 idx = R_GS;
4469 else
4470 idx = R_FS;
4471 cpu_x86_load_seg(env, idx, 0);
4472 env->segs[idx].base = addr;
4473 break;
4474 case TARGET_ARCH_GET_GS:
4475 case TARGET_ARCH_GET_FS:
4476 if (code == TARGET_ARCH_GET_GS)
4477 idx = R_GS;
4478 else
4479 idx = R_FS;
4480 val = env->segs[idx].base;
4481 if (put_user(val, addr, abi_ulong))
4482 ret = -TARGET_EFAULT;
4483 break;
4484 default:
4485 ret = -TARGET_EINVAL;
4486 break;
4488 return ret;
4490 #endif
4492 #endif /* defined(TARGET_I386) */
/* Host stack size for threads created via clone(CLONE_VM). */
4494 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation: held by the parent in do_fork() until the
   child's TLS state is fully initialized (see clone_func). */
4497 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Hand-off block passed from do_fork() to the new thread's clone_func();
   mutex/cond implement the ready handshake, tid is filled in by the child. */
4498 typedef struct {
4499 CPUArchState *env;
4500 pthread_mutex_t mutex;
4501 pthread_cond_t cond;
4502 pthread_t thread;
4503 uint32_t tid;
4504 abi_ulong child_tidptr;
4505 abi_ulong parent_tidptr;
4506 sigset_t sigmask;
4507 } new_thread_info;
/*
 * Entry point of a guest thread created by do_fork(CLONE_VM).
 * Publishes the new thread's TID, signals readiness to the parent,
 * waits for the parent to finish TLS setup, then enters cpu_loop().
 * Never returns normally.
 */
4509 static void *clone_func(void *arg)
4511 new_thread_info *info = arg;
4512 CPUArchState *env;
4513 CPUState *cpu;
4514 TaskState *ts;
4516 rcu_register_thread();
4517 env = info->env;
4518 cpu = ENV_GET_CPU(env);
4519 thread_cpu = cpu;
4520 ts = (TaskState *)cpu->opaque;
4521 info->tid = gettid();
4522 cpu->host_tid = info->tid;
4523 task_settid(ts);
/* CLONE_CHILD_SETTID / CLONE_PARENT_SETTID semantics. */
4524 if (info->child_tidptr)
4525 put_user_u32(info->tid, info->child_tidptr);
4526 if (info->parent_tidptr)
4527 put_user_u32(info->tid, info->parent_tidptr);
4528 /* Enable signals. */
4529 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4530 /* Signal to the parent that we're ready. */
4531 pthread_mutex_lock(&info->mutex);
4532 pthread_cond_broadcast(&info->cond);
4533 pthread_mutex_unlock(&info->mutex);
4534 /* Wait until the parent has finished initializing the tls state. */
4535 pthread_mutex_lock(&clone_lock);
4536 pthread_mutex_unlock(&clone_lock);
4537 cpu_loop(env);
4538 /* never exits */
4539 return NULL;
4542 /* do_fork() Must return host values and target errnos (unlike most
4543 do_*() functions). */
/*
 * Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests are mapped to a
 * host pthread sharing this process's address space; anything else is a
 * plain host fork().  Returns the child TID/PID, or -1 / -EINVAL on error.
 */
4544 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4545 abi_ulong parent_tidptr, target_ulong newtls,
4546 abi_ulong child_tidptr)
4548 CPUState *cpu = ENV_GET_CPU(env);
4549 int ret;
4550 TaskState *ts;
4551 CPUState *new_cpu;
4552 CPUArchState *new_env;
4553 unsigned int nptl_flags;
4554 sigset_t sigmask;
4556 /* Emulate vfork() with fork() */
4557 if (flags & CLONE_VFORK)
4558 flags &= ~(CLONE_VFORK | CLONE_VM);
4560 if (flags & CLONE_VM) {
4561 TaskState *parent_ts = (TaskState *)cpu->opaque;
4562 new_thread_info info;
4563 pthread_attr_t attr;
4565 ts = g_malloc0(sizeof(TaskState));
4566 init_task_state(ts);
4567 /* we create a new CPU instance. */
4568 new_env = cpu_copy(env);
4569 /* Init regs that differ from the parent. */
4570 cpu_clone_regs(new_env, newsp);
4571 new_cpu = ENV_GET_CPU(new_env);
4572 new_cpu->opaque = ts;
4573 ts->bprm = parent_ts->bprm;
4574 ts->info = parent_ts->info;
/* Strip the NPTL flags before they reach any host syscall path. */
4575 nptl_flags = flags;
4576 flags &= ~CLONE_NPTL_FLAGS2;
4578 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4579 ts->child_tidptr = child_tidptr;
4582 if (nptl_flags & CLONE_SETTLS)
4583 cpu_set_tls (new_env, newtls);
4585 /* Grab a mutex so that thread setup appears atomic. */
4586 pthread_mutex_lock(&clone_lock);
4588 memset(&info, 0, sizeof(info));
4589 pthread_mutex_init(&info.mutex, NULL);
4590 pthread_mutex_lock(&info.mutex);
4591 pthread_cond_init(&info.cond, NULL);
4592 info.env = new_env;
4593 if (nptl_flags & CLONE_CHILD_SETTID)
4594 info.child_tidptr = child_tidptr;
4595 if (nptl_flags & CLONE_PARENT_SETTID)
4596 info.parent_tidptr = parent_tidptr;
/* NOTE(review): the pthread_attr_* return values below are each
   overwritten without being checked; only pthread_create's matters. */
4598 ret = pthread_attr_init(&attr);
4599 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4600 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4601 /* It is not safe to deliver signals until the child has finished
4602 initializing, so temporarily block all signals. */
4603 sigfillset(&sigmask);
4604 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4606 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4607 /* TODO: Free new CPU state if thread creation failed. */
4609 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4610 pthread_attr_destroy(&attr);
4611 if (ret == 0) {
4612 /* Wait for the child to initialize. */
4613 pthread_cond_wait(&info.cond, &info.mutex);
4614 ret = info.tid;
4615 if (flags & CLONE_PARENT_SETTID)
4616 put_user_u32(ret, parent_tidptr);
4617 } else {
4618 ret = -1;
4620 pthread_mutex_unlock(&info.mutex);
4621 pthread_cond_destroy(&info.cond);
4622 pthread_mutex_destroy(&info.mutex);
/* Releasing clone_lock lets the child proceed into cpu_loop(). */
4623 pthread_mutex_unlock(&clone_lock);
4624 } else {
4625 /* if no CLONE_VM, we consider it is a fork */
4626 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4627 return -EINVAL;
4628 fork_start();
4629 ret = fork();
4630 if (ret == 0) {
4631 /* Child Process. */
4632 rcu_after_fork();
4633 cpu_clone_regs(env, newsp);
4634 fork_end(1);
4635 /* There is a race condition here. The parent process could
4636 theoretically read the TID in the child process before the child
4637 tid is set. This would require using either ptrace
4638 (not implemented) or having *_tidptr to point at a shared memory
4639 mapping. We can't repeat the spinlock hack used above because
4640 the child process gets its own copy of the lock. */
4641 if (flags & CLONE_CHILD_SETTID)
4642 put_user_u32(gettid(), child_tidptr);
4643 if (flags & CLONE_PARENT_SETTID)
4644 put_user_u32(gettid(), parent_tidptr);
4645 ts = (TaskState *)cpu->opaque;
4646 if (flags & CLONE_SETTLS)
4647 cpu_set_tls (env, newtls);
4648 if (flags & CLONE_CHILD_CLEARTID)
4649 ts->child_tidptr = child_tidptr;
4650 } else {
4651 fork_end(0);
4654 return ret;
4657 /* warning : doesn't handle linux specific flags... */
/*
 * Map a guest fcntl(2) command constant to the host's constant.
 * Returns -TARGET_EINVAL for commands QEMU does not translate.
 */
4658 static int target_to_host_fcntl_cmd(int cmd)
4660 switch(cmd) {
/* These commands share the same numeric value on all supported hosts. */
4661 case TARGET_F_DUPFD:
4662 case TARGET_F_GETFD:
4663 case TARGET_F_SETFD:
4664 case TARGET_F_GETFL:
4665 case TARGET_F_SETFL:
4666 return cmd;
4667 case TARGET_F_GETLK:
4668 return F_GETLK;
4669 case TARGET_F_SETLK:
4670 return F_SETLK;
4671 case TARGET_F_SETLKW:
4672 return F_SETLKW;
4673 case TARGET_F_GETOWN:
4674 return F_GETOWN;
4675 case TARGET_F_SETOWN:
4676 return F_SETOWN;
4677 case TARGET_F_GETSIG:
4678 return F_GETSIG;
4679 case TARGET_F_SETSIG:
4680 return F_SETSIG;
/* 64-bit lock commands only exist as separate numbers on 32-bit ABIs. */
4681 #if TARGET_ABI_BITS == 32
4682 case TARGET_F_GETLK64:
4683 return F_GETLK64;
4684 case TARGET_F_SETLK64:
4685 return F_SETLK64;
4686 case TARGET_F_SETLKW64:
4687 return F_SETLKW64;
4688 #endif
4689 case TARGET_F_SETLEASE:
4690 return F_SETLEASE;
4691 case TARGET_F_GETLEASE:
4692 return F_GETLEASE;
4693 #ifdef F_DUPFD_CLOEXEC
4694 case TARGET_F_DUPFD_CLOEXEC:
4695 return F_DUPFD_CLOEXEC;
4696 #endif
4697 case TARGET_F_NOTIFY:
4698 return F_NOTIFY;
4699 #ifdef F_GETOWN_EX
4700 case TARGET_F_GETOWN_EX:
4701 return F_GETOWN_EX;
4702 #endif
4703 #ifdef F_SETOWN_EX
4704 case TARGET_F_SETOWN_EX:
4705 return F_SETOWN_EX;
4706 #endif
4707 default:
4708 return -TARGET_EINVAL;
/* Unreachable: the default case above already returns. */
4710 return -TARGET_EINVAL;
/* Bitmask translation table for struct flock l_type values; -1 masks
   select an exact-value match in target_to_host_bitmask(). */
4713 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4714 static const bitmask_transtbl flock_tbl[] = {
4715 TRANSTBL_CONVERT(F_RDLCK),
4716 TRANSTBL_CONVERT(F_WRLCK),
4717 TRANSTBL_CONVERT(F_UNLCK),
4718 TRANSTBL_CONVERT(F_EXLCK),
4719 TRANSTBL_CONVERT(F_SHLCK),
/* Terminator entry. */
4720 { 0, 0, 0, 0 }
/*
 * Emulate fcntl(2): translate the command and any struct arguments
 * (flock, flock64, f_owner_ex) between guest and host layouts, issue
 * the host fcntl, and convert results back.
 * Returns the host fcntl result as a target errno on failure.
 */
4723 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4725 struct flock fl;
4726 struct target_flock *target_fl;
4727 struct flock64 fl64;
4728 struct target_flock64 *target_fl64;
4729 #ifdef F_GETOWN_EX
4730 struct f_owner_ex fox;
4731 struct target_f_owner_ex *target_fox;
4732 #endif
4733 abi_long ret;
4734 int host_cmd = target_to_host_fcntl_cmd(cmd);
4736 if (host_cmd == -TARGET_EINVAL)
4737 return host_cmd;
4739 switch(cmd) {
4740 case TARGET_F_GETLK:
/* Copy the guest's query flock in, then write the host's answer back. */
4741 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4742 return -TARGET_EFAULT;
4743 fl.l_type =
4744 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4745 fl.l_whence = tswap16(target_fl->l_whence);
4746 fl.l_start = tswapal(target_fl->l_start);
4747 fl.l_len = tswapal(target_fl->l_len);
4748 fl.l_pid = tswap32(target_fl->l_pid);
4749 unlock_user_struct(target_fl, arg, 0);
4750 ret = get_errno(fcntl(fd, host_cmd, &fl));
4751 if (ret == 0) {
4752 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4753 return -TARGET_EFAULT;
4754 target_fl->l_type =
4755 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4756 target_fl->l_whence = tswap16(fl.l_whence);
4757 target_fl->l_start = tswapal(fl.l_start);
4758 target_fl->l_len = tswapal(fl.l_len);
4759 target_fl->l_pid = tswap32(fl.l_pid);
4760 unlock_user_struct(target_fl, arg, 1);
4762 break;
4764 case TARGET_F_SETLK:
4765 case TARGET_F_SETLKW:
/* Input-only: convert the guest flock and issue the host call. */
4766 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4767 return -TARGET_EFAULT;
4768 fl.l_type =
4769 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4770 fl.l_whence = tswap16(target_fl->l_whence);
4771 fl.l_start = tswapal(target_fl->l_start);
4772 fl.l_len = tswapal(target_fl->l_len);
4773 fl.l_pid = tswap32(target_fl->l_pid);
4774 unlock_user_struct(target_fl, arg, 0);
4775 ret = get_errno(fcntl(fd, host_cmd, &fl));
4776 break;
4778 case TARGET_F_GETLK64:
4779 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4780 return -TARGET_EFAULT;
/* The 64-bit flock encodes l_type shifted left by one in flock_tbl;
   undo/redo that shift on each conversion. */
4781 fl64.l_type =
4782 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4783 fl64.l_whence = tswap16(target_fl64->l_whence);
4784 fl64.l_start = tswap64(target_fl64->l_start);
4785 fl64.l_len = tswap64(target_fl64->l_len);
4786 fl64.l_pid = tswap32(target_fl64->l_pid);
4787 unlock_user_struct(target_fl64, arg, 0);
4788 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4789 if (ret == 0) {
4790 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4791 return -TARGET_EFAULT;
4792 target_fl64->l_type =
4793 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4794 target_fl64->l_whence = tswap16(fl64.l_whence);
4795 target_fl64->l_start = tswap64(fl64.l_start);
4796 target_fl64->l_len = tswap64(fl64.l_len);
4797 target_fl64->l_pid = tswap32(fl64.l_pid);
4798 unlock_user_struct(target_fl64, arg, 1);
4800 break;
4801 case TARGET_F_SETLK64:
4802 case TARGET_F_SETLKW64:
4803 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4804 return -TARGET_EFAULT;
4805 fl64.l_type =
4806 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4807 fl64.l_whence = tswap16(target_fl64->l_whence);
4808 fl64.l_start = tswap64(target_fl64->l_start);
4809 fl64.l_len = tswap64(target_fl64->l_len);
4810 fl64.l_pid = tswap32(target_fl64->l_pid);
4811 unlock_user_struct(target_fl64, arg, 0);
4812 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4813 break;
4815 case TARGET_F_GETFL:
/* File status flags need O_* bit translation on the way back. */
4816 ret = get_errno(fcntl(fd, host_cmd, arg));
4817 if (ret >= 0) {
4818 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4820 break;
4822 case TARGET_F_SETFL:
4823 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4824 break;
4826 #ifdef F_GETOWN_EX
4827 case TARGET_F_GETOWN_EX:
4828 ret = get_errno(fcntl(fd, host_cmd, &fox));
4829 if (ret >= 0) {
4830 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
4831 return -TARGET_EFAULT;
4832 target_fox->type = tswap32(fox.type);
4833 target_fox->pid = tswap32(fox.pid);
4834 unlock_user_struct(target_fox, arg, 1);
4836 break;
4837 #endif
4839 #ifdef F_SETOWN_EX
4840 case TARGET_F_SETOWN_EX:
4841 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
4842 return -TARGET_EFAULT;
4843 fox.type = tswap32(target_fox->type);
4844 fox.pid = tswap32(target_fox->pid);
4845 unlock_user_struct(target_fox, arg, 0);
4846 ret = get_errno(fcntl(fd, host_cmd, &fox));
4847 break;
4848 #endif
/* Plain integer-argument commands: pass through unchanged. */
4850 case TARGET_F_SETOWN:
4851 case TARGET_F_GETOWN:
4852 case TARGET_F_SETSIG:
4853 case TARGET_F_GETSIG:
4854 case TARGET_F_SETLEASE:
4855 case TARGET_F_GETLEASE:
4856 ret = get_errno(fcntl(fd, host_cmd, arg));
4857 break;
4859 default:
4860 ret = get_errno(fcntl(fd, cmd, arg));
4861 break;
4863 return ret;
4866 #ifdef USE_UID16
/* Narrow a 32-bit UID to the 16-bit ABI: values that do not fit are
 * clamped to the kernel's overflow UID (65534). */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
/* Narrow a 32-bit GID to the 16-bit ABI: values that do not fit are
 * clamped to the kernel's overflow GID (65534). */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
/* Widen a 16-bit ABI UID: the 16-bit -1 sentinel ("leave unchanged" in
 * setresuid & friends) must become the full-width -1. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
/* Widen a 16-bit ABI GID: the 16-bit -1 sentinel must become the
 * full-width -1. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a 16-bit uid/gid between host and target order. */
4899 static inline int tswapid(int id)
4901 return tswap16(id);
/* Store a 16-bit id at a guest address (UID16 ABI variant). */
4904 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4906 #else /* !USE_UID16 */
/* 32-bit uid/gid ABI variants: no narrowing is needed, so these are
   identity functions; tswapid/put_user_id operate on 32 bits. */
4907 static inline int high2lowuid(int uid)
4909 return uid;
4911 static inline int high2lowgid(int gid)
4913 return gid;
4915 static inline int low2highuid(int uid)
4917 return uid;
4919 static inline int low2highgid(int gid)
4921 return gid;
4923 static inline int tswapid(int id)
4925 return tswap32(id);
4928 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4930 #endif /* USE_UID16 */
/*
 * One-time syscall layer initialization: register struct marshalling
 * thunks, build the reverse errno table, and patch ioctl size fields
 * that must be computed from the argument type at runtime.
 */
4932 void syscall_init(void)
4934 IOCTLEntry *ie;
4935 const argtype *arg_type;
4936 int size;
4937 int i;
4939 thunk_init(STRUCT_MAX);
4941 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4942 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4943 #include "syscall_types.h"
4944 #undef STRUCT
4945 #undef STRUCT_SPECIAL
4947 /* Build target_to_host_errno_table[] table from
4948 * host_to_target_errno_table[]. */
4949 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4950 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4953 /* we patch the ioctl size if necessary. We rely on the fact that
4954 no ioctl has all the bits at '1' in the size field */
4955 ie = ioctl_entries;
4956 while (ie->target_cmd != 0) {
/* An all-ones size field marks "compute size from the thunk type". */
4957 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4958 TARGET_IOC_SIZEMASK) {
4959 arg_type = ie->arg_type;
4960 if (arg_type[0] != TYPE_PTR) {
4961 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4962 ie->target_cmd);
4963 exit(1);
4965 arg_type++;
4966 size = thunk_type_size(arg_type, 0);
4967 ie->target_cmd = (ie->target_cmd &
4968 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4969 (size << TARGET_IOC_SIZESHIFT);
4972 /* automatic consistency check if same arch */
4973 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4974 (defined(__x86_64__) && defined(TARGET_X86_64))
4975 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4976 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4977 ie->name, ie->target_cmd, ie->host_cmd);
4979 #endif
4980 ie++;
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit file offset from the two 32-bit registers a 32-bit
 * guest ABI splits it across, honouring the target's word order. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS != 32 */
/* 64-bit ABIs pass the whole offset in one register; the second
 * argument exists only so both variants share a call signature. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64(2): the 64-bit length arrives split across two registers
 * on 32-bit ABIs; some targets pad so the pair starts on an even
 * register, which shifts the useful arguments up by one. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64(2): same register-pair alignment handling as
 * target_truncate64() above, but operating on an fd. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
5028 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
5029 abi_ulong target_addr)
5031 struct target_timespec *target_ts;
5033 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
5034 return -TARGET_EFAULT;
5035 host_ts->tv_sec = tswapal(target_ts->tv_sec);
5036 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
5037 unlock_user_struct(target_ts, target_addr, 0);
5038 return 0;
5041 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
5042 struct timespec *host_ts)
5044 struct target_timespec *target_ts;
5046 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
5047 return -TARGET_EFAULT;
5048 target_ts->tv_sec = tswapal(host_ts->tv_sec);
5049 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
5050 unlock_user_struct(target_ts, target_addr, 1);
5051 return 0;
5054 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5055 abi_ulong target_addr)
5057 struct target_itimerspec *target_itspec;
5059 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5060 return -TARGET_EFAULT;
5063 host_itspec->it_interval.tv_sec =
5064 tswapal(target_itspec->it_interval.tv_sec);
5065 host_itspec->it_interval.tv_nsec =
5066 tswapal(target_itspec->it_interval.tv_nsec);
5067 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5068 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5070 unlock_user_struct(target_itspec, target_addr, 1);
5071 return 0;
5074 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5075 struct itimerspec *host_its)
5077 struct target_itimerspec *target_itspec;
5079 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5080 return -TARGET_EFAULT;
5083 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5084 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5086 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5087 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5089 unlock_user_struct(target_itspec, target_addr, 0);
5090 return 0;
5093 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5094 abi_ulong target_addr)
5096 struct target_sigevent *target_sevp;
5098 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5099 return -TARGET_EFAULT;
5102 /* This union is awkward on 64 bit systems because it has a 32 bit
5103 * integer and a pointer in it; we follow the conversion approach
5104 * used for handling sigval types in signal.c so the guest should get
5105 * the correct value back even if we did a 64 bit byteswap and it's
5106 * using the 32 bit integer.
5108 host_sevp->sigev_value.sival_ptr =
5109 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5110 host_sevp->sigev_signo =
5111 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5112 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5113 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5115 unlock_user_struct(target_sevp, target_addr, 1);
5116 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall(2) flag bits into the host's MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        host_flags |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_flags |= MCL_FUTURE;
    }
    return host_flags;
}
#endif
5134 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/*
 * Copy a host struct stat out to the guest's stat64 (or EABI stat64, or
 * plain stat) layout at target_addr, swapping each field as needed.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest pointer.
 */
5135 static inline abi_long host_to_target_stat64(void *cpu_env,
5136 abi_ulong target_addr,
5137 struct stat *host_st)
/* ARM EABI guests use a layout with different padding/alignment. */
5139 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5140 if (((CPUARMState *)cpu_env)->eabi) {
5141 struct target_eabi_stat64 *target_st;
5143 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5144 return -TARGET_EFAULT;
5145 memset(target_st, 0, sizeof(struct target_eabi_stat64));
5146 __put_user(host_st->st_dev, &target_st->st_dev);
5147 __put_user(host_st->st_ino, &target_st->st_ino);
5148 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5149 __put_user(host_st->st_ino, &target_st->__st_ino);
5150 #endif
5151 __put_user(host_st->st_mode, &target_st->st_mode);
5152 __put_user(host_st->st_nlink, &target_st->st_nlink);
5153 __put_user(host_st->st_uid, &target_st->st_uid);
5154 __put_user(host_st->st_gid, &target_st->st_gid);
5155 __put_user(host_st->st_rdev, &target_st->st_rdev);
5156 __put_user(host_st->st_size, &target_st->st_size);
5157 __put_user(host_st->st_blksize, &target_st->st_blksize);
5158 __put_user(host_st->st_blocks, &target_st->st_blocks);
5159 __put_user(host_st->st_atime, &target_st->target_st_atime);
5160 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5161 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5162 unlock_user_struct(target_st, target_addr, 1);
5163 } else
5164 #endif
/* Generic path: use stat64 where the target defines it, else stat. */
5166 #if defined(TARGET_HAS_STRUCT_STAT64)
5167 struct target_stat64 *target_st;
5168 #else
5169 struct target_stat *target_st;
5170 #endif
5172 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5173 return -TARGET_EFAULT;
5174 memset(target_st, 0, sizeof(*target_st));
5175 __put_user(host_st->st_dev, &target_st->st_dev);
5176 __put_user(host_st->st_ino, &target_st->st_ino);
5177 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5178 __put_user(host_st->st_ino, &target_st->__st_ino);
5179 #endif
5180 __put_user(host_st->st_mode, &target_st->st_mode);
5181 __put_user(host_st->st_nlink, &target_st->st_nlink);
5182 __put_user(host_st->st_uid, &target_st->st_uid);
5183 __put_user(host_st->st_gid, &target_st->st_gid);
5184 __put_user(host_st->st_rdev, &target_st->st_rdev);
5185 /* XXX: better use of kernel struct */
5186 __put_user(host_st->st_size, &target_st->st_size);
5187 __put_user(host_st->st_blksize, &target_st->st_blksize);
5188 __put_user(host_st->st_blocks, &target_st->st_blocks);
5189 __put_user(host_st->st_atime, &target_st->target_st_atime);
5190 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5191 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5192 unlock_user_struct(target_st, target_addr, 1);
5195 return 0;
5197 #endif
5199 /* ??? Using host futex calls even when target atomic operations
5200 are not really atomic probably breaks things. However implementing
5201 futexes locally would make futexes shared between multiple processes
5202 tricky. However they're probably useless because guest atomic
5203 operations won't work either. */
5204 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5205 target_ulong uaddr2, int val3)
5207 struct timespec ts, *pts;
5208 int base_op;
5210 /* ??? We assume FUTEX_* constants are the same on both host
5211 and target. */
5212 #ifdef FUTEX_CMD_MASK
5213 base_op = op & FUTEX_CMD_MASK;
5214 #else
5215 base_op = op;
5216 #endif
5217 switch (base_op) {
5218 case FUTEX_WAIT:
5219 case FUTEX_WAIT_BITSET:
5220 if (timeout) {
5221 pts = &ts;
5222 target_to_host_timespec(pts, timeout);
5223 } else {
5224 pts = NULL;
5226 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
5227 pts, NULL, val3));
5228 case FUTEX_WAKE:
5229 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5230 case FUTEX_FD:
5231 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5232 case FUTEX_REQUEUE:
5233 case FUTEX_CMP_REQUEUE:
5234 case FUTEX_WAKE_OP:
5235 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5236 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5237 But the prototype takes a `struct timespec *'; insert casts
5238 to satisfy the compiler. We do not need to tswap TIMEOUT
5239 since it's not compared to guest memory. */
5240 pts = (struct timespec *)(uintptr_t) timeout;
5241 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
5242 g2h(uaddr2),
5243 (base_op == FUTEX_CMP_REQUEUE
5244 ? tswap32(val3)
5245 : val3)));
5246 default:
5247 return -TARGET_ENOSYS;
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Replace the low 7 bits (terminating signal) with the target's
           signal number; keep everything else (e.g. the core-dump bit). */
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal is reported in bits 8..15. */
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    /* Normal exit statuses need no translation. */
    return status;
}
/*
 * Back the guest's /proc/self/cmdline with the host file, dropping the
 * first NUL-terminated argument (the qemu binary path) so the guest sees
 * its own argv[0] first.  Writes the result to 'fd'.
 * Returns 0 on success, -1 on read/write failure.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            close(fd_orig);
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* Fix: bound the scan by the bytes actually read; scanning
             * sizeof(buf) could match a stale NUL in uninitialized stack
             * beyond nb_read and mis-split the argument list. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                close(fd_orig);
                return -1;
            }
        }
    }

    return close(fd_orig);
}
/*
 * Back the guest's /proc/self/maps: parse the host's maps, keep only
 * ranges that correspond to valid guest pages, and rewrite the
 * addresses into guest view.  Writes the result to 'fd'; returns 0,
 * or -EACCES if the host file cannot be opened.
 */
5311 static int open_self_maps(void *cpu_env, int fd)
5313 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5314 TaskState *ts = cpu->opaque;
5315 FILE *fp;
5316 char *line = NULL;
5317 size_t len = 0;
5318 ssize_t read;
5320 fp = fopen("/proc/self/maps", "r");
5321 if (fp == NULL) {
5322 return -EACCES;
5325 while ((read = getline(&line, &len, fp)) != -1) {
5326 int fields, dev_maj, dev_min, inode;
5327 uint64_t min, max, offset;
5328 char flag_r, flag_w, flag_x, flag_p;
5329 char path[512] = "";
5330 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5331 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5332 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
/* 10 fields = anonymous mapping, 11 = file-backed; anything else is junk. */
5334 if ((fields < 10) || (fields > 11)) {
5335 continue;
/* Only report host ranges that map back into guest address space. */
5337 if (h2g_valid(min)) {
5338 int flags = page_get_flags(h2g(min));
5339 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5340 if (page_check_range(h2g(min), max - min, flags) == -1) {
5341 continue;
5343 if (h2g(min) == ts->info->stack_limit) {
5344 pstrcpy(path, sizeof(path), " [stack]");
5346 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5347 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5348 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5349 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5350 path[0] ? " " : "", path);
5354 free(line);
5355 fclose(fp);
5357 return 0;
/*
 * Back the guest's /proc/self/stat: emit the 44 space-separated fields,
 * filling in only pid (0), comm (1) and startstack (27); all other
 * fields are reported as 0.  Writes to 'fd'; returns 0 or -1 on a
 * short write.
 */
5360 static int open_self_stat(void *cpu_env, int fd)
5362 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5363 TaskState *ts = cpu->opaque;
5364 abi_ulong start_stack = ts->info->start_stack;
5365 int i;
5367 for (i = 0; i < 44; i++) {
5368 char buf[128];
5369 int len;
5370 uint64_t val = 0;
5372 if (i == 0) {
5373 /* pid */
5374 val = getpid();
5375 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5376 } else if (i == 1) {
5377 /* app name */
5378 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5379 } else if (i == 27) {
5380 /* stack bottom */
5381 val = start_stack;
5382 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5383 } else {
5384 /* for the rest, there is MasterCard */
5385 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5388 len = strlen(buf);
5389 if (write(fd, buf, len) != len) {
5390 return -1;
5394 return 0;
/*
 * Back the guest's /proc/self/auxv: copy the auxiliary vector that was
 * placed on the guest stack at exec time out to 'fd'.
 * Always returns 0; a short write simply truncates the output.
 */
5397 static int open_self_auxv(void *cpu_env, int fd)
5399 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5400 TaskState *ts = cpu->opaque;
5401 abi_ulong auxv = ts->info->saved_auxv;
5402 abi_ulong len = ts->info->auxv_len;
5403 char *ptr;
5406 * Auxiliary vector is stored in target process stack.
5407 * read in whole auxv vector and copy it to file
5409 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5410 if (ptr != NULL) {
5411 while (len > 0) {
5412 ssize_t r;
5413 r = write(fd, ptr, len);
5414 if (r <= 0) {
5415 break;
5417 len -= r;
5418 ptr += r;
/* Rewind so the guest reads the fake file from the beginning. */
5420 lseek(fd, 0, SEEK_SET);
/* NOTE(review): 'len' and 'ptr' have been advanced by the loop above,
   so this unlock covers only the unwritten tail — verify intent. */
5421 unlock_user(ptr, auxv, len);
5424 return 0;
/*
 * Return 1 if 'filename' names the given 'entry' of this process's
 * /proc directory — either "/proc/self/<entry>" or "/proc/<pid>/<entry>"
 * with our own pid — and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *p = filename;

    if (strncmp(p, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    p += strlen("/proc/");

    if (strncmp(p, "self/", strlen("self/")) == 0) {
        p += strlen("self/");
    } else if (*p >= '1' && *p <= '9') {
        /* Numeric pid: only our own pid counts as "myself". */
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(p, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        p += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(p, entry) == 0;
}
5451 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparator used for fake /proc entries registered with an
 * absolute filename (e.g. "/proc/net/route"). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
5457 static int open_net_route(void *cpu_env, int fd)
5459 FILE *fp;
5460 char *line = NULL;
5461 size_t len = 0;
5462 ssize_t read;
5464 fp = fopen("/proc/net/route", "r");
5465 if (fp == NULL) {
5466 return -EACCES;
5469 /* read header */
5471 read = getline(&line, &len, fp);
5472 dprintf(fd, "%s", line);
5474 /* read routes */
5476 while ((read = getline(&line, &len, fp)) != -1) {
5477 char iface[16];
5478 uint32_t dest, gw, mask;
5479 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5480 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5481 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5482 &mask, &mtu, &window, &irtt);
5483 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5484 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5485 metric, tswap32(mask), mtu, window, irtt);
5488 free(line);
5489 fclose(fp);
5491 return 0;
5493 #endif
/*
 * Emulate openat(2), intercepting /proc paths that must be faked for the
 * guest (maps, stat, auxv, cmdline, and /proc/net/route on cross-endian
 * hosts).  Faked entries are materialized into an unlinked temp file.
 * Returns an fd or a negative target errno.
 */
5495 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
5497 struct fake_open {
5498 const char *filename;
5499 int (*fill)(void *cpu_env, int fd);
5500 int (*cmp)(const char *s1, const char *s2);
5502 const struct fake_open *fake_open;
5503 static const struct fake_open fakes[] = {
5504 { "maps", open_self_maps, is_proc_myself },
5505 { "stat", open_self_stat, is_proc_myself },
5506 { "auxv", open_self_auxv, is_proc_myself },
5507 { "cmdline", open_self_cmdline, is_proc_myself },
5508 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5509 { "/proc/net/route", open_net_route, is_proc },
5510 #endif
5511 { NULL, NULL, NULL }
/* /proc/self/exe must resolve to the guest binary, not QEMU itself. */
5514 if (is_proc_myself(pathname, "exe")) {
5515 int execfd = qemu_getauxval(AT_EXECFD);
5516 return execfd ? execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode));
5519 for (fake_open = fakes; fake_open->filename; fake_open++) {
5520 if (fake_open->cmp(pathname, fake_open->filename)) {
5521 break;
5525 if (fake_open->filename) {
5526 const char *tmpdir;
5527 char filename[PATH_MAX];
5528 int fd, r;
5530 /* create temporary file to map stat to */
5531 tmpdir = getenv("TMPDIR");
5532 if (!tmpdir)
5533 tmpdir = "/tmp";
5534 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5535 fd = mkstemp(filename);
5536 if (fd < 0) {
5537 return fd;
5539 unlink(filename);
5541 if ((r = fake_open->fill(cpu_env, fd))) {
5542 close(fd);
5543 return r;
5545 lseek(fd, 0, SEEK_SET);
5547 return fd;
/* Not a faked path: forward to the host, applying path() redirection. */
5550 return get_errno(sys_openat(dirfd, path(pathname), flags, mode));
5553 #define TIMER_MAGIC 0x0caf0000
5554 #define TIMER_MAGIC_MASK 0xffff0000
5556 /* Convert QEMU provided timer ID back to internal 16bit index format */
5557 static target_timer_t get_timer_id(abi_long arg)
5559 target_timer_t timerid = arg;
5561 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
5562 return -TARGET_EINVAL;
5565 timerid &= 0xffff;
5567 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
5568 return -TARGET_EINVAL;
5571 return timerid;
5574 /* do_syscall() should always have a single exit point at the end so
5575 that actions, such as logging of syscall results, can be performed.
5576 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5577 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5578 abi_long arg2, abi_long arg3, abi_long arg4,
5579 abi_long arg5, abi_long arg6, abi_long arg7,
5580 abi_long arg8)
5582 CPUState *cpu = ENV_GET_CPU(cpu_env);
5583 abi_long ret;
5584 struct stat st;
5585 struct statfs stfs;
5586 void *p;
5588 #ifdef DEBUG
5589 gemu_log("syscall %d", num);
5590 #endif
5591 if(do_strace)
5592 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5594 switch(num) {
5595 case TARGET_NR_exit:
5596 /* In old applications this may be used to implement _exit(2).
5597 However in threaded applictions it is used for thread termination,
5598 and _exit_group is used for application termination.
5599 Do thread termination if we have more then one thread. */
5600 /* FIXME: This probably breaks if a signal arrives. We should probably
5601 be disabling signals. */
5602 if (CPU_NEXT(first_cpu)) {
5603 TaskState *ts;
5605 cpu_list_lock();
5606 /* Remove the CPU from the list. */
5607 QTAILQ_REMOVE(&cpus, cpu, node);
5608 cpu_list_unlock();
5609 ts = cpu->opaque;
5610 if (ts->child_tidptr) {
5611 put_user_u32(0, ts->child_tidptr);
5612 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5613 NULL, NULL, 0);
5615 thread_cpu = NULL;
5616 object_unref(OBJECT(cpu));
5617 g_free(ts);
5618 rcu_unregister_thread();
5619 pthread_exit(NULL);
5621 #ifdef TARGET_GPROF
5622 _mcleanup();
5623 #endif
5624 gdb_exit(cpu_env, arg1);
5625 _exit(arg1);
5626 ret = 0; /* avoid warning */
5627 break;
5628 case TARGET_NR_read:
5629 if (arg3 == 0)
5630 ret = 0;
5631 else {
5632 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5633 goto efault;
5634 ret = get_errno(read(arg1, p, arg3));
5635 unlock_user(p, arg2, ret);
5637 break;
5638 case TARGET_NR_write:
5639 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5640 goto efault;
5641 ret = get_errno(write(arg1, p, arg3));
5642 unlock_user(p, arg2, 0);
5643 break;
5644 case TARGET_NR_open:
5645 if (!(p = lock_user_string(arg1)))
5646 goto efault;
5647 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
5648 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5649 arg3));
5650 unlock_user(p, arg1, 0);
5651 break;
5652 case TARGET_NR_openat:
5653 if (!(p = lock_user_string(arg2)))
5654 goto efault;
5655 ret = get_errno(do_openat(cpu_env, arg1, p,
5656 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5657 arg4));
5658 unlock_user(p, arg2, 0);
5659 break;
5660 case TARGET_NR_close:
5661 ret = get_errno(close(arg1));
5662 break;
5663 case TARGET_NR_brk:
5664 ret = do_brk(arg1);
5665 break;
5666 case TARGET_NR_fork:
5667 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5668 break;
5669 #ifdef TARGET_NR_waitpid
5670 case TARGET_NR_waitpid:
5672 int status;
5673 ret = get_errno(waitpid(arg1, &status, arg3));
5674 if (!is_error(ret) && arg2 && ret
5675 && put_user_s32(host_to_target_waitstatus(status), arg2))
5676 goto efault;
5678 break;
5679 #endif
5680 #ifdef TARGET_NR_waitid
5681 case TARGET_NR_waitid:
5683 siginfo_t info;
5684 info.si_pid = 0;
5685 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5686 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5687 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5688 goto efault;
5689 host_to_target_siginfo(p, &info);
5690 unlock_user(p, arg3, sizeof(target_siginfo_t));
5693 break;
5694 #endif
5695 #ifdef TARGET_NR_creat /* not on alpha */
5696 case TARGET_NR_creat:
5697 if (!(p = lock_user_string(arg1)))
5698 goto efault;
5699 ret = get_errno(creat(p, arg2));
5700 unlock_user(p, arg1, 0);
5701 break;
5702 #endif
5703 case TARGET_NR_link:
5705 void * p2;
5706 p = lock_user_string(arg1);
5707 p2 = lock_user_string(arg2);
5708 if (!p || !p2)
5709 ret = -TARGET_EFAULT;
5710 else
5711 ret = get_errno(link(p, p2));
5712 unlock_user(p2, arg2, 0);
5713 unlock_user(p, arg1, 0);
5715 break;
5716 #if defined(TARGET_NR_linkat)
5717 case TARGET_NR_linkat:
5719 void * p2 = NULL;
5720 if (!arg2 || !arg4)
5721 goto efault;
5722 p = lock_user_string(arg2);
5723 p2 = lock_user_string(arg4);
5724 if (!p || !p2)
5725 ret = -TARGET_EFAULT;
5726 else
5727 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5728 unlock_user(p, arg2, 0);
5729 unlock_user(p2, arg4, 0);
5731 break;
5732 #endif
5733 case TARGET_NR_unlink:
5734 if (!(p = lock_user_string(arg1)))
5735 goto efault;
5736 ret = get_errno(unlink(p));
5737 unlock_user(p, arg1, 0);
5738 break;
5739 #if defined(TARGET_NR_unlinkat)
5740 case TARGET_NR_unlinkat:
5741 if (!(p = lock_user_string(arg2)))
5742 goto efault;
5743 ret = get_errno(unlinkat(arg1, p, arg3));
5744 unlock_user(p, arg2, 0);
5745 break;
5746 #endif
5747 case TARGET_NR_execve:
5749 char **argp, **envp;
5750 int argc, envc;
5751 abi_ulong gp;
5752 abi_ulong guest_argp;
5753 abi_ulong guest_envp;
5754 abi_ulong addr;
5755 char **q;
5756 int total_size = 0;
5758 argc = 0;
5759 guest_argp = arg2;
5760 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5761 if (get_user_ual(addr, gp))
5762 goto efault;
5763 if (!addr)
5764 break;
5765 argc++;
5767 envc = 0;
5768 guest_envp = arg3;
5769 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5770 if (get_user_ual(addr, gp))
5771 goto efault;
5772 if (!addr)
5773 break;
5774 envc++;
5777 argp = alloca((argc + 1) * sizeof(void *));
5778 envp = alloca((envc + 1) * sizeof(void *));
5780 for (gp = guest_argp, q = argp; gp;
5781 gp += sizeof(abi_ulong), q++) {
5782 if (get_user_ual(addr, gp))
5783 goto execve_efault;
5784 if (!addr)
5785 break;
5786 if (!(*q = lock_user_string(addr)))
5787 goto execve_efault;
5788 total_size += strlen(*q) + 1;
5790 *q = NULL;
5792 for (gp = guest_envp, q = envp; gp;
5793 gp += sizeof(abi_ulong), q++) {
5794 if (get_user_ual(addr, gp))
5795 goto execve_efault;
5796 if (!addr)
5797 break;
5798 if (!(*q = lock_user_string(addr)))
5799 goto execve_efault;
5800 total_size += strlen(*q) + 1;
5802 *q = NULL;
5804 /* This case will not be caught by the host's execve() if its
5805 page size is bigger than the target's. */
5806 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5807 ret = -TARGET_E2BIG;
5808 goto execve_end;
5810 if (!(p = lock_user_string(arg1)))
5811 goto execve_efault;
5812 ret = get_errno(execve(p, argp, envp));
5813 unlock_user(p, arg1, 0);
5815 goto execve_end;
5817 execve_efault:
5818 ret = -TARGET_EFAULT;
5820 execve_end:
5821 for (gp = guest_argp, q = argp; *q;
5822 gp += sizeof(abi_ulong), q++) {
5823 if (get_user_ual(addr, gp)
5824 || !addr)
5825 break;
5826 unlock_user(*q, addr, 0);
5828 for (gp = guest_envp, q = envp; *q;
5829 gp += sizeof(abi_ulong), q++) {
5830 if (get_user_ual(addr, gp)
5831 || !addr)
5832 break;
5833 unlock_user(*q, addr, 0);
5836 break;
5837 case TARGET_NR_chdir:
5838 if (!(p = lock_user_string(arg1)))
5839 goto efault;
5840 ret = get_errno(chdir(p));
5841 unlock_user(p, arg1, 0);
5842 break;
5843 #ifdef TARGET_NR_time
5844 case TARGET_NR_time:
5846 time_t host_time;
5847 ret = get_errno(time(&host_time));
5848 if (!is_error(ret)
5849 && arg1
5850 && put_user_sal(host_time, arg1))
5851 goto efault;
5853 break;
5854 #endif
5855 case TARGET_NR_mknod:
5856 if (!(p = lock_user_string(arg1)))
5857 goto efault;
5858 ret = get_errno(mknod(p, arg2, arg3));
5859 unlock_user(p, arg1, 0);
5860 break;
5861 #if defined(TARGET_NR_mknodat)
5862 case TARGET_NR_mknodat:
5863 if (!(p = lock_user_string(arg2)))
5864 goto efault;
5865 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5866 unlock_user(p, arg2, 0);
5867 break;
5868 #endif
5869 case TARGET_NR_chmod:
5870 if (!(p = lock_user_string(arg1)))
5871 goto efault;
5872 ret = get_errno(chmod(p, arg2));
5873 unlock_user(p, arg1, 0);
5874 break;
5875 #ifdef TARGET_NR_break
5876 case TARGET_NR_break:
5877 goto unimplemented;
5878 #endif
5879 #ifdef TARGET_NR_oldstat
5880 case TARGET_NR_oldstat:
5881 goto unimplemented;
5882 #endif
5883 case TARGET_NR_lseek:
5884 ret = get_errno(lseek(arg1, arg2, arg3));
5885 break;
5886 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5887 /* Alpha specific */
5888 case TARGET_NR_getxpid:
5889 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5890 ret = get_errno(getpid());
5891 break;
5892 #endif
5893 #ifdef TARGET_NR_getpid
5894 case TARGET_NR_getpid:
5895 ret = get_errno(getpid());
5896 break;
5897 #endif
5898 case TARGET_NR_mount:
5900 /* need to look at the data field */
5901 void *p2, *p3;
5903 if (arg1) {
5904 p = lock_user_string(arg1);
5905 if (!p) {
5906 goto efault;
5908 } else {
5909 p = NULL;
5912 p2 = lock_user_string(arg2);
5913 if (!p2) {
5914 if (arg1) {
5915 unlock_user(p, arg1, 0);
5917 goto efault;
5920 if (arg3) {
5921 p3 = lock_user_string(arg3);
5922 if (!p3) {
5923 if (arg1) {
5924 unlock_user(p, arg1, 0);
5926 unlock_user(p2, arg2, 0);
5927 goto efault;
5929 } else {
5930 p3 = NULL;
5933 /* FIXME - arg5 should be locked, but it isn't clear how to
5934 * do that since it's not guaranteed to be a NULL-terminated
5935 * string.
5937 if (!arg5) {
5938 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
5939 } else {
5940 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
5942 ret = get_errno(ret);
5944 if (arg1) {
5945 unlock_user(p, arg1, 0);
5947 unlock_user(p2, arg2, 0);
5948 if (arg3) {
5949 unlock_user(p3, arg3, 0);
5952 break;
5953 #ifdef TARGET_NR_umount
5954 case TARGET_NR_umount:
5955 if (!(p = lock_user_string(arg1)))
5956 goto efault;
5957 ret = get_errno(umount(p));
5958 unlock_user(p, arg1, 0);
5959 break;
5960 #endif
5961 #ifdef TARGET_NR_stime /* not on alpha */
5962 case TARGET_NR_stime:
5964 time_t host_time;
5965 if (get_user_sal(host_time, arg1))
5966 goto efault;
5967 ret = get_errno(stime(&host_time));
5969 break;
5970 #endif
5971 case TARGET_NR_ptrace:
5972 goto unimplemented;
5973 #ifdef TARGET_NR_alarm /* not on alpha */
5974 case TARGET_NR_alarm:
5975 ret = alarm(arg1);
5976 break;
5977 #endif
5978 #ifdef TARGET_NR_oldfstat
5979 case TARGET_NR_oldfstat:
5980 goto unimplemented;
5981 #endif
5982 #ifdef TARGET_NR_pause /* not on alpha */
5983 case TARGET_NR_pause:
5984 ret = get_errno(pause());
5985 break;
5986 #endif
5987 #ifdef TARGET_NR_utime
5988 case TARGET_NR_utime:
5990 struct utimbuf tbuf, *host_tbuf;
5991 struct target_utimbuf *target_tbuf;
5992 if (arg2) {
5993 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5994 goto efault;
5995 tbuf.actime = tswapal(target_tbuf->actime);
5996 tbuf.modtime = tswapal(target_tbuf->modtime);
5997 unlock_user_struct(target_tbuf, arg2, 0);
5998 host_tbuf = &tbuf;
5999 } else {
6000 host_tbuf = NULL;
6002 if (!(p = lock_user_string(arg1)))
6003 goto efault;
6004 ret = get_errno(utime(p, host_tbuf));
6005 unlock_user(p, arg1, 0);
6007 break;
6008 #endif
6009 case TARGET_NR_utimes:
6011 struct timeval *tvp, tv[2];
6012 if (arg2) {
6013 if (copy_from_user_timeval(&tv[0], arg2)
6014 || copy_from_user_timeval(&tv[1],
6015 arg2 + sizeof(struct target_timeval)))
6016 goto efault;
6017 tvp = tv;
6018 } else {
6019 tvp = NULL;
6021 if (!(p = lock_user_string(arg1)))
6022 goto efault;
6023 ret = get_errno(utimes(p, tvp));
6024 unlock_user(p, arg1, 0);
6026 break;
6027 #if defined(TARGET_NR_futimesat)
6028 case TARGET_NR_futimesat:
6030 struct timeval *tvp, tv[2];
6031 if (arg3) {
6032 if (copy_from_user_timeval(&tv[0], arg3)
6033 || copy_from_user_timeval(&tv[1],
6034 arg3 + sizeof(struct target_timeval)))
6035 goto efault;
6036 tvp = tv;
6037 } else {
6038 tvp = NULL;
6040 if (!(p = lock_user_string(arg2)))
6041 goto efault;
6042 ret = get_errno(futimesat(arg1, path(p), tvp));
6043 unlock_user(p, arg2, 0);
6045 break;
6046 #endif
6047 #ifdef TARGET_NR_stty
6048 case TARGET_NR_stty:
6049 goto unimplemented;
6050 #endif
6051 #ifdef TARGET_NR_gtty
6052 case TARGET_NR_gtty:
6053 goto unimplemented;
6054 #endif
6055 case TARGET_NR_access:
6056 if (!(p = lock_user_string(arg1)))
6057 goto efault;
6058 ret = get_errno(access(path(p), arg2));
6059 unlock_user(p, arg1, 0);
6060 break;
6061 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6062 case TARGET_NR_faccessat:
6063 if (!(p = lock_user_string(arg2)))
6064 goto efault;
6065 ret = get_errno(faccessat(arg1, p, arg3, 0));
6066 unlock_user(p, arg2, 0);
6067 break;
6068 #endif
6069 #ifdef TARGET_NR_nice /* not on alpha */
6070 case TARGET_NR_nice:
6071 ret = get_errno(nice(arg1));
6072 break;
6073 #endif
6074 #ifdef TARGET_NR_ftime
6075 case TARGET_NR_ftime:
6076 goto unimplemented;
6077 #endif
6078 case TARGET_NR_sync:
6079 sync();
6080 ret = 0;
6081 break;
6082 case TARGET_NR_kill:
6083 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
6084 break;
6085 case TARGET_NR_rename:
6087 void *p2;
6088 p = lock_user_string(arg1);
6089 p2 = lock_user_string(arg2);
6090 if (!p || !p2)
6091 ret = -TARGET_EFAULT;
6092 else
6093 ret = get_errno(rename(p, p2));
6094 unlock_user(p2, arg2, 0);
6095 unlock_user(p, arg1, 0);
6097 break;
6098 #if defined(TARGET_NR_renameat)
6099 case TARGET_NR_renameat:
6101 void *p2;
6102 p = lock_user_string(arg2);
6103 p2 = lock_user_string(arg4);
6104 if (!p || !p2)
6105 ret = -TARGET_EFAULT;
6106 else
6107 ret = get_errno(renameat(arg1, p, arg3, p2));
6108 unlock_user(p2, arg4, 0);
6109 unlock_user(p, arg2, 0);
6111 break;
6112 #endif
6113 case TARGET_NR_mkdir:
6114 if (!(p = lock_user_string(arg1)))
6115 goto efault;
6116 ret = get_errno(mkdir(p, arg2));
6117 unlock_user(p, arg1, 0);
6118 break;
6119 #if defined(TARGET_NR_mkdirat)
6120 case TARGET_NR_mkdirat:
6121 if (!(p = lock_user_string(arg2)))
6122 goto efault;
6123 ret = get_errno(mkdirat(arg1, p, arg3));
6124 unlock_user(p, arg2, 0);
6125 break;
6126 #endif
6127 case TARGET_NR_rmdir:
6128 if (!(p = lock_user_string(arg1)))
6129 goto efault;
6130 ret = get_errno(rmdir(p));
6131 unlock_user(p, arg1, 0);
6132 break;
6133 case TARGET_NR_dup:
6134 ret = get_errno(dup(arg1));
6135 break;
6136 case TARGET_NR_pipe:
6137 ret = do_pipe(cpu_env, arg1, 0, 0);
6138 break;
6139 #ifdef TARGET_NR_pipe2
6140 case TARGET_NR_pipe2:
6141 ret = do_pipe(cpu_env, arg1,
6142 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6143 break;
6144 #endif
6145 case TARGET_NR_times:
6147 struct target_tms *tmsp;
6148 struct tms tms;
6149 ret = get_errno(times(&tms));
6150 if (arg1) {
6151 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6152 if (!tmsp)
6153 goto efault;
6154 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6155 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6156 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6157 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6159 if (!is_error(ret))
6160 ret = host_to_target_clock_t(ret);
6162 break;
6163 #ifdef TARGET_NR_prof
6164 case TARGET_NR_prof:
6165 goto unimplemented;
6166 #endif
6167 #ifdef TARGET_NR_signal
6168 case TARGET_NR_signal:
6169 goto unimplemented;
6170 #endif
6171 case TARGET_NR_acct:
6172 if (arg1 == 0) {
6173 ret = get_errno(acct(NULL));
6174 } else {
6175 if (!(p = lock_user_string(arg1)))
6176 goto efault;
6177 ret = get_errno(acct(path(p)));
6178 unlock_user(p, arg1, 0);
6180 break;
6181 #ifdef TARGET_NR_umount2
6182 case TARGET_NR_umount2:
6183 if (!(p = lock_user_string(arg1)))
6184 goto efault;
6185 ret = get_errno(umount2(p, arg2));
6186 unlock_user(p, arg1, 0);
6187 break;
6188 #endif
6189 #ifdef TARGET_NR_lock
6190 case TARGET_NR_lock:
6191 goto unimplemented;
6192 #endif
6193 case TARGET_NR_ioctl:
6194 ret = do_ioctl(arg1, arg2, arg3);
6195 break;
6196 case TARGET_NR_fcntl:
6197 ret = do_fcntl(arg1, arg2, arg3);
6198 break;
6199 #ifdef TARGET_NR_mpx
6200 case TARGET_NR_mpx:
6201 goto unimplemented;
6202 #endif
6203 case TARGET_NR_setpgid:
6204 ret = get_errno(setpgid(arg1, arg2));
6205 break;
6206 #ifdef TARGET_NR_ulimit
6207 case TARGET_NR_ulimit:
6208 goto unimplemented;
6209 #endif
6210 #ifdef TARGET_NR_oldolduname
6211 case TARGET_NR_oldolduname:
6212 goto unimplemented;
6213 #endif
6214 case TARGET_NR_umask:
6215 ret = get_errno(umask(arg1));
6216 break;
6217 case TARGET_NR_chroot:
6218 if (!(p = lock_user_string(arg1)))
6219 goto efault;
6220 ret = get_errno(chroot(p));
6221 unlock_user(p, arg1, 0);
6222 break;
6223 case TARGET_NR_ustat:
6224 goto unimplemented;
6225 case TARGET_NR_dup2:
6226 ret = get_errno(dup2(arg1, arg2));
6227 break;
6228 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6229 case TARGET_NR_dup3:
6230 ret = get_errno(dup3(arg1, arg2, arg3));
6231 break;
6232 #endif
6233 #ifdef TARGET_NR_getppid /* not on alpha */
6234 case TARGET_NR_getppid:
6235 ret = get_errno(getppid());
6236 break;
6237 #endif
6238 case TARGET_NR_getpgrp:
6239 ret = get_errno(getpgrp());
6240 break;
6241 case TARGET_NR_setsid:
6242 ret = get_errno(setsid());
6243 break;
6244 #ifdef TARGET_NR_sigaction
6245 case TARGET_NR_sigaction:
6247 #if defined(TARGET_ALPHA)
6248 struct target_sigaction act, oact, *pact = 0;
6249 struct target_old_sigaction *old_act;
6250 if (arg2) {
6251 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6252 goto efault;
6253 act._sa_handler = old_act->_sa_handler;
6254 target_siginitset(&act.sa_mask, old_act->sa_mask);
6255 act.sa_flags = old_act->sa_flags;
6256 act.sa_restorer = 0;
6257 unlock_user_struct(old_act, arg2, 0);
6258 pact = &act;
6260 ret = get_errno(do_sigaction(arg1, pact, &oact));
6261 if (!is_error(ret) && arg3) {
6262 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6263 goto efault;
6264 old_act->_sa_handler = oact._sa_handler;
6265 old_act->sa_mask = oact.sa_mask.sig[0];
6266 old_act->sa_flags = oact.sa_flags;
6267 unlock_user_struct(old_act, arg3, 1);
6269 #elif defined(TARGET_MIPS)
6270 struct target_sigaction act, oact, *pact, *old_act;
6272 if (arg2) {
6273 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6274 goto efault;
6275 act._sa_handler = old_act->_sa_handler;
6276 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6277 act.sa_flags = old_act->sa_flags;
6278 unlock_user_struct(old_act, arg2, 0);
6279 pact = &act;
6280 } else {
6281 pact = NULL;
6284 ret = get_errno(do_sigaction(arg1, pact, &oact));
6286 if (!is_error(ret) && arg3) {
6287 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6288 goto efault;
6289 old_act->_sa_handler = oact._sa_handler;
6290 old_act->sa_flags = oact.sa_flags;
6291 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6292 old_act->sa_mask.sig[1] = 0;
6293 old_act->sa_mask.sig[2] = 0;
6294 old_act->sa_mask.sig[3] = 0;
6295 unlock_user_struct(old_act, arg3, 1);
6297 #else
6298 struct target_old_sigaction *old_act;
6299 struct target_sigaction act, oact, *pact;
6300 if (arg2) {
6301 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6302 goto efault;
6303 act._sa_handler = old_act->_sa_handler;
6304 target_siginitset(&act.sa_mask, old_act->sa_mask);
6305 act.sa_flags = old_act->sa_flags;
6306 act.sa_restorer = old_act->sa_restorer;
6307 unlock_user_struct(old_act, arg2, 0);
6308 pact = &act;
6309 } else {
6310 pact = NULL;
6312 ret = get_errno(do_sigaction(arg1, pact, &oact));
6313 if (!is_error(ret) && arg3) {
6314 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6315 goto efault;
6316 old_act->_sa_handler = oact._sa_handler;
6317 old_act->sa_mask = oact.sa_mask.sig[0];
6318 old_act->sa_flags = oact.sa_flags;
6319 old_act->sa_restorer = oact.sa_restorer;
6320 unlock_user_struct(old_act, arg3, 1);
6322 #endif
6324 break;
6325 #endif
6326 case TARGET_NR_rt_sigaction:
6328 #if defined(TARGET_ALPHA)
6329 struct target_sigaction act, oact, *pact = 0;
6330 struct target_rt_sigaction *rt_act;
6331 /* ??? arg4 == sizeof(sigset_t). */
6332 if (arg2) {
6333 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6334 goto efault;
6335 act._sa_handler = rt_act->_sa_handler;
6336 act.sa_mask = rt_act->sa_mask;
6337 act.sa_flags = rt_act->sa_flags;
6338 act.sa_restorer = arg5;
6339 unlock_user_struct(rt_act, arg2, 0);
6340 pact = &act;
6342 ret = get_errno(do_sigaction(arg1, pact, &oact));
6343 if (!is_error(ret) && arg3) {
6344 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6345 goto efault;
6346 rt_act->_sa_handler = oact._sa_handler;
6347 rt_act->sa_mask = oact.sa_mask;
6348 rt_act->sa_flags = oact.sa_flags;
6349 unlock_user_struct(rt_act, arg3, 1);
6351 #else
6352 struct target_sigaction *act;
6353 struct target_sigaction *oact;
6355 if (arg2) {
6356 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6357 goto efault;
6358 } else
6359 act = NULL;
6360 if (arg3) {
6361 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6362 ret = -TARGET_EFAULT;
6363 goto rt_sigaction_fail;
6365 } else
6366 oact = NULL;
6367 ret = get_errno(do_sigaction(arg1, act, oact));
6368 rt_sigaction_fail:
6369 if (act)
6370 unlock_user_struct(act, arg2, 0);
6371 if (oact)
6372 unlock_user_struct(oact, arg3, 1);
6373 #endif
6375 break;
6376 #ifdef TARGET_NR_sgetmask /* not on alpha */
6377 case TARGET_NR_sgetmask:
6379 sigset_t cur_set;
6380 abi_ulong target_set;
6381 do_sigprocmask(0, NULL, &cur_set);
6382 host_to_target_old_sigset(&target_set, &cur_set);
6383 ret = target_set;
6385 break;
6386 #endif
6387 #ifdef TARGET_NR_ssetmask /* not on alpha */
6388 case TARGET_NR_ssetmask:
6390 sigset_t set, oset, cur_set;
6391 abi_ulong target_set = arg1;
6392 do_sigprocmask(0, NULL, &cur_set);
6393 target_to_host_old_sigset(&set, &target_set);
6394 sigorset(&set, &set, &cur_set);
6395 do_sigprocmask(SIG_SETMASK, &set, &oset);
6396 host_to_target_old_sigset(&target_set, &oset);
6397 ret = target_set;
6399 break;
6400 #endif
6401 #ifdef TARGET_NR_sigprocmask
6402 case TARGET_NR_sigprocmask:
6404 #if defined(TARGET_ALPHA)
6405 sigset_t set, oldset;
6406 abi_ulong mask;
6407 int how;
6409 switch (arg1) {
6410 case TARGET_SIG_BLOCK:
6411 how = SIG_BLOCK;
6412 break;
6413 case TARGET_SIG_UNBLOCK:
6414 how = SIG_UNBLOCK;
6415 break;
6416 case TARGET_SIG_SETMASK:
6417 how = SIG_SETMASK;
6418 break;
6419 default:
6420 ret = -TARGET_EINVAL;
6421 goto fail;
6423 mask = arg2;
6424 target_to_host_old_sigset(&set, &mask);
6426 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6427 if (!is_error(ret)) {
6428 host_to_target_old_sigset(&mask, &oldset);
6429 ret = mask;
6430 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6432 #else
6433 sigset_t set, oldset, *set_ptr;
6434 int how;
6436 if (arg2) {
6437 switch (arg1) {
6438 case TARGET_SIG_BLOCK:
6439 how = SIG_BLOCK;
6440 break;
6441 case TARGET_SIG_UNBLOCK:
6442 how = SIG_UNBLOCK;
6443 break;
6444 case TARGET_SIG_SETMASK:
6445 how = SIG_SETMASK;
6446 break;
6447 default:
6448 ret = -TARGET_EINVAL;
6449 goto fail;
6451 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6452 goto efault;
6453 target_to_host_old_sigset(&set, p);
6454 unlock_user(p, arg2, 0);
6455 set_ptr = &set;
6456 } else {
6457 how = 0;
6458 set_ptr = NULL;
6460 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6461 if (!is_error(ret) && arg3) {
6462 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6463 goto efault;
6464 host_to_target_old_sigset(p, &oldset);
6465 unlock_user(p, arg3, sizeof(target_sigset_t));
6467 #endif
6469 break;
6470 #endif
6471 case TARGET_NR_rt_sigprocmask:
6473 int how = arg1;
6474 sigset_t set, oldset, *set_ptr;
6476 if (arg2) {
6477 switch(how) {
6478 case TARGET_SIG_BLOCK:
6479 how = SIG_BLOCK;
6480 break;
6481 case TARGET_SIG_UNBLOCK:
6482 how = SIG_UNBLOCK;
6483 break;
6484 case TARGET_SIG_SETMASK:
6485 how = SIG_SETMASK;
6486 break;
6487 default:
6488 ret = -TARGET_EINVAL;
6489 goto fail;
6491 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6492 goto efault;
6493 target_to_host_sigset(&set, p);
6494 unlock_user(p, arg2, 0);
6495 set_ptr = &set;
6496 } else {
6497 how = 0;
6498 set_ptr = NULL;
6500 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6501 if (!is_error(ret) && arg3) {
6502 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6503 goto efault;
6504 host_to_target_sigset(p, &oldset);
6505 unlock_user(p, arg3, sizeof(target_sigset_t));
6508 break;
6509 #ifdef TARGET_NR_sigpending
6510 case TARGET_NR_sigpending:
6512 sigset_t set;
6513 ret = get_errno(sigpending(&set));
6514 if (!is_error(ret)) {
6515 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6516 goto efault;
6517 host_to_target_old_sigset(p, &set);
6518 unlock_user(p, arg1, sizeof(target_sigset_t));
6521 break;
6522 #endif
6523 case TARGET_NR_rt_sigpending:
6525 sigset_t set;
6526 ret = get_errno(sigpending(&set));
6527 if (!is_error(ret)) {
6528 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6529 goto efault;
6530 host_to_target_sigset(p, &set);
6531 unlock_user(p, arg1, sizeof(target_sigset_t));
6534 break;
6535 #ifdef TARGET_NR_sigsuspend
6536 case TARGET_NR_sigsuspend:
6538 sigset_t set;
6539 #if defined(TARGET_ALPHA)
6540 abi_ulong mask = arg1;
6541 target_to_host_old_sigset(&set, &mask);
6542 #else
6543 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6544 goto efault;
6545 target_to_host_old_sigset(&set, p);
6546 unlock_user(p, arg1, 0);
6547 #endif
6548 ret = get_errno(sigsuspend(&set));
6550 break;
6551 #endif
6552 case TARGET_NR_rt_sigsuspend:
6554 sigset_t set;
6555 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6556 goto efault;
6557 target_to_host_sigset(&set, p);
6558 unlock_user(p, arg1, 0);
6559 ret = get_errno(sigsuspend(&set));
6561 break;
6562 case TARGET_NR_rt_sigtimedwait:
6564 sigset_t set;
6565 struct timespec uts, *puts;
6566 siginfo_t uinfo;
6568 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6569 goto efault;
6570 target_to_host_sigset(&set, p);
6571 unlock_user(p, arg1, 0);
6572 if (arg3) {
6573 puts = &uts;
6574 target_to_host_timespec(puts, arg3);
6575 } else {
6576 puts = NULL;
6578 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6579 if (!is_error(ret)) {
6580 if (arg2) {
6581 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6583 if (!p) {
6584 goto efault;
6586 host_to_target_siginfo(p, &uinfo);
6587 unlock_user(p, arg2, sizeof(target_siginfo_t));
6589 ret = host_to_target_signal(ret);
6592 break;
6593 case TARGET_NR_rt_sigqueueinfo:
6595 siginfo_t uinfo;
6596 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6597 goto efault;
6598 target_to_host_siginfo(&uinfo, p);
6599 unlock_user(p, arg1, 0);
6600 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6602 break;
6603 #ifdef TARGET_NR_sigreturn
6604 case TARGET_NR_sigreturn:
6605 /* NOTE: ret is eax, so not transcoding must be done */
6606 ret = do_sigreturn(cpu_env);
6607 break;
6608 #endif
6609 case TARGET_NR_rt_sigreturn:
6610 /* NOTE: ret is eax, so not transcoding must be done */
6611 ret = do_rt_sigreturn(cpu_env);
6612 break;
6613 case TARGET_NR_sethostname:
6614 if (!(p = lock_user_string(arg1)))
6615 goto efault;
6616 ret = get_errno(sethostname(p, arg2));
6617 unlock_user(p, arg1, 0);
6618 break;
6619 case TARGET_NR_setrlimit:
6621 int resource = target_to_host_resource(arg1);
6622 struct target_rlimit *target_rlim;
6623 struct rlimit rlim;
6624 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6625 goto efault;
6626 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6627 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6628 unlock_user_struct(target_rlim, arg2, 0);
6629 ret = get_errno(setrlimit(resource, &rlim));
6631 break;
6632 case TARGET_NR_getrlimit:
6634 int resource = target_to_host_resource(arg1);
6635 struct target_rlimit *target_rlim;
6636 struct rlimit rlim;
6638 ret = get_errno(getrlimit(resource, &rlim));
6639 if (!is_error(ret)) {
6640 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6641 goto efault;
6642 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6643 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6644 unlock_user_struct(target_rlim, arg2, 1);
6647 break;
6648 case TARGET_NR_getrusage:
6650 struct rusage rusage;
6651 ret = get_errno(getrusage(arg1, &rusage));
6652 if (!is_error(ret)) {
6653 ret = host_to_target_rusage(arg2, &rusage);
6656 break;
6657 case TARGET_NR_gettimeofday:
6659 struct timeval tv;
6660 ret = get_errno(gettimeofday(&tv, NULL));
6661 if (!is_error(ret)) {
6662 if (copy_to_user_timeval(arg1, &tv))
6663 goto efault;
6666 break;
6667 case TARGET_NR_settimeofday:
6669 struct timeval tv, *ptv = NULL;
6670 struct timezone tz, *ptz = NULL;
6672 if (arg1) {
6673 if (copy_from_user_timeval(&tv, arg1)) {
6674 goto efault;
6676 ptv = &tv;
6679 if (arg2) {
6680 if (copy_from_user_timezone(&tz, arg2)) {
6681 goto efault;
6683 ptz = &tz;
6686 ret = get_errno(settimeofday(ptv, ptz));
6688 break;
6689 #if defined(TARGET_NR_select)
6690 case TARGET_NR_select:
6691 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6692 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6693 #else
6695 struct target_sel_arg_struct *sel;
6696 abi_ulong inp, outp, exp, tvp;
6697 long nsel;
6699 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6700 goto efault;
6701 nsel = tswapal(sel->n);
6702 inp = tswapal(sel->inp);
6703 outp = tswapal(sel->outp);
6704 exp = tswapal(sel->exp);
6705 tvp = tswapal(sel->tvp);
6706 unlock_user_struct(sel, arg1, 0);
6707 ret = do_select(nsel, inp, outp, exp, tvp);
6709 #endif
6710 break;
6711 #endif
6712 #ifdef TARGET_NR_pselect6
6713 case TARGET_NR_pselect6:
6715 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6716 fd_set rfds, wfds, efds;
6717 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6718 struct timespec ts, *ts_ptr;
6721 * The 6th arg is actually two args smashed together,
6722 * so we cannot use the C library.
6724 sigset_t set;
6725 struct {
6726 sigset_t *set;
6727 size_t size;
6728 } sig, *sig_ptr;
6730 abi_ulong arg_sigset, arg_sigsize, *arg7;
6731 target_sigset_t *target_sigset;
6733 n = arg1;
6734 rfd_addr = arg2;
6735 wfd_addr = arg3;
6736 efd_addr = arg4;
6737 ts_addr = arg5;
6739 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6740 if (ret) {
6741 goto fail;
6743 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6744 if (ret) {
6745 goto fail;
6747 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6748 if (ret) {
6749 goto fail;
6753 * This takes a timespec, and not a timeval, so we cannot
6754 * use the do_select() helper ...
6756 if (ts_addr) {
6757 if (target_to_host_timespec(&ts, ts_addr)) {
6758 goto efault;
6760 ts_ptr = &ts;
6761 } else {
6762 ts_ptr = NULL;
6765 /* Extract the two packed args for the sigset */
6766 if (arg6) {
6767 sig_ptr = &sig;
6768 sig.size = _NSIG / 8;
6770 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6771 if (!arg7) {
6772 goto efault;
6774 arg_sigset = tswapal(arg7[0]);
6775 arg_sigsize = tswapal(arg7[1]);
6776 unlock_user(arg7, arg6, 0);
6778 if (arg_sigset) {
6779 sig.set = &set;
6780 if (arg_sigsize != sizeof(*target_sigset)) {
6781 /* Like the kernel, we enforce correct size sigsets */
6782 ret = -TARGET_EINVAL;
6783 goto fail;
6785 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6786 sizeof(*target_sigset), 1);
6787 if (!target_sigset) {
6788 goto efault;
6790 target_to_host_sigset(&set, target_sigset);
6791 unlock_user(target_sigset, arg_sigset, 0);
6792 } else {
6793 sig.set = NULL;
6795 } else {
6796 sig_ptr = NULL;
6799 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6800 ts_ptr, sig_ptr));
6802 if (!is_error(ret)) {
6803 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6804 goto efault;
6805 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6806 goto efault;
6807 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6808 goto efault;
6810 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6811 goto efault;
6814 break;
6815 #endif
6816 case TARGET_NR_symlink:
6818 void *p2;
6819 p = lock_user_string(arg1);
6820 p2 = lock_user_string(arg2);
6821 if (!p || !p2)
6822 ret = -TARGET_EFAULT;
6823 else
6824 ret = get_errno(symlink(p, p2));
6825 unlock_user(p2, arg2, 0);
6826 unlock_user(p, arg1, 0);
6828 break;
6829 #if defined(TARGET_NR_symlinkat)
6830 case TARGET_NR_symlinkat:
6832 void *p2;
6833 p = lock_user_string(arg1);
6834 p2 = lock_user_string(arg3);
6835 if (!p || !p2)
6836 ret = -TARGET_EFAULT;
6837 else
6838 ret = get_errno(symlinkat(p, arg2, p2));
6839 unlock_user(p2, arg3, 0);
6840 unlock_user(p, arg1, 0);
6842 break;
6843 #endif
6844 #ifdef TARGET_NR_oldlstat
6845 case TARGET_NR_oldlstat:
6846 goto unimplemented;
6847 #endif
6848 case TARGET_NR_readlink:
6850 void *p2;
6851 p = lock_user_string(arg1);
6852 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6853 if (!p || !p2) {
6854 ret = -TARGET_EFAULT;
6855 } else if (!arg3) {
6856 /* Short circuit this for the magic exe check. */
6857 ret = -TARGET_EINVAL;
6858 } else if (is_proc_myself((const char *)p, "exe")) {
6859 char real[PATH_MAX], *temp;
6860 temp = realpath(exec_path, real);
6861 /* Return value is # of bytes that we wrote to the buffer. */
6862 if (temp == NULL) {
6863 ret = get_errno(-1);
6864 } else {
6865 /* Don't worry about sign mismatch as earlier mapping
6866 * logic would have thrown a bad address error. */
6867 ret = MIN(strlen(real), arg3);
6868 /* We cannot NUL terminate the string. */
6869 memcpy(p2, real, ret);
6871 } else {
6872 ret = get_errno(readlink(path(p), p2, arg3));
6874 unlock_user(p2, arg2, ret);
6875 unlock_user(p, arg1, 0);
6877 break;
6878 #if defined(TARGET_NR_readlinkat)
6879 case TARGET_NR_readlinkat:
6881 void *p2;
6882 p = lock_user_string(arg2);
6883 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6884 if (!p || !p2) {
6885 ret = -TARGET_EFAULT;
6886 } else if (is_proc_myself((const char *)p, "exe")) {
6887 char real[PATH_MAX], *temp;
6888 temp = realpath(exec_path, real);
6889 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
6890 snprintf((char *)p2, arg4, "%s", real);
6891 } else {
6892 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6894 unlock_user(p2, arg3, ret);
6895 unlock_user(p, arg2, 0);
6897 break;
6898 #endif
6899 #ifdef TARGET_NR_uselib
6900 case TARGET_NR_uselib:
6901 goto unimplemented;
6902 #endif
6903 #ifdef TARGET_NR_swapon
6904 case TARGET_NR_swapon:
6905 if (!(p = lock_user_string(arg1)))
6906 goto efault;
6907 ret = get_errno(swapon(p, arg2));
6908 unlock_user(p, arg1, 0);
6909 break;
6910 #endif
6911 case TARGET_NR_reboot:
6912 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6913 /* arg4 must be ignored in all other cases */
6914 p = lock_user_string(arg4);
6915 if (!p) {
6916 goto efault;
6918 ret = get_errno(reboot(arg1, arg2, arg3, p));
6919 unlock_user(p, arg4, 0);
6920 } else {
6921 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6923 break;
6924 #ifdef TARGET_NR_readdir
6925 case TARGET_NR_readdir:
6926 goto unimplemented;
6927 #endif
6928 #ifdef TARGET_NR_mmap
6929 case TARGET_NR_mmap:
6930 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6931 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6932 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6933 || defined(TARGET_S390X)
6935 abi_ulong *v;
6936 abi_ulong v1, v2, v3, v4, v5, v6;
6937 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6938 goto efault;
6939 v1 = tswapal(v[0]);
6940 v2 = tswapal(v[1]);
6941 v3 = tswapal(v[2]);
6942 v4 = tswapal(v[3]);
6943 v5 = tswapal(v[4]);
6944 v6 = tswapal(v[5]);
6945 unlock_user(v, arg1, 0);
6946 ret = get_errno(target_mmap(v1, v2, v3,
6947 target_to_host_bitmask(v4, mmap_flags_tbl),
6948 v5, v6));
6950 #else
6951 ret = get_errno(target_mmap(arg1, arg2, arg3,
6952 target_to_host_bitmask(arg4, mmap_flags_tbl),
6953 arg5,
6954 arg6));
6955 #endif
6956 break;
6957 #endif
6958 #ifdef TARGET_NR_mmap2
6959 case TARGET_NR_mmap2:
6960 #ifndef MMAP_SHIFT
6961 #define MMAP_SHIFT 12
6962 #endif
6963 ret = get_errno(target_mmap(arg1, arg2, arg3,
6964 target_to_host_bitmask(arg4, mmap_flags_tbl),
6965 arg5,
6966 arg6 << MMAP_SHIFT));
6967 break;
6968 #endif
6969 case TARGET_NR_munmap:
6970 ret = get_errno(target_munmap(arg1, arg2));
6971 break;
6972 case TARGET_NR_mprotect:
6974 TaskState *ts = cpu->opaque;
6975 /* Special hack to detect libc making the stack executable. */
6976 if ((arg3 & PROT_GROWSDOWN)
6977 && arg1 >= ts->info->stack_limit
6978 && arg1 <= ts->info->start_stack) {
6979 arg3 &= ~PROT_GROWSDOWN;
6980 arg2 = arg2 + arg1 - ts->info->stack_limit;
6981 arg1 = ts->info->stack_limit;
6984 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6985 break;
6986 #ifdef TARGET_NR_mremap
6987 case TARGET_NR_mremap:
6988 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6989 break;
6990 #endif
6991 /* ??? msync/mlock/munlock are broken for softmmu. */
6992 #ifdef TARGET_NR_msync
6993 case TARGET_NR_msync:
6994 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6995 break;
6996 #endif
6997 #ifdef TARGET_NR_mlock
6998 case TARGET_NR_mlock:
6999 ret = get_errno(mlock(g2h(arg1), arg2));
7000 break;
7001 #endif
7002 #ifdef TARGET_NR_munlock
7003 case TARGET_NR_munlock:
7004 ret = get_errno(munlock(g2h(arg1), arg2));
7005 break;
7006 #endif
7007 #ifdef TARGET_NR_mlockall
7008 case TARGET_NR_mlockall:
7009 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
7010 break;
7011 #endif
7012 #ifdef TARGET_NR_munlockall
7013 case TARGET_NR_munlockall:
7014 ret = get_errno(munlockall());
7015 break;
7016 #endif
7017 case TARGET_NR_truncate:
7018 if (!(p = lock_user_string(arg1)))
7019 goto efault;
7020 ret = get_errno(truncate(p, arg2));
7021 unlock_user(p, arg1, 0);
7022 break;
7023 case TARGET_NR_ftruncate:
7024 ret = get_errno(ftruncate(arg1, arg2));
7025 break;
7026 case TARGET_NR_fchmod:
7027 ret = get_errno(fchmod(arg1, arg2));
7028 break;
7029 #if defined(TARGET_NR_fchmodat)
7030 case TARGET_NR_fchmodat:
7031 if (!(p = lock_user_string(arg2)))
7032 goto efault;
7033 ret = get_errno(fchmodat(arg1, p, arg3, 0));
7034 unlock_user(p, arg2, 0);
7035 break;
7036 #endif
7037 case TARGET_NR_getpriority:
7038 /* Note that negative values are valid for getpriority, so we must
7039 differentiate based on errno settings. */
7040 errno = 0;
7041 ret = getpriority(arg1, arg2);
7042 if (ret == -1 && errno != 0) {
7043 ret = -host_to_target_errno(errno);
7044 break;
7046 #ifdef TARGET_ALPHA
7047 /* Return value is the unbiased priority. Signal no error. */
7048 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
7049 #else
7050 /* Return value is a biased priority to avoid negative numbers. */
7051 ret = 20 - ret;
7052 #endif
7053 break;
7054 case TARGET_NR_setpriority:
7055 ret = get_errno(setpriority(arg1, arg2, arg3));
7056 break;
7057 #ifdef TARGET_NR_profil
7058 case TARGET_NR_profil:
7059 goto unimplemented;
7060 #endif
7061 case TARGET_NR_statfs:
7062 if (!(p = lock_user_string(arg1)))
7063 goto efault;
7064 ret = get_errno(statfs(path(p), &stfs));
7065 unlock_user(p, arg1, 0);
7066 convert_statfs:
7067 if (!is_error(ret)) {
7068 struct target_statfs *target_stfs;
7070 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
7071 goto efault;
7072 __put_user(stfs.f_type, &target_stfs->f_type);
7073 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7074 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7075 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7076 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7077 __put_user(stfs.f_files, &target_stfs->f_files);
7078 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7079 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7080 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7081 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7082 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7083 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7084 unlock_user_struct(target_stfs, arg2, 1);
7086 break;
7087 case TARGET_NR_fstatfs:
7088 ret = get_errno(fstatfs(arg1, &stfs));
7089 goto convert_statfs;
7090 #ifdef TARGET_NR_statfs64
7091 case TARGET_NR_statfs64:
7092 if (!(p = lock_user_string(arg1)))
7093 goto efault;
7094 ret = get_errno(statfs(path(p), &stfs));
7095 unlock_user(p, arg1, 0);
7096 convert_statfs64:
7097 if (!is_error(ret)) {
7098 struct target_statfs64 *target_stfs;
7100 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7101 goto efault;
7102 __put_user(stfs.f_type, &target_stfs->f_type);
7103 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7104 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7105 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7106 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7107 __put_user(stfs.f_files, &target_stfs->f_files);
7108 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7109 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7110 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7111 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7112 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7113 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7114 unlock_user_struct(target_stfs, arg3, 1);
7116 break;
7117 case TARGET_NR_fstatfs64:
7118 ret = get_errno(fstatfs(arg1, &stfs));
7119 goto convert_statfs64;
7120 #endif
7121 #ifdef TARGET_NR_ioperm
7122 case TARGET_NR_ioperm:
7123 goto unimplemented;
7124 #endif
7125 #ifdef TARGET_NR_socketcall
7126 case TARGET_NR_socketcall:
7127 ret = do_socketcall(arg1, arg2);
7128 break;
7129 #endif
7130 #ifdef TARGET_NR_accept
7131 case TARGET_NR_accept:
7132 ret = do_accept4(arg1, arg2, arg3, 0);
7133 break;
7134 #endif
7135 #ifdef TARGET_NR_accept4
7136 case TARGET_NR_accept4:
7137 #ifdef CONFIG_ACCEPT4
7138 ret = do_accept4(arg1, arg2, arg3, arg4);
7139 #else
7140 goto unimplemented;
7141 #endif
7142 break;
7143 #endif
7144 #ifdef TARGET_NR_bind
7145 case TARGET_NR_bind:
7146 ret = do_bind(arg1, arg2, arg3);
7147 break;
7148 #endif
7149 #ifdef TARGET_NR_connect
7150 case TARGET_NR_connect:
7151 ret = do_connect(arg1, arg2, arg3);
7152 break;
7153 #endif
7154 #ifdef TARGET_NR_getpeername
7155 case TARGET_NR_getpeername:
7156 ret = do_getpeername(arg1, arg2, arg3);
7157 break;
7158 #endif
7159 #ifdef TARGET_NR_getsockname
7160 case TARGET_NR_getsockname:
7161 ret = do_getsockname(arg1, arg2, arg3);
7162 break;
7163 #endif
7164 #ifdef TARGET_NR_getsockopt
7165 case TARGET_NR_getsockopt:
7166 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
7167 break;
7168 #endif
7169 #ifdef TARGET_NR_listen
7170 case TARGET_NR_listen:
7171 ret = get_errno(listen(arg1, arg2));
7172 break;
7173 #endif
7174 #ifdef TARGET_NR_recv
7175 case TARGET_NR_recv:
7176 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
7177 break;
7178 #endif
7179 #ifdef TARGET_NR_recvfrom
7180 case TARGET_NR_recvfrom:
7181 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
7182 break;
7183 #endif
7184 #ifdef TARGET_NR_recvmsg
7185 case TARGET_NR_recvmsg:
7186 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
7187 break;
7188 #endif
7189 #ifdef TARGET_NR_send
7190 case TARGET_NR_send:
7191 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
7192 break;
7193 #endif
7194 #ifdef TARGET_NR_sendmsg
7195 case TARGET_NR_sendmsg:
7196 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
7197 break;
7198 #endif
7199 #ifdef TARGET_NR_sendmmsg
7200 case TARGET_NR_sendmmsg:
7201 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
7202 break;
7203 case TARGET_NR_recvmmsg:
7204 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
7205 break;
7206 #endif
7207 #ifdef TARGET_NR_sendto
7208 case TARGET_NR_sendto:
7209 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
7210 break;
7211 #endif
7212 #ifdef TARGET_NR_shutdown
7213 case TARGET_NR_shutdown:
7214 ret = get_errno(shutdown(arg1, arg2));
7215 break;
7216 #endif
7217 #ifdef TARGET_NR_socket
7218 case TARGET_NR_socket:
7219 ret = do_socket(arg1, arg2, arg3);
7220 break;
7221 #endif
7222 #ifdef TARGET_NR_socketpair
7223 case TARGET_NR_socketpair:
7224 ret = do_socketpair(arg1, arg2, arg3, arg4);
7225 break;
7226 #endif
7227 #ifdef TARGET_NR_setsockopt
7228 case TARGET_NR_setsockopt:
7229 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7230 break;
7231 #endif
7233 case TARGET_NR_syslog:
7234 if (!(p = lock_user_string(arg2)))
7235 goto efault;
7236 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7237 unlock_user(p, arg2, 0);
7238 break;
7240 case TARGET_NR_setitimer:
7242 struct itimerval value, ovalue, *pvalue;
7244 if (arg2) {
7245 pvalue = &value;
7246 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7247 || copy_from_user_timeval(&pvalue->it_value,
7248 arg2 + sizeof(struct target_timeval)))
7249 goto efault;
7250 } else {
7251 pvalue = NULL;
7253 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7254 if (!is_error(ret) && arg3) {
7255 if (copy_to_user_timeval(arg3,
7256 &ovalue.it_interval)
7257 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7258 &ovalue.it_value))
7259 goto efault;
7262 break;
7263 case TARGET_NR_getitimer:
7265 struct itimerval value;
7267 ret = get_errno(getitimer(arg1, &value));
7268 if (!is_error(ret) && arg2) {
7269 if (copy_to_user_timeval(arg2,
7270 &value.it_interval)
7271 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7272 &value.it_value))
7273 goto efault;
7276 break;
7277 case TARGET_NR_stat:
7278 if (!(p = lock_user_string(arg1)))
7279 goto efault;
7280 ret = get_errno(stat(path(p), &st));
7281 unlock_user(p, arg1, 0);
7282 goto do_stat;
7283 case TARGET_NR_lstat:
7284 if (!(p = lock_user_string(arg1)))
7285 goto efault;
7286 ret = get_errno(lstat(path(p), &st));
7287 unlock_user(p, arg1, 0);
7288 goto do_stat;
7289 case TARGET_NR_fstat:
7291 ret = get_errno(fstat(arg1, &st));
7292 do_stat:
7293 if (!is_error(ret)) {
7294 struct target_stat *target_st;
7296 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7297 goto efault;
7298 memset(target_st, 0, sizeof(*target_st));
7299 __put_user(st.st_dev, &target_st->st_dev);
7300 __put_user(st.st_ino, &target_st->st_ino);
7301 __put_user(st.st_mode, &target_st->st_mode);
7302 __put_user(st.st_uid, &target_st->st_uid);
7303 __put_user(st.st_gid, &target_st->st_gid);
7304 __put_user(st.st_nlink, &target_st->st_nlink);
7305 __put_user(st.st_rdev, &target_st->st_rdev);
7306 __put_user(st.st_size, &target_st->st_size);
7307 __put_user(st.st_blksize, &target_st->st_blksize);
7308 __put_user(st.st_blocks, &target_st->st_blocks);
7309 __put_user(st.st_atime, &target_st->target_st_atime);
7310 __put_user(st.st_mtime, &target_st->target_st_mtime);
7311 __put_user(st.st_ctime, &target_st->target_st_ctime);
7312 unlock_user_struct(target_st, arg2, 1);
7315 break;
7316 #ifdef TARGET_NR_olduname
7317 case TARGET_NR_olduname:
7318 goto unimplemented;
7319 #endif
7320 #ifdef TARGET_NR_iopl
7321 case TARGET_NR_iopl:
7322 goto unimplemented;
7323 #endif
7324 case TARGET_NR_vhangup:
7325 ret = get_errno(vhangup());
7326 break;
7327 #ifdef TARGET_NR_idle
7328 case TARGET_NR_idle:
7329 goto unimplemented;
7330 #endif
7331 #ifdef TARGET_NR_syscall
7332 case TARGET_NR_syscall:
7333 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7334 arg6, arg7, arg8, 0);
7335 break;
7336 #endif
7337 case TARGET_NR_wait4:
7339 int status;
7340 abi_long status_ptr = arg2;
7341 struct rusage rusage, *rusage_ptr;
7342 abi_ulong target_rusage = arg4;
7343 abi_long rusage_err;
7344 if (target_rusage)
7345 rusage_ptr = &rusage;
7346 else
7347 rusage_ptr = NULL;
7348 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7349 if (!is_error(ret)) {
7350 if (status_ptr && ret) {
7351 status = host_to_target_waitstatus(status);
7352 if (put_user_s32(status, status_ptr))
7353 goto efault;
7355 if (target_rusage) {
7356 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7357 if (rusage_err) {
7358 ret = rusage_err;
7363 break;
7364 #ifdef TARGET_NR_swapoff
7365 case TARGET_NR_swapoff:
7366 if (!(p = lock_user_string(arg1)))
7367 goto efault;
7368 ret = get_errno(swapoff(p));
7369 unlock_user(p, arg1, 0);
7370 break;
7371 #endif
7372 case TARGET_NR_sysinfo:
7374 struct target_sysinfo *target_value;
7375 struct sysinfo value;
7376 ret = get_errno(sysinfo(&value));
7377 if (!is_error(ret) && arg1)
7379 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7380 goto efault;
7381 __put_user(value.uptime, &target_value->uptime);
7382 __put_user(value.loads[0], &target_value->loads[0]);
7383 __put_user(value.loads[1], &target_value->loads[1]);
7384 __put_user(value.loads[2], &target_value->loads[2]);
7385 __put_user(value.totalram, &target_value->totalram);
7386 __put_user(value.freeram, &target_value->freeram);
7387 __put_user(value.sharedram, &target_value->sharedram);
7388 __put_user(value.bufferram, &target_value->bufferram);
7389 __put_user(value.totalswap, &target_value->totalswap);
7390 __put_user(value.freeswap, &target_value->freeswap);
7391 __put_user(value.procs, &target_value->procs);
7392 __put_user(value.totalhigh, &target_value->totalhigh);
7393 __put_user(value.freehigh, &target_value->freehigh);
7394 __put_user(value.mem_unit, &target_value->mem_unit);
7395 unlock_user_struct(target_value, arg1, 1);
7398 break;
7399 #ifdef TARGET_NR_ipc
7400 case TARGET_NR_ipc:
7401 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7402 break;
7403 #endif
7404 #ifdef TARGET_NR_semget
7405 case TARGET_NR_semget:
7406 ret = get_errno(semget(arg1, arg2, arg3));
7407 break;
7408 #endif
7409 #ifdef TARGET_NR_semop
7410 case TARGET_NR_semop:
7411 ret = do_semop(arg1, arg2, arg3);
7412 break;
7413 #endif
7414 #ifdef TARGET_NR_semctl
7415 case TARGET_NR_semctl:
7416 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
7417 break;
7418 #endif
7419 #ifdef TARGET_NR_msgctl
7420 case TARGET_NR_msgctl:
7421 ret = do_msgctl(arg1, arg2, arg3);
7422 break;
7423 #endif
7424 #ifdef TARGET_NR_msgget
7425 case TARGET_NR_msgget:
7426 ret = get_errno(msgget(arg1, arg2));
7427 break;
7428 #endif
7429 #ifdef TARGET_NR_msgrcv
7430 case TARGET_NR_msgrcv:
7431 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7432 break;
7433 #endif
7434 #ifdef TARGET_NR_msgsnd
7435 case TARGET_NR_msgsnd:
7436 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7437 break;
7438 #endif
7439 #ifdef TARGET_NR_shmget
7440 case TARGET_NR_shmget:
7441 ret = get_errno(shmget(arg1, arg2, arg3));
7442 break;
7443 #endif
7444 #ifdef TARGET_NR_shmctl
7445 case TARGET_NR_shmctl:
7446 ret = do_shmctl(arg1, arg2, arg3);
7447 break;
7448 #endif
7449 #ifdef TARGET_NR_shmat
7450 case TARGET_NR_shmat:
7451 ret = do_shmat(arg1, arg2, arg3);
7452 break;
7453 #endif
7454 #ifdef TARGET_NR_shmdt
7455 case TARGET_NR_shmdt:
7456 ret = do_shmdt(arg1);
7457 break;
7458 #endif
7459 case TARGET_NR_fsync:
7460 ret = get_errno(fsync(arg1));
7461 break;
7462 case TARGET_NR_clone:
7463 /* Linux manages to have three different orderings for its
7464 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7465 * match the kernel's CONFIG_CLONE_* settings.
7466 * Microblaze is further special in that it uses a sixth
7467 * implicit argument to clone for the TLS pointer.
7469 #if defined(TARGET_MICROBLAZE)
7470 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7471 #elif defined(TARGET_CLONE_BACKWARDS)
7472 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7473 #elif defined(TARGET_CLONE_BACKWARDS2)
7474 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7475 #else
7476 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7477 #endif
7478 break;
7479 #ifdef __NR_exit_group
7480 /* new thread calls */
7481 case TARGET_NR_exit_group:
7482 #ifdef TARGET_GPROF
7483 _mcleanup();
7484 #endif
7485 gdb_exit(cpu_env, arg1);
7486 ret = get_errno(exit_group(arg1));
7487 break;
7488 #endif
7489 case TARGET_NR_setdomainname:
7490 if (!(p = lock_user_string(arg1)))
7491 goto efault;
7492 ret = get_errno(setdomainname(p, arg2));
7493 unlock_user(p, arg1, 0);
7494 break;
7495 case TARGET_NR_uname:
7496 /* no need to transcode because we use the linux syscall */
7498 struct new_utsname * buf;
7500 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7501 goto efault;
7502 ret = get_errno(sys_uname(buf));
7503 if (!is_error(ret)) {
7504 /* Overrite the native machine name with whatever is being
7505 emulated. */
7506 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7507 /* Allow the user to override the reported release. */
7508 if (qemu_uname_release && *qemu_uname_release)
7509 strcpy (buf->release, qemu_uname_release);
7511 unlock_user_struct(buf, arg1, 1);
7513 break;
7514 #ifdef TARGET_I386
7515 case TARGET_NR_modify_ldt:
7516 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7517 break;
7518 #if !defined(TARGET_X86_64)
7519 case TARGET_NR_vm86old:
7520 goto unimplemented;
7521 case TARGET_NR_vm86:
7522 ret = do_vm86(cpu_env, arg1, arg2);
7523 break;
7524 #endif
7525 #endif
7526 case TARGET_NR_adjtimex:
7527 goto unimplemented;
7528 #ifdef TARGET_NR_create_module
7529 case TARGET_NR_create_module:
7530 #endif
7531 case TARGET_NR_init_module:
7532 case TARGET_NR_delete_module:
7533 #ifdef TARGET_NR_get_kernel_syms
7534 case TARGET_NR_get_kernel_syms:
7535 #endif
7536 goto unimplemented;
7537 case TARGET_NR_quotactl:
7538 goto unimplemented;
7539 case TARGET_NR_getpgid:
7540 ret = get_errno(getpgid(arg1));
7541 break;
7542 case TARGET_NR_fchdir:
7543 ret = get_errno(fchdir(arg1));
7544 break;
7545 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7546 case TARGET_NR_bdflush:
7547 goto unimplemented;
7548 #endif
7549 #ifdef TARGET_NR_sysfs
7550 case TARGET_NR_sysfs:
7551 goto unimplemented;
7552 #endif
7553 case TARGET_NR_personality:
7554 ret = get_errno(personality(arg1));
7555 break;
7556 #ifdef TARGET_NR_afs_syscall
7557 case TARGET_NR_afs_syscall:
7558 goto unimplemented;
7559 #endif
7560 #ifdef TARGET_NR__llseek /* Not on alpha */
7561 case TARGET_NR__llseek:
7563 int64_t res;
7564 #if !defined(__NR_llseek)
7565 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7566 if (res == -1) {
7567 ret = get_errno(res);
7568 } else {
7569 ret = 0;
7571 #else
7572 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7573 #endif
7574 if ((ret == 0) && put_user_s64(res, arg4)) {
7575 goto efault;
7578 break;
7579 #endif
7580 case TARGET_NR_getdents:
7581 #ifdef __NR_getdents
7582 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7584 struct target_dirent *target_dirp;
7585 struct linux_dirent *dirp;
7586 abi_long count = arg3;
7588 dirp = malloc(count);
7589 if (!dirp) {
7590 ret = -TARGET_ENOMEM;
7591 goto fail;
7594 ret = get_errno(sys_getdents(arg1, dirp, count));
7595 if (!is_error(ret)) {
7596 struct linux_dirent *de;
7597 struct target_dirent *tde;
7598 int len = ret;
7599 int reclen, treclen;
7600 int count1, tnamelen;
7602 count1 = 0;
7603 de = dirp;
7604 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7605 goto efault;
7606 tde = target_dirp;
7607 while (len > 0) {
7608 reclen = de->d_reclen;
7609 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7610 assert(tnamelen >= 0);
7611 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7612 assert(count1 + treclen <= count);
7613 tde->d_reclen = tswap16(treclen);
7614 tde->d_ino = tswapal(de->d_ino);
7615 tde->d_off = tswapal(de->d_off);
7616 memcpy(tde->d_name, de->d_name, tnamelen);
7617 de = (struct linux_dirent *)((char *)de + reclen);
7618 len -= reclen;
7619 tde = (struct target_dirent *)((char *)tde + treclen);
7620 count1 += treclen;
7622 ret = count1;
7623 unlock_user(target_dirp, arg2, ret);
7625 free(dirp);
7627 #else
7629 struct linux_dirent *dirp;
7630 abi_long count = arg3;
7632 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7633 goto efault;
7634 ret = get_errno(sys_getdents(arg1, dirp, count));
7635 if (!is_error(ret)) {
7636 struct linux_dirent *de;
7637 int len = ret;
7638 int reclen;
7639 de = dirp;
7640 while (len > 0) {
7641 reclen = de->d_reclen;
7642 if (reclen > len)
7643 break;
7644 de->d_reclen = tswap16(reclen);
7645 tswapls(&de->d_ino);
7646 tswapls(&de->d_off);
7647 de = (struct linux_dirent *)((char *)de + reclen);
7648 len -= reclen;
7651 unlock_user(dirp, arg2, ret);
7653 #endif
7654 #else
7655 /* Implement getdents in terms of getdents64 */
7657 struct linux_dirent64 *dirp;
7658 abi_long count = arg3;
7660 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7661 if (!dirp) {
7662 goto efault;
7664 ret = get_errno(sys_getdents64(arg1, dirp, count));
7665 if (!is_error(ret)) {
7666 /* Convert the dirent64 structs to target dirent. We do this
7667 * in-place, since we can guarantee that a target_dirent is no
7668 * larger than a dirent64; however this means we have to be
7669 * careful to read everything before writing in the new format.
7671 struct linux_dirent64 *de;
7672 struct target_dirent *tde;
7673 int len = ret;
7674 int tlen = 0;
7676 de = dirp;
7677 tde = (struct target_dirent *)dirp;
7678 while (len > 0) {
7679 int namelen, treclen;
7680 int reclen = de->d_reclen;
7681 uint64_t ino = de->d_ino;
7682 int64_t off = de->d_off;
7683 uint8_t type = de->d_type;
7685 namelen = strlen(de->d_name);
7686 treclen = offsetof(struct target_dirent, d_name)
7687 + namelen + 2;
7688 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7690 memmove(tde->d_name, de->d_name, namelen + 1);
7691 tde->d_ino = tswapal(ino);
7692 tde->d_off = tswapal(off);
7693 tde->d_reclen = tswap16(treclen);
7694 /* The target_dirent type is in what was formerly a padding
7695 * byte at the end of the structure:
7697 *(((char *)tde) + treclen - 1) = type;
7699 de = (struct linux_dirent64 *)((char *)de + reclen);
7700 tde = (struct target_dirent *)((char *)tde + treclen);
7701 len -= reclen;
7702 tlen += treclen;
7704 ret = tlen;
7706 unlock_user(dirp, arg2, ret);
7708 #endif
7709 break;
7710 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7711 case TARGET_NR_getdents64:
7713 struct linux_dirent64 *dirp;
7714 abi_long count = arg3;
7715 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7716 goto efault;
7717 ret = get_errno(sys_getdents64(arg1, dirp, count));
7718 if (!is_error(ret)) {
7719 struct linux_dirent64 *de;
7720 int len = ret;
7721 int reclen;
7722 de = dirp;
7723 while (len > 0) {
7724 reclen = de->d_reclen;
7725 if (reclen > len)
7726 break;
7727 de->d_reclen = tswap16(reclen);
7728 tswap64s((uint64_t *)&de->d_ino);
7729 tswap64s((uint64_t *)&de->d_off);
7730 de = (struct linux_dirent64 *)((char *)de + reclen);
7731 len -= reclen;
7734 unlock_user(dirp, arg2, ret);
7736 break;
7737 #endif /* TARGET_NR_getdents64 */
7738 #if defined(TARGET_NR__newselect)
7739 case TARGET_NR__newselect:
7740 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7741 break;
7742 #endif
7743 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7744 # ifdef TARGET_NR_poll
7745 case TARGET_NR_poll:
7746 # endif
7747 # ifdef TARGET_NR_ppoll
7748 case TARGET_NR_ppoll:
7749 # endif
7751 struct target_pollfd *target_pfd;
7752 unsigned int nfds = arg2;
7753 int timeout = arg3;
7754 struct pollfd *pfd;
7755 unsigned int i;
7757 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7758 if (!target_pfd)
7759 goto efault;
7761 pfd = alloca(sizeof(struct pollfd) * nfds);
7762 for(i = 0; i < nfds; i++) {
7763 pfd[i].fd = tswap32(target_pfd[i].fd);
7764 pfd[i].events = tswap16(target_pfd[i].events);
7767 # ifdef TARGET_NR_ppoll
7768 if (num == TARGET_NR_ppoll) {
7769 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7770 target_sigset_t *target_set;
7771 sigset_t _set, *set = &_set;
7773 if (arg3) {
7774 if (target_to_host_timespec(timeout_ts, arg3)) {
7775 unlock_user(target_pfd, arg1, 0);
7776 goto efault;
7778 } else {
7779 timeout_ts = NULL;
7782 if (arg4) {
7783 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7784 if (!target_set) {
7785 unlock_user(target_pfd, arg1, 0);
7786 goto efault;
7788 target_to_host_sigset(set, target_set);
7789 } else {
7790 set = NULL;
7793 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7795 if (!is_error(ret) && arg3) {
7796 host_to_target_timespec(arg3, timeout_ts);
7798 if (arg4) {
7799 unlock_user(target_set, arg4, 0);
7801 } else
7802 # endif
7803 ret = get_errno(poll(pfd, nfds, timeout));
7805 if (!is_error(ret)) {
7806 for(i = 0; i < nfds; i++) {
7807 target_pfd[i].revents = tswap16(pfd[i].revents);
7810 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7812 break;
7813 #endif
7814 case TARGET_NR_flock:
7815 /* NOTE: the flock constant seems to be the same for every
7816 Linux platform */
7817 ret = get_errno(flock(arg1, arg2));
7818 break;
7819 case TARGET_NR_readv:
7821 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7822 if (vec != NULL) {
7823 ret = get_errno(readv(arg1, vec, arg3));
7824 unlock_iovec(vec, arg2, arg3, 1);
7825 } else {
7826 ret = -host_to_target_errno(errno);
7829 break;
7830 case TARGET_NR_writev:
7832 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7833 if (vec != NULL) {
7834 ret = get_errno(writev(arg1, vec, arg3));
7835 unlock_iovec(vec, arg2, arg3, 0);
7836 } else {
7837 ret = -host_to_target_errno(errno);
7840 break;
7841 case TARGET_NR_getsid:
7842 ret = get_errno(getsid(arg1));
7843 break;
7844 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7845 case TARGET_NR_fdatasync:
7846 ret = get_errno(fdatasync(arg1));
7847 break;
7848 #endif
7849 case TARGET_NR__sysctl:
7850 /* We don't implement this, but ENOTDIR is always a safe
7851 return value. */
7852 ret = -TARGET_ENOTDIR;
7853 break;
/* sched_getaffinity(pid, cpusetsize, mask): the host call wants a buffer
 * that is a whole number of host ulongs, so the guest's arg2 is first
 * checked for abi_ulong alignment and then rounded up to host-ulong
 * granularity for the scratch buffer; at most arg2 bytes go back to the
 * guest.  NOTE(review): this listing is a scraped blob view -- the
 * leading "78xx" tokens and the missing brace/comment-opener lines are
 * extraction artifacts, not code. */
7854 case TARGET_NR_sched_getaffinity:
7856 unsigned int mask_size;
7857 unsigned long *mask;
/* (scrape dropped the opening of this block comment) */
7860 * sched_getaffinity needs multiples of ulong, so need to take
7861 * care of mismatches between target ulong and host ulong sizes.
/* Guest buffer must be a multiple of the target's ulong size. */
7863 if (arg2 & (sizeof(abi_ulong) - 1)) {
7864 ret = -TARGET_EINVAL;
7865 break;
/* Round up to host-ulong granularity for the host syscall. */
7867 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7869 mask = alloca(mask_size);
/* On success ret is the number of bytes written by the host kernel
 * (assumes sys_sched_getaffinity follows the raw-syscall convention --
 * TODO confirm against the sys_sched_getaffinity wrapper). */
7870 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7872 if (!is_error(ret)) {
7873 if (ret > arg2) {
7874 /* More data returned than the caller's buffer will fit.
7875 * This only happens if sizeof(abi_long) < sizeof(long)
7876 * and the caller passed us a buffer holding an odd number
7877 * of abi_longs. If the host kernel is actually using the
7878 * extra 4 bytes then fail EINVAL; otherwise we can just
7879 * ignore them and only copy the interesting part.
7881 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
/* arg2 * 8 = number of CPU bits the guest buffer can hold. */
7882 if (numcpus > arg2 * 8) {
7883 ret = -TARGET_EINVAL;
7884 break;
/* Trailing host bytes unused: clamp to the guest's size. */
7886 ret = arg2;
7889 if (copy_to_user(arg3, mask, ret)) {
7890 goto efault;
7894 break;
7895 case TARGET_NR_sched_setaffinity:
7897 unsigned int mask_size;
7898 unsigned long *mask;
7901 * sched_setaffinity needs multiples of ulong, so need to take
7902 * care of mismatches between target ulong and host ulong sizes.
7904 if (arg2 & (sizeof(abi_ulong) - 1)) {
7905 ret = -TARGET_EINVAL;
7906 break;
7908 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7910 mask = alloca(mask_size);
7911 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7912 goto efault;
7914 memcpy(mask, p, arg2);
7915 unlock_user_struct(p, arg2, 0);
7917 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7919 break;
/* sched_setparam(pid, param): copy the guest's struct sched_param
 * (only sched_priority is defined), byte-swap it, and pass it to the
 * host.  NOTE(review): the bare `return` below exits do_syscall()
 * directly, bypassing the common fail/trace path used elsewhere in
 * this switch -- confirm this is intentional. */
7920 case TARGET_NR_sched_setparam:
7922 struct sched_param *target_schp;
7923 struct sched_param schp;
/* NULL param pointer: kernel returns EINVAL for this case. */
7925 if (arg2 == 0) {
7926 return -TARGET_EINVAL;
7928 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7929 goto efault;
/* Convert guest-endian priority to host endianness. */
7930 schp.sched_priority = tswap32(target_schp->sched_priority);
7931 unlock_user_struct(target_schp, arg2, 0);
7932 ret = get_errno(sched_setparam(arg1, &schp));
7934 break;
7935 case TARGET_NR_sched_getparam:
7937 struct sched_param *target_schp;
7938 struct sched_param schp;
7940 if (arg2 == 0) {
7941 return -TARGET_EINVAL;
7943 ret = get_errno(sched_getparam(arg1, &schp));
7944 if (!is_error(ret)) {
7945 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7946 goto efault;
7947 target_schp->sched_priority = tswap32(schp.sched_priority);
7948 unlock_user_struct(target_schp, arg2, 1);
7951 break;
7952 case TARGET_NR_sched_setscheduler:
7954 struct sched_param *target_schp;
7955 struct sched_param schp;
7956 if (arg3 == 0) {
7957 return -TARGET_EINVAL;
7959 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7960 goto efault;
7961 schp.sched_priority = tswap32(target_schp->sched_priority);
7962 unlock_user_struct(target_schp, arg3, 0);
7963 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7965 break;
7966 case TARGET_NR_sched_getscheduler:
7967 ret = get_errno(sched_getscheduler(arg1));
7968 break;
7969 case TARGET_NR_sched_yield:
7970 ret = get_errno(sched_yield());
7971 break;
7972 case TARGET_NR_sched_get_priority_max:
7973 ret = get_errno(sched_get_priority_max(arg1));
7974 break;
7975 case TARGET_NR_sched_get_priority_min:
7976 ret = get_errno(sched_get_priority_min(arg1));
7977 break;
7978 case TARGET_NR_sched_rr_get_interval:
7980 struct timespec ts;
7981 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7982 if (!is_error(ret)) {
7983 ret = host_to_target_timespec(arg2, &ts);
7986 break;
7987 case TARGET_NR_nanosleep:
7989 struct timespec req, rem;
7990 target_to_host_timespec(&req, arg1);
7991 ret = get_errno(nanosleep(&req, &rem));
7992 if (is_error(ret) && arg2) {
7993 host_to_target_timespec(arg2, &rem);
7996 break;
7997 #ifdef TARGET_NR_query_module
7998 case TARGET_NR_query_module:
7999 goto unimplemented;
8000 #endif
8001 #ifdef TARGET_NR_nfsservctl
8002 case TARGET_NR_nfsservctl:
8003 goto unimplemented;
8004 #endif
8005 case TARGET_NR_prctl:
8006 switch (arg1) {
8007 case PR_GET_PDEATHSIG:
8009 int deathsig;
8010 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
8011 if (!is_error(ret) && arg2
8012 && put_user_ual(deathsig, arg2)) {
8013 goto efault;
8015 break;
8017 #ifdef PR_GET_NAME
8018 case PR_GET_NAME:
8020 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
8021 if (!name) {
8022 goto efault;
8024 ret = get_errno(prctl(arg1, (unsigned long)name,
8025 arg3, arg4, arg5));
8026 unlock_user(name, arg2, 16);
8027 break;
8029 case PR_SET_NAME:
8031 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
8032 if (!name) {
8033 goto efault;
8035 ret = get_errno(prctl(arg1, (unsigned long)name,
8036 arg3, arg4, arg5));
8037 unlock_user(name, arg2, 0);
8038 break;
8040 #endif
8041 default:
8042 /* Most prctl options have no pointer arguments */
8043 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
8044 break;
8046 break;
8047 #ifdef TARGET_NR_arch_prctl
8048 case TARGET_NR_arch_prctl:
8049 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8050 ret = do_arch_prctl(cpu_env, arg1, arg2);
8051 break;
8052 #else
8053 goto unimplemented;
8054 #endif
8055 #endif
8056 #ifdef TARGET_NR_pread64
8057 case TARGET_NR_pread64:
8058 if (regpairs_aligned(cpu_env)) {
8059 arg4 = arg5;
8060 arg5 = arg6;
8062 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8063 goto efault;
8064 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
8065 unlock_user(p, arg2, ret);
8066 break;
8067 case TARGET_NR_pwrite64:
8068 if (regpairs_aligned(cpu_env)) {
8069 arg4 = arg5;
8070 arg5 = arg6;
8072 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8073 goto efault;
8074 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
8075 unlock_user(p, arg2, 0);
8076 break;
8077 #endif
8078 case TARGET_NR_getcwd:
8079 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
8080 goto efault;
8081 ret = get_errno(sys_getcwd1(p, arg2));
8082 unlock_user(p, arg1, ret);
8083 break;
8084 case TARGET_NR_capget:
8085 case TARGET_NR_capset:
8087 struct target_user_cap_header *target_header;
8088 struct target_user_cap_data *target_data = NULL;
8089 struct __user_cap_header_struct header;
8090 struct __user_cap_data_struct data[2];
8091 struct __user_cap_data_struct *dataptr = NULL;
8092 int i, target_datalen;
8093 int data_items = 1;
8095 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
8096 goto efault;
8098 header.version = tswap32(target_header->version);
8099 header.pid = tswap32(target_header->pid);
8101 if (header.version != _LINUX_CAPABILITY_VERSION) {
8102 /* Version 2 and up takes pointer to two user_data structs */
8103 data_items = 2;
8106 target_datalen = sizeof(*target_data) * data_items;
8108 if (arg2) {
8109 if (num == TARGET_NR_capget) {
8110 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8111 } else {
8112 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8114 if (!target_data) {
8115 unlock_user_struct(target_header, arg1, 0);
8116 goto efault;
8119 if (num == TARGET_NR_capset) {
8120 for (i = 0; i < data_items; i++) {
8121 data[i].effective = tswap32(target_data[i].effective);
8122 data[i].permitted = tswap32(target_data[i].permitted);
8123 data[i].inheritable = tswap32(target_data[i].inheritable);
8127 dataptr = data;
8130 if (num == TARGET_NR_capget) {
8131 ret = get_errno(capget(&header, dataptr));
8132 } else {
8133 ret = get_errno(capset(&header, dataptr));
8136 /* The kernel always updates version for both capget and capset */
8137 target_header->version = tswap32(header.version);
8138 unlock_user_struct(target_header, arg1, 1);
8140 if (arg2) {
8141 if (num == TARGET_NR_capget) {
8142 for (i = 0; i < data_items; i++) {
8143 target_data[i].effective = tswap32(data[i].effective);
8144 target_data[i].permitted = tswap32(data[i].permitted);
8145 target_data[i].inheritable = tswap32(data[i].inheritable);
8147 unlock_user(target_data, arg2, target_datalen);
8148 } else {
8149 unlock_user(target_data, arg2, 0);
8152 break;
8154 case TARGET_NR_sigaltstack:
8155 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
8156 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
8157 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
8158 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8159 break;
8160 #else
8161 goto unimplemented;
8162 #endif
8164 #ifdef CONFIG_SENDFILE
8165 case TARGET_NR_sendfile:
8167 off_t *offp = NULL;
8168 off_t off;
8169 if (arg3) {
8170 ret = get_user_sal(off, arg3);
8171 if (is_error(ret)) {
8172 break;
8174 offp = &off;
8176 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8177 if (!is_error(ret) && arg3) {
8178 abi_long ret2 = put_user_sal(off, arg3);
8179 if (is_error(ret2)) {
8180 ret = ret2;
8183 break;
8185 #ifdef TARGET_NR_sendfile64
8186 case TARGET_NR_sendfile64:
8188 off_t *offp = NULL;
8189 off_t off;
8190 if (arg3) {
8191 ret = get_user_s64(off, arg3);
8192 if (is_error(ret)) {
8193 break;
8195 offp = &off;
8197 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8198 if (!is_error(ret) && arg3) {
8199 abi_long ret2 = put_user_s64(off, arg3);
8200 if (is_error(ret2)) {
8201 ret = ret2;
8204 break;
8206 #endif
8207 #else
8208 case TARGET_NR_sendfile:
8209 #ifdef TARGET_NR_sendfile64
8210 case TARGET_NR_sendfile64:
8211 #endif
8212 goto unimplemented;
8213 #endif
8215 #ifdef TARGET_NR_getpmsg
8216 case TARGET_NR_getpmsg:
8217 goto unimplemented;
8218 #endif
8219 #ifdef TARGET_NR_putpmsg
8220 case TARGET_NR_putpmsg:
8221 goto unimplemented;
8222 #endif
8223 #ifdef TARGET_NR_vfork
8224 case TARGET_NR_vfork:
8225 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8226 0, 0, 0, 0));
8227 break;
8228 #endif
8229 #ifdef TARGET_NR_ugetrlimit
8230 case TARGET_NR_ugetrlimit:
8232 struct rlimit rlim;
8233 int resource = target_to_host_resource(arg1);
8234 ret = get_errno(getrlimit(resource, &rlim));
8235 if (!is_error(ret)) {
8236 struct target_rlimit *target_rlim;
8237 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8238 goto efault;
8239 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8240 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8241 unlock_user_struct(target_rlim, arg2, 1);
8243 break;
8245 #endif
8246 #ifdef TARGET_NR_truncate64
8247 case TARGET_NR_truncate64:
8248 if (!(p = lock_user_string(arg1)))
8249 goto efault;
8250 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8251 unlock_user(p, arg1, 0);
8252 break;
8253 #endif
8254 #ifdef TARGET_NR_ftruncate64
8255 case TARGET_NR_ftruncate64:
8256 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8257 break;
8258 #endif
8259 #ifdef TARGET_NR_stat64
8260 case TARGET_NR_stat64:
8261 if (!(p = lock_user_string(arg1)))
8262 goto efault;
8263 ret = get_errno(stat(path(p), &st));
8264 unlock_user(p, arg1, 0);
8265 if (!is_error(ret))
8266 ret = host_to_target_stat64(cpu_env, arg2, &st);
8267 break;
8268 #endif
8269 #ifdef TARGET_NR_lstat64
8270 case TARGET_NR_lstat64:
8271 if (!(p = lock_user_string(arg1)))
8272 goto efault;
8273 ret = get_errno(lstat(path(p), &st));
8274 unlock_user(p, arg1, 0);
8275 if (!is_error(ret))
8276 ret = host_to_target_stat64(cpu_env, arg2, &st);
8277 break;
8278 #endif
8279 #ifdef TARGET_NR_fstat64
8280 case TARGET_NR_fstat64:
8281 ret = get_errno(fstat(arg1, &st));
8282 if (!is_error(ret))
8283 ret = host_to_target_stat64(cpu_env, arg2, &st);
8284 break;
8285 #endif
8286 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8287 #ifdef TARGET_NR_fstatat64
8288 case TARGET_NR_fstatat64:
8289 #endif
8290 #ifdef TARGET_NR_newfstatat
8291 case TARGET_NR_newfstatat:
8292 #endif
8293 if (!(p = lock_user_string(arg2)))
8294 goto efault;
8295 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8296 if (!is_error(ret))
8297 ret = host_to_target_stat64(cpu_env, arg3, &st);
8298 break;
8299 #endif
8300 case TARGET_NR_lchown:
8301 if (!(p = lock_user_string(arg1)))
8302 goto efault;
8303 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8304 unlock_user(p, arg1, 0);
8305 break;
8306 #ifdef TARGET_NR_getuid
8307 case TARGET_NR_getuid:
8308 ret = get_errno(high2lowuid(getuid()));
8309 break;
8310 #endif
8311 #ifdef TARGET_NR_getgid
8312 case TARGET_NR_getgid:
8313 ret = get_errno(high2lowgid(getgid()));
8314 break;
8315 #endif
8316 #ifdef TARGET_NR_geteuid
8317 case TARGET_NR_geteuid:
8318 ret = get_errno(high2lowuid(geteuid()));
8319 break;
8320 #endif
8321 #ifdef TARGET_NR_getegid
8322 case TARGET_NR_getegid:
8323 ret = get_errno(high2lowgid(getegid()));
8324 break;
8325 #endif
8326 case TARGET_NR_setreuid:
8327 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8328 break;
8329 case TARGET_NR_setregid:
8330 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8331 break;
8332 case TARGET_NR_getgroups:
8334 int gidsetsize = arg1;
8335 target_id *target_grouplist;
8336 gid_t *grouplist;
8337 int i;
8339 grouplist = alloca(gidsetsize * sizeof(gid_t));
8340 ret = get_errno(getgroups(gidsetsize, grouplist));
8341 if (gidsetsize == 0)
8342 break;
8343 if (!is_error(ret)) {
8344 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8345 if (!target_grouplist)
8346 goto efault;
8347 for(i = 0;i < ret; i++)
8348 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8349 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8352 break;
8353 case TARGET_NR_setgroups:
8355 int gidsetsize = arg1;
8356 target_id *target_grouplist;
8357 gid_t *grouplist = NULL;
8358 int i;
8359 if (gidsetsize) {
8360 grouplist = alloca(gidsetsize * sizeof(gid_t));
8361 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8362 if (!target_grouplist) {
8363 ret = -TARGET_EFAULT;
8364 goto fail;
8366 for (i = 0; i < gidsetsize; i++) {
8367 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8369 unlock_user(target_grouplist, arg2, 0);
8371 ret = get_errno(setgroups(gidsetsize, grouplist));
8373 break;
8374 case TARGET_NR_fchown:
8375 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8376 break;
8377 #if defined(TARGET_NR_fchownat)
8378 case TARGET_NR_fchownat:
8379 if (!(p = lock_user_string(arg2)))
8380 goto efault;
8381 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8382 low2highgid(arg4), arg5));
8383 unlock_user(p, arg2, 0);
8384 break;
8385 #endif
8386 #ifdef TARGET_NR_setresuid
8387 case TARGET_NR_setresuid:
8388 ret = get_errno(setresuid(low2highuid(arg1),
8389 low2highuid(arg2),
8390 low2highuid(arg3)));
8391 break;
8392 #endif
8393 #ifdef TARGET_NR_getresuid
8394 case TARGET_NR_getresuid:
8396 uid_t ruid, euid, suid;
8397 ret = get_errno(getresuid(&ruid, &euid, &suid));
8398 if (!is_error(ret)) {
8399 if (put_user_id(high2lowuid(ruid), arg1)
8400 || put_user_id(high2lowuid(euid), arg2)
8401 || put_user_id(high2lowuid(suid), arg3))
8402 goto efault;
8405 break;
8406 #endif
#ifdef TARGET_NR_setresgid
/* BUG FIX: this case was guarded by the unrelated TARGET_NR_getresgid
 * macro (the real getresgid case below has its own, identical guard);
 * guard it by its own syscall macro so a target that defines setresgid
 * but not getresgid still gets the syscall. */
case TARGET_NR_setresgid:
    ret = get_errno(setresgid(low2highgid(arg1),
                              low2highgid(arg2),
                              low2highgid(arg3)));
    break;
#endif
8414 #ifdef TARGET_NR_getresgid
8415 case TARGET_NR_getresgid:
8417 gid_t rgid, egid, sgid;
8418 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8419 if (!is_error(ret)) {
8420 if (put_user_id(high2lowgid(rgid), arg1)
8421 || put_user_id(high2lowgid(egid), arg2)
8422 || put_user_id(high2lowgid(sgid), arg3))
8423 goto efault;
8426 break;
8427 #endif
8428 case TARGET_NR_chown:
8429 if (!(p = lock_user_string(arg1)))
8430 goto efault;
8431 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8432 unlock_user(p, arg1, 0);
8433 break;
8434 case TARGET_NR_setuid:
8435 ret = get_errno(setuid(low2highuid(arg1)));
8436 break;
8437 case TARGET_NR_setgid:
8438 ret = get_errno(setgid(low2highgid(arg1)));
8439 break;
8440 case TARGET_NR_setfsuid:
8441 ret = get_errno(setfsuid(arg1));
8442 break;
8443 case TARGET_NR_setfsgid:
8444 ret = get_errno(setfsgid(arg1));
8445 break;
8447 #ifdef TARGET_NR_lchown32
8448 case TARGET_NR_lchown32:
8449 if (!(p = lock_user_string(arg1)))
8450 goto efault;
8451 ret = get_errno(lchown(p, arg2, arg3));
8452 unlock_user(p, arg1, 0);
8453 break;
8454 #endif
8455 #ifdef TARGET_NR_getuid32
8456 case TARGET_NR_getuid32:
8457 ret = get_errno(getuid());
8458 break;
8459 #endif
8461 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8462 /* Alpha specific */
8463 case TARGET_NR_getxuid:
8465 uid_t euid;
8466 euid=geteuid();
8467 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8469 ret = get_errno(getuid());
8470 break;
8471 #endif
8472 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8473 /* Alpha specific */
8474 case TARGET_NR_getxgid:
8476 uid_t egid;
8477 egid=getegid();
8478 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8480 ret = get_errno(getgid());
8481 break;
8482 #endif
8483 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8484 /* Alpha specific */
8485 case TARGET_NR_osf_getsysinfo:
8486 ret = -TARGET_EOPNOTSUPP;
8487 switch (arg1) {
8488 case TARGET_GSI_IEEE_FP_CONTROL:
8490 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8492 /* Copied from linux ieee_fpcr_to_swcr. */
8493 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8494 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8495 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8496 | SWCR_TRAP_ENABLE_DZE
8497 | SWCR_TRAP_ENABLE_OVF);
8498 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8499 | SWCR_TRAP_ENABLE_INE);
8500 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8501 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8503 if (put_user_u64 (swcr, arg2))
8504 goto efault;
8505 ret = 0;
8507 break;
8509 /* case GSI_IEEE_STATE_AT_SIGNAL:
8510 -- Not implemented in linux kernel.
8511 case GSI_UACPROC:
8512 -- Retrieves current unaligned access state; not much used.
8513 case GSI_PROC_TYPE:
8514 -- Retrieves implver information; surely not used.
8515 case GSI_GET_HWRPB:
8516 -- Grabs a copy of the HWRPB; surely not used.
8519 break;
8520 #endif
8521 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8522 /* Alpha specific */
8523 case TARGET_NR_osf_setsysinfo:
8524 ret = -TARGET_EOPNOTSUPP;
8525 switch (arg1) {
8526 case TARGET_SSI_IEEE_FP_CONTROL:
8528 uint64_t swcr, fpcr, orig_fpcr;
8530 if (get_user_u64 (swcr, arg2)) {
8531 goto efault;
8533 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8534 fpcr = orig_fpcr & FPCR_DYN_MASK;
8536 /* Copied from linux ieee_swcr_to_fpcr. */
8537 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8538 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8539 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8540 | SWCR_TRAP_ENABLE_DZE
8541 | SWCR_TRAP_ENABLE_OVF)) << 48;
8542 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8543 | SWCR_TRAP_ENABLE_INE)) << 57;
8544 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8545 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8547 cpu_alpha_store_fpcr(cpu_env, fpcr);
8548 ret = 0;
8550 break;
8552 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8554 uint64_t exc, fpcr, orig_fpcr;
8555 int si_code;
8557 if (get_user_u64(exc, arg2)) {
8558 goto efault;
8561 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8563 /* We only add to the exception status here. */
8564 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8566 cpu_alpha_store_fpcr(cpu_env, fpcr);
8567 ret = 0;
8569 /* Old exceptions are not signaled. */
8570 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8572 /* If any exceptions set by this call,
8573 and are unmasked, send a signal. */
8574 si_code = 0;
8575 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8576 si_code = TARGET_FPE_FLTRES;
8578 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8579 si_code = TARGET_FPE_FLTUND;
8581 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8582 si_code = TARGET_FPE_FLTOVF;
8584 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8585 si_code = TARGET_FPE_FLTDIV;
8587 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8588 si_code = TARGET_FPE_FLTINV;
8590 if (si_code != 0) {
8591 target_siginfo_t info;
8592 info.si_signo = SIGFPE;
8593 info.si_errno = 0;
8594 info.si_code = si_code;
8595 info._sifields._sigfault._addr
8596 = ((CPUArchState *)cpu_env)->pc;
8597 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8600 break;
8602 /* case SSI_NVPAIRS:
8603 -- Used with SSIN_UACPROC to enable unaligned accesses.
8604 case SSI_IEEE_STATE_AT_SIGNAL:
8605 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8606 -- Not implemented in linux kernel
8609 break;
8610 #endif
8611 #ifdef TARGET_NR_osf_sigprocmask
8612 /* Alpha specific. */
/* OSF/1 sigprocmask takes the mask by value in a register and returns
 * the old mask as the syscall result (no user-memory oldset pointer),
 * unlike the generic sigprocmask.  NOTE(review): scraped listing --
 * the "86xx" prefixes and missing brace lines are extraction
 * artifacts. */
8613 case TARGET_NR_osf_sigprocmask:
8615 abi_ulong mask;
8616 int how;
8617 sigset_t set, oldset;
/* Map the OSF "how" constants onto the host's values. */
8619 switch(arg1) {
8620 case TARGET_SIG_BLOCK:
8621 how = SIG_BLOCK;
8622 break;
8623 case TARGET_SIG_UNBLOCK:
8624 how = SIG_UNBLOCK;
8625 break;
8626 case TARGET_SIG_SETMASK:
8627 how = SIG_SETMASK;
8628 break;
8629 default:
8630 ret = -TARGET_EINVAL;
8631 goto fail;
/* Convert the by-value guest mask to a host sigset_t, apply it, then
 * convert the previous mask back for the return value. */
8633 mask = arg2;
8634 target_to_host_old_sigset(&set, &mask);
8635 do_sigprocmask(how, &set, &oldset);
8636 host_to_target_old_sigset(&mask, &oldset);
/* Old mask is the syscall's return value on this ABI. */
8637 ret = mask;
8639 break;
8640 #endif
8642 #ifdef TARGET_NR_getgid32
8643 case TARGET_NR_getgid32:
8644 ret = get_errno(getgid());
8645 break;
8646 #endif
8647 #ifdef TARGET_NR_geteuid32
8648 case TARGET_NR_geteuid32:
8649 ret = get_errno(geteuid());
8650 break;
8651 #endif
8652 #ifdef TARGET_NR_getegid32
8653 case TARGET_NR_getegid32:
8654 ret = get_errno(getegid());
8655 break;
8656 #endif
8657 #ifdef TARGET_NR_setreuid32
8658 case TARGET_NR_setreuid32:
8659 ret = get_errno(setreuid(arg1, arg2));
8660 break;
8661 #endif
8662 #ifdef TARGET_NR_setregid32
8663 case TARGET_NR_setregid32:
8664 ret = get_errno(setregid(arg1, arg2));
8665 break;
8666 #endif
8667 #ifdef TARGET_NR_getgroups32
8668 case TARGET_NR_getgroups32:
8670 int gidsetsize = arg1;
8671 uint32_t *target_grouplist;
8672 gid_t *grouplist;
8673 int i;
8675 grouplist = alloca(gidsetsize * sizeof(gid_t));
8676 ret = get_errno(getgroups(gidsetsize, grouplist));
8677 if (gidsetsize == 0)
8678 break;
8679 if (!is_error(ret)) {
8680 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8681 if (!target_grouplist) {
8682 ret = -TARGET_EFAULT;
8683 goto fail;
8685 for(i = 0;i < ret; i++)
8686 target_grouplist[i] = tswap32(grouplist[i]);
8687 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8690 break;
8691 #endif
8692 #ifdef TARGET_NR_setgroups32
8693 case TARGET_NR_setgroups32:
8695 int gidsetsize = arg1;
8696 uint32_t *target_grouplist;
8697 gid_t *grouplist;
8698 int i;
8700 grouplist = alloca(gidsetsize * sizeof(gid_t));
8701 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8702 if (!target_grouplist) {
8703 ret = -TARGET_EFAULT;
8704 goto fail;
8706 for(i = 0;i < gidsetsize; i++)
8707 grouplist[i] = tswap32(target_grouplist[i]);
8708 unlock_user(target_grouplist, arg2, 0);
8709 ret = get_errno(setgroups(gidsetsize, grouplist));
8711 break;
8712 #endif
8713 #ifdef TARGET_NR_fchown32
8714 case TARGET_NR_fchown32:
8715 ret = get_errno(fchown(arg1, arg2, arg3));
8716 break;
8717 #endif
8718 #ifdef TARGET_NR_setresuid32
8719 case TARGET_NR_setresuid32:
8720 ret = get_errno(setresuid(arg1, arg2, arg3));
8721 break;
8722 #endif
8723 #ifdef TARGET_NR_getresuid32
8724 case TARGET_NR_getresuid32:
8726 uid_t ruid, euid, suid;
8727 ret = get_errno(getresuid(&ruid, &euid, &suid));
8728 if (!is_error(ret)) {
8729 if (put_user_u32(ruid, arg1)
8730 || put_user_u32(euid, arg2)
8731 || put_user_u32(suid, arg3))
8732 goto efault;
8735 break;
8736 #endif
8737 #ifdef TARGET_NR_setresgid32
8738 case TARGET_NR_setresgid32:
8739 ret = get_errno(setresgid(arg1, arg2, arg3));
8740 break;
8741 #endif
8742 #ifdef TARGET_NR_getresgid32
8743 case TARGET_NR_getresgid32:
8745 gid_t rgid, egid, sgid;
8746 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8747 if (!is_error(ret)) {
8748 if (put_user_u32(rgid, arg1)
8749 || put_user_u32(egid, arg2)
8750 || put_user_u32(sgid, arg3))
8751 goto efault;
8754 break;
8755 #endif
8756 #ifdef TARGET_NR_chown32
8757 case TARGET_NR_chown32:
8758 if (!(p = lock_user_string(arg1)))
8759 goto efault;
8760 ret = get_errno(chown(p, arg2, arg3));
8761 unlock_user(p, arg1, 0);
8762 break;
8763 #endif
8764 #ifdef TARGET_NR_setuid32
8765 case TARGET_NR_setuid32:
8766 ret = get_errno(setuid(arg1));
8767 break;
8768 #endif
8769 #ifdef TARGET_NR_setgid32
8770 case TARGET_NR_setgid32:
8771 ret = get_errno(setgid(arg1));
8772 break;
8773 #endif
8774 #ifdef TARGET_NR_setfsuid32
8775 case TARGET_NR_setfsuid32:
8776 ret = get_errno(setfsuid(arg1));
8777 break;
8778 #endif
8779 #ifdef TARGET_NR_setfsgid32
8780 case TARGET_NR_setfsgid32:
8781 ret = get_errno(setfsgid(arg1));
8782 break;
8783 #endif
8785 case TARGET_NR_pivot_root:
8786 goto unimplemented;
#ifdef TARGET_NR_mincore
case TARGET_NR_mincore:
    {
        void *a;
        void *v;
        /* mincore(2)'s third argument is an OUTPUT vector holding one
         * status byte per page of the queried range. */
        abi_ulong vec_len = (arg2 + TARGET_PAGE_SIZE - 1) / TARGET_PAGE_SIZE;
        ret = -TARGET_EFAULT;
        if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
            goto efault;
        /* BUG FIX: the vector was previously locked with
         * lock_user_string() (read-only, NUL-terminated scan) and
         * unlocked with length `ret`, which is 0 on success so no
         * result bytes ever reached the guest.  Lock it writable with
         * the correct length and copy the whole vector back on
         * success. */
        if (!(v = lock_user(VERIFY_WRITE, arg3, vec_len, 0)))
            goto mincore_fail;
        ret = get_errno(mincore(a, arg2, v));
        unlock_user(v, arg3, is_error(ret) ? 0 : vec_len);
    mincore_fail:
        unlock_user(a, arg1, 0);
    }
    break;
#endif
8803 #ifdef TARGET_NR_arm_fadvise64_64
8804 case TARGET_NR_arm_fadvise64_64:
8807 * arm_fadvise64_64 looks like fadvise64_64 but
8808 * with different argument order
8810 abi_long temp;
8811 temp = arg3;
8812 arg3 = arg4;
8813 arg4 = temp;
8815 #endif
8816 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8817 #ifdef TARGET_NR_fadvise64_64
8818 case TARGET_NR_fadvise64_64:
8819 #endif
8820 #ifdef TARGET_NR_fadvise64
8821 case TARGET_NR_fadvise64:
8822 #endif
8823 #ifdef TARGET_S390X
8824 switch (arg4) {
8825 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8826 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8827 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8828 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8829 default: break;
8831 #endif
8832 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8833 break;
8834 #endif
8835 #ifdef TARGET_NR_madvise
8836 case TARGET_NR_madvise:
8837 /* A straight passthrough may not be safe because qemu sometimes
8838 turns private file-backed mappings into anonymous mappings.
8839 This will break MADV_DONTNEED.
8840 This is a hint, so ignoring and returning success is ok. */
8841 ret = get_errno(0);
8842 break;
8843 #endif
8844 #if TARGET_ABI_BITS == 32
8845 case TARGET_NR_fcntl64:
8847 int cmd;
8848 struct flock64 fl;
8849 struct target_flock64 *target_fl;
8850 #ifdef TARGET_ARM
8851 struct target_eabi_flock64 *target_efl;
8852 #endif
8854 cmd = target_to_host_fcntl_cmd(arg2);
8855 if (cmd == -TARGET_EINVAL) {
8856 ret = cmd;
8857 break;
8860 switch(arg2) {
8861 case TARGET_F_GETLK64:
8862 #ifdef TARGET_ARM
8863 if (((CPUARMState *)cpu_env)->eabi) {
8864 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8865 goto efault;
8866 fl.l_type = tswap16(target_efl->l_type);
8867 fl.l_whence = tswap16(target_efl->l_whence);
8868 fl.l_start = tswap64(target_efl->l_start);
8869 fl.l_len = tswap64(target_efl->l_len);
8870 fl.l_pid = tswap32(target_efl->l_pid);
8871 unlock_user_struct(target_efl, arg3, 0);
8872 } else
8873 #endif
8875 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8876 goto efault;
8877 fl.l_type = tswap16(target_fl->l_type);
8878 fl.l_whence = tswap16(target_fl->l_whence);
8879 fl.l_start = tswap64(target_fl->l_start);
8880 fl.l_len = tswap64(target_fl->l_len);
8881 fl.l_pid = tswap32(target_fl->l_pid);
8882 unlock_user_struct(target_fl, arg3, 0);
8884 ret = get_errno(fcntl(arg1, cmd, &fl));
8885 if (ret == 0) {
8886 #ifdef TARGET_ARM
8887 if (((CPUARMState *)cpu_env)->eabi) {
8888 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8889 goto efault;
8890 target_efl->l_type = tswap16(fl.l_type);
8891 target_efl->l_whence = tswap16(fl.l_whence);
8892 target_efl->l_start = tswap64(fl.l_start);
8893 target_efl->l_len = tswap64(fl.l_len);
8894 target_efl->l_pid = tswap32(fl.l_pid);
8895 unlock_user_struct(target_efl, arg3, 1);
8896 } else
8897 #endif
8899 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8900 goto efault;
8901 target_fl->l_type = tswap16(fl.l_type);
8902 target_fl->l_whence = tswap16(fl.l_whence);
8903 target_fl->l_start = tswap64(fl.l_start);
8904 target_fl->l_len = tswap64(fl.l_len);
8905 target_fl->l_pid = tswap32(fl.l_pid);
8906 unlock_user_struct(target_fl, arg3, 1);
8909 break;
8911 case TARGET_F_SETLK64:
8912 case TARGET_F_SETLKW64:
8913 #ifdef TARGET_ARM
8914 if (((CPUARMState *)cpu_env)->eabi) {
8915 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8916 goto efault;
8917 fl.l_type = tswap16(target_efl->l_type);
8918 fl.l_whence = tswap16(target_efl->l_whence);
8919 fl.l_start = tswap64(target_efl->l_start);
8920 fl.l_len = tswap64(target_efl->l_len);
8921 fl.l_pid = tswap32(target_efl->l_pid);
8922 unlock_user_struct(target_efl, arg3, 0);
8923 } else
8924 #endif
8926 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8927 goto efault;
8928 fl.l_type = tswap16(target_fl->l_type);
8929 fl.l_whence = tswap16(target_fl->l_whence);
8930 fl.l_start = tswap64(target_fl->l_start);
8931 fl.l_len = tswap64(target_fl->l_len);
8932 fl.l_pid = tswap32(target_fl->l_pid);
8933 unlock_user_struct(target_fl, arg3, 0);
8935 ret = get_errno(fcntl(arg1, cmd, &fl));
8936 break;
8937 default:
8938 ret = do_fcntl(arg1, arg2, arg3);
8939 break;
8941 break;
8943 #endif
8944 #ifdef TARGET_NR_cacheflush
8945 case TARGET_NR_cacheflush:
8946 /* self-modifying code is handled automatically, so nothing needed */
8947 ret = 0;
8948 break;
8949 #endif
8950 #ifdef TARGET_NR_security
8951 case TARGET_NR_security:
8952 goto unimplemented;
8953 #endif
8954 #ifdef TARGET_NR_getpagesize
8955 case TARGET_NR_getpagesize:
8956 ret = TARGET_PAGE_SIZE;
8957 break;
8958 #endif
8959 case TARGET_NR_gettid:
8960 ret = get_errno(gettid());
8961 break;
8962 #ifdef TARGET_NR_readahead
8963 case TARGET_NR_readahead:
8964 #if TARGET_ABI_BITS == 32
8965 if (regpairs_aligned(cpu_env)) {
8966 arg2 = arg3;
8967 arg3 = arg4;
8968 arg4 = arg5;
8970 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8971 #else
8972 ret = get_errno(readahead(arg1, arg2, arg3));
8973 #endif
8974 break;
8975 #endif
8976 #ifdef CONFIG_ATTR
8977 #ifdef TARGET_NR_setxattr
8978 case TARGET_NR_listxattr:
8979 case TARGET_NR_llistxattr:
8981 void *p, *b = 0;
8982 if (arg2) {
8983 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8984 if (!b) {
8985 ret = -TARGET_EFAULT;
8986 break;
8989 p = lock_user_string(arg1);
8990 if (p) {
8991 if (num == TARGET_NR_listxattr) {
8992 ret = get_errno(listxattr(p, b, arg3));
8993 } else {
8994 ret = get_errno(llistxattr(p, b, arg3));
8996 } else {
8997 ret = -TARGET_EFAULT;
8999 unlock_user(p, arg1, 0);
9000 unlock_user(b, arg2, arg3);
9001 break;
9003 case TARGET_NR_flistxattr:
9005 void *b = 0;
9006 if (arg2) {
9007 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9008 if (!b) {
9009 ret = -TARGET_EFAULT;
9010 break;
9013 ret = get_errno(flistxattr(arg1, b, arg3));
9014 unlock_user(b, arg2, arg3);
9015 break;
9017 case TARGET_NR_setxattr:
9018 case TARGET_NR_lsetxattr:
9020 void *p, *n, *v = 0;
9021 if (arg3) {
9022 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9023 if (!v) {
9024 ret = -TARGET_EFAULT;
9025 break;
9028 p = lock_user_string(arg1);
9029 n = lock_user_string(arg2);
9030 if (p && n) {
9031 if (num == TARGET_NR_setxattr) {
9032 ret = get_errno(setxattr(p, n, v, arg4, arg5));
9033 } else {
9034 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
9036 } else {
9037 ret = -TARGET_EFAULT;
9039 unlock_user(p, arg1, 0);
9040 unlock_user(n, arg2, 0);
9041 unlock_user(v, arg3, 0);
9043 break;
9044 case TARGET_NR_fsetxattr:
9046 void *n, *v = 0;
9047 if (arg3) {
9048 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9049 if (!v) {
9050 ret = -TARGET_EFAULT;
9051 break;
9054 n = lock_user_string(arg2);
9055 if (n) {
9056 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
9057 } else {
9058 ret = -TARGET_EFAULT;
9060 unlock_user(n, arg2, 0);
9061 unlock_user(v, arg3, 0);
9063 break;
9064 case TARGET_NR_getxattr:
9065 case TARGET_NR_lgetxattr:
9067 void *p, *n, *v = 0;
9068 if (arg3) {
9069 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9070 if (!v) {
9071 ret = -TARGET_EFAULT;
9072 break;
9075 p = lock_user_string(arg1);
9076 n = lock_user_string(arg2);
9077 if (p && n) {
9078 if (num == TARGET_NR_getxattr) {
9079 ret = get_errno(getxattr(p, n, v, arg4));
9080 } else {
9081 ret = get_errno(lgetxattr(p, n, v, arg4));
9083 } else {
9084 ret = -TARGET_EFAULT;
9086 unlock_user(p, arg1, 0);
9087 unlock_user(n, arg2, 0);
9088 unlock_user(v, arg3, arg4);
9090 break;
9091 case TARGET_NR_fgetxattr:
9093 void *n, *v = 0;
9094 if (arg3) {
9095 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9096 if (!v) {
9097 ret = -TARGET_EFAULT;
9098 break;
9101 n = lock_user_string(arg2);
9102 if (n) {
9103 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9104 } else {
9105 ret = -TARGET_EFAULT;
9107 unlock_user(n, arg2, 0);
9108 unlock_user(v, arg3, arg4);
9110 break;
9111 case TARGET_NR_removexattr:
9112 case TARGET_NR_lremovexattr:
9114 void *p, *n;
9115 p = lock_user_string(arg1);
9116 n = lock_user_string(arg2);
9117 if (p && n) {
9118 if (num == TARGET_NR_removexattr) {
9119 ret = get_errno(removexattr(p, n));
9120 } else {
9121 ret = get_errno(lremovexattr(p, n));
9123 } else {
9124 ret = -TARGET_EFAULT;
9126 unlock_user(p, arg1, 0);
9127 unlock_user(n, arg2, 0);
9129 break;
9130 case TARGET_NR_fremovexattr:
9132 void *n;
9133 n = lock_user_string(arg2);
9134 if (n) {
9135 ret = get_errno(fremovexattr(arg1, n));
9136 } else {
9137 ret = -TARGET_EFAULT;
9139 unlock_user(n, arg2, 0);
9141 break;
9142 #endif
9143 #endif /* CONFIG_ATTR */
9144 #ifdef TARGET_NR_set_thread_area
9145 case TARGET_NR_set_thread_area:
9146 #if defined(TARGET_MIPS)
9147 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9148 ret = 0;
9149 break;
9150 #elif defined(TARGET_CRIS)
9151 if (arg1 & 0xff)
9152 ret = -TARGET_EINVAL;
9153 else {
9154 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9155 ret = 0;
9157 break;
9158 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9159 ret = do_set_thread_area(cpu_env, arg1);
9160 break;
9161 #elif defined(TARGET_M68K)
9163 TaskState *ts = cpu->opaque;
9164 ts->tp_value = arg1;
9165 ret = 0;
9166 break;
9168 #else
9169 goto unimplemented_nowarn;
9170 #endif
9171 #endif
9172 #ifdef TARGET_NR_get_thread_area
9173 case TARGET_NR_get_thread_area:
9174 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9175 ret = do_get_thread_area(cpu_env, arg1);
9176 break;
9177 #elif defined(TARGET_M68K)
9179 TaskState *ts = cpu->opaque;
9180 ret = ts->tp_value;
9181 break;
9183 #else
9184 goto unimplemented_nowarn;
9185 #endif
9186 #endif
9187 #ifdef TARGET_NR_getdomainname
9188 case TARGET_NR_getdomainname:
9189 goto unimplemented_nowarn;
9190 #endif
9192 #ifdef TARGET_NR_clock_gettime
9193 case TARGET_NR_clock_gettime:
9195 struct timespec ts;
9196 ret = get_errno(clock_gettime(arg1, &ts));
9197 if (!is_error(ret)) {
9198 host_to_target_timespec(arg2, &ts);
9200 break;
9202 #endif
9203 #ifdef TARGET_NR_clock_getres
9204 case TARGET_NR_clock_getres:
9206 struct timespec ts;
9207 ret = get_errno(clock_getres(arg1, &ts));
9208 if (!is_error(ret)) {
9209 host_to_target_timespec(arg2, &ts);
9211 break;
9213 #endif
9214 #ifdef TARGET_NR_clock_nanosleep
9215 case TARGET_NR_clock_nanosleep:
9217 struct timespec ts;
9218 target_to_host_timespec(&ts, arg3);
9219 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
9220 if (arg4)
9221 host_to_target_timespec(arg4, &ts);
9223 #if defined(TARGET_PPC)
9224 /* clock_nanosleep is odd in that it returns positive errno values.
9225 * On PPC, CR0 bit 3 should be set in such a situation. */
9226 if (ret) {
9227 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
9229 #endif
9230 break;
9232 #endif
9234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9235 case TARGET_NR_set_tid_address:
9236 ret = get_errno(set_tid_address((int *)g2h(arg1)));
9237 break;
9238 #endif
9240 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9241 case TARGET_NR_tkill:
9242 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9243 break;
9244 #endif
9246 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9247 case TARGET_NR_tgkill:
9248 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9249 target_to_host_signal(arg3)));
9250 break;
9251 #endif
9253 #ifdef TARGET_NR_set_robust_list
9254 case TARGET_NR_set_robust_list:
9255 case TARGET_NR_get_robust_list:
9256 /* The ABI for supporting robust futexes has userspace pass
9257 * the kernel a pointer to a linked list which is updated by
9258 * userspace after the syscall; the list is walked by the kernel
9259 * when the thread exits. Since the linked list in QEMU guest
9260 * memory isn't a valid linked list for the host and we have
9261 * no way to reliably intercept the thread-death event, we can't
9262 * support these. Silently return ENOSYS so that guest userspace
9263 * falls back to a non-robust futex implementation (which should
9264 * be OK except in the corner case of the guest crashing while
9265 * holding a mutex that is shared with another process via
9266 * shared memory).
9268 goto unimplemented_nowarn;
9269 #endif
9271 #if defined(TARGET_NR_utimensat)
9272 case TARGET_NR_utimensat:
9274 struct timespec *tsp, ts[2];
9275 if (!arg3) {
9276 tsp = NULL;
9277 } else {
9278 target_to_host_timespec(ts, arg3);
9279 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9280 tsp = ts;
9282 if (!arg2)
9283 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9284 else {
9285 if (!(p = lock_user_string(arg2))) {
9286 ret = -TARGET_EFAULT;
9287 goto fail;
9289 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9290 unlock_user(p, arg2, 0);
9293 break;
9294 #endif
9295 case TARGET_NR_futex:
9296 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9297 break;
9298 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9299 case TARGET_NR_inotify_init:
9300 ret = get_errno(sys_inotify_init());
9301 break;
9302 #endif
9303 #ifdef CONFIG_INOTIFY1
9304 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9305 case TARGET_NR_inotify_init1:
9306 ret = get_errno(sys_inotify_init1(arg1));
9307 break;
9308 #endif
9309 #endif
9310 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9311 case TARGET_NR_inotify_add_watch:
9312 p = lock_user_string(arg2);
9313 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9314 unlock_user(p, arg2, 0);
9315 break;
9316 #endif
9317 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9318 case TARGET_NR_inotify_rm_watch:
9319 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9320 break;
9321 #endif
9323 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9324 case TARGET_NR_mq_open:
9326 struct mq_attr posix_mq_attr, *attrp;
9328 p = lock_user_string(arg1 - 1);
9329 if (arg4 != 0) {
9330 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9331 attrp = &posix_mq_attr;
9332 } else {
9333 attrp = 0;
9335 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9336 unlock_user (p, arg1, 0);
9338 break;
9340 case TARGET_NR_mq_unlink:
9341 p = lock_user_string(arg1 - 1);
9342 ret = get_errno(mq_unlink(p));
9343 unlock_user (p, arg1, 0);
9344 break;
9346 case TARGET_NR_mq_timedsend:
9348 struct timespec ts;
9350 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9351 if (arg5 != 0) {
9352 target_to_host_timespec(&ts, arg5);
9353 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9354 host_to_target_timespec(arg5, &ts);
9356 else
9357 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9358 unlock_user (p, arg2, arg3);
9360 break;
9362 case TARGET_NR_mq_timedreceive:
9364 struct timespec ts;
9365 unsigned int prio;
9367 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9368 if (arg5 != 0) {
9369 target_to_host_timespec(&ts, arg5);
9370 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9371 host_to_target_timespec(arg5, &ts);
9373 else
9374 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9375 unlock_user (p, arg2, arg3);
9376 if (arg4 != 0)
9377 put_user_u32(prio, arg4);
9379 break;
9381 /* Not implemented for now... */
9382 /* case TARGET_NR_mq_notify: */
9383 /* break; */
9385 case TARGET_NR_mq_getsetattr:
9387 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9388 ret = 0;
9389 if (arg3 != 0) {
9390 ret = mq_getattr(arg1, &posix_mq_attr_out);
9391 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9393 if (arg2 != 0) {
9394 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9395 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9399 break;
9400 #endif
9402 #ifdef CONFIG_SPLICE
9403 #ifdef TARGET_NR_tee
9404 case TARGET_NR_tee:
9406 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9408 break;
9409 #endif
9410 #ifdef TARGET_NR_splice
9411 case TARGET_NR_splice:
9413 loff_t loff_in, loff_out;
9414 loff_t *ploff_in = NULL, *ploff_out = NULL;
9415 if (arg2) {
9416 if (get_user_u64(loff_in, arg2)) {
9417 goto efault;
9419 ploff_in = &loff_in;
9421 if (arg4) {
9422 if (get_user_u64(loff_out, arg4)) {
9423 goto efault;
9425 ploff_out = &loff_out;
9427 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9428 if (arg2) {
9429 if (put_user_u64(loff_in, arg2)) {
9430 goto efault;
9433 if (arg4) {
9434 if (put_user_u64(loff_out, arg4)) {
9435 goto efault;
9439 break;
9440 #endif
9441 #ifdef TARGET_NR_vmsplice
9442 case TARGET_NR_vmsplice:
9444 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9445 if (vec != NULL) {
9446 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9447 unlock_iovec(vec, arg2, arg3, 0);
9448 } else {
9449 ret = -host_to_target_errno(errno);
9452 break;
9453 #endif
9454 #endif /* CONFIG_SPLICE */
9455 #ifdef CONFIG_EVENTFD
9456 #if defined(TARGET_NR_eventfd)
9457 case TARGET_NR_eventfd:
9458 ret = get_errno(eventfd(arg1, 0));
9459 break;
9460 #endif
9461 #if defined(TARGET_NR_eventfd2)
9462 case TARGET_NR_eventfd2:
9464 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9465 if (arg2 & TARGET_O_NONBLOCK) {
9466 host_flags |= O_NONBLOCK;
9468 if (arg2 & TARGET_O_CLOEXEC) {
9469 host_flags |= O_CLOEXEC;
9471 ret = get_errno(eventfd(arg1, host_flags));
9472 break;
9474 #endif
9475 #endif /* CONFIG_EVENTFD */
9476 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9477 case TARGET_NR_fallocate:
9478 #if TARGET_ABI_BITS == 32
9479 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9480 target_offset64(arg5, arg6)));
9481 #else
9482 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9483 #endif
9484 break;
9485 #endif
9486 #if defined(CONFIG_SYNC_FILE_RANGE)
9487 #if defined(TARGET_NR_sync_file_range)
9488 case TARGET_NR_sync_file_range:
9489 #if TARGET_ABI_BITS == 32
9490 #if defined(TARGET_MIPS)
9491 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9492 target_offset64(arg5, arg6), arg7));
9493 #else
9494 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9495 target_offset64(arg4, arg5), arg6));
9496 #endif /* !TARGET_MIPS */
9497 #else
9498 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9499 #endif
9500 break;
9501 #endif
9502 #if defined(TARGET_NR_sync_file_range2)
9503 case TARGET_NR_sync_file_range2:
9504 /* This is like sync_file_range but the arguments are reordered */
9505 #if TARGET_ABI_BITS == 32
9506 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9507 target_offset64(arg5, arg6), arg2));
9508 #else
9509 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9510 #endif
9511 break;
9512 #endif
9513 #endif
9514 #if defined(CONFIG_EPOLL)
9515 #if defined(TARGET_NR_epoll_create)
9516 case TARGET_NR_epoll_create:
9517 ret = get_errno(epoll_create(arg1));
9518 break;
9519 #endif
9520 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9521 case TARGET_NR_epoll_create1:
9522 ret = get_errno(epoll_create1(arg1));
9523 break;
9524 #endif
9525 #if defined(TARGET_NR_epoll_ctl)
9526 case TARGET_NR_epoll_ctl:
9528 struct epoll_event ep;
9529 struct epoll_event *epp = 0;
9530 if (arg4) {
9531 struct target_epoll_event *target_ep;
9532 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9533 goto efault;
9535 ep.events = tswap32(target_ep->events);
9536 /* The epoll_data_t union is just opaque data to the kernel,
9537 * so we transfer all 64 bits across and need not worry what
9538 * actual data type it is.
9540 ep.data.u64 = tswap64(target_ep->data.u64);
9541 unlock_user_struct(target_ep, arg4, 0);
9542 epp = &ep;
9544 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9545 break;
9547 #endif
9549 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9550 #define IMPLEMENT_EPOLL_PWAIT
9551 #endif
9552 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9553 #if defined(TARGET_NR_epoll_wait)
9554 case TARGET_NR_epoll_wait:
9555 #endif
9556 #if defined(IMPLEMENT_EPOLL_PWAIT)
9557 case TARGET_NR_epoll_pwait:
9558 #endif
9560 struct target_epoll_event *target_ep;
9561 struct epoll_event *ep;
9562 int epfd = arg1;
9563 int maxevents = arg3;
9564 int timeout = arg4;
9566 target_ep = lock_user(VERIFY_WRITE, arg2,
9567 maxevents * sizeof(struct target_epoll_event), 1);
9568 if (!target_ep) {
9569 goto efault;
9572 ep = alloca(maxevents * sizeof(struct epoll_event));
9574 switch (num) {
9575 #if defined(IMPLEMENT_EPOLL_PWAIT)
9576 case TARGET_NR_epoll_pwait:
9578 target_sigset_t *target_set;
9579 sigset_t _set, *set = &_set;
9581 if (arg5) {
9582 target_set = lock_user(VERIFY_READ, arg5,
9583 sizeof(target_sigset_t), 1);
9584 if (!target_set) {
9585 unlock_user(target_ep, arg2, 0);
9586 goto efault;
9588 target_to_host_sigset(set, target_set);
9589 unlock_user(target_set, arg5, 0);
9590 } else {
9591 set = NULL;
9594 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9595 break;
9597 #endif
9598 #if defined(TARGET_NR_epoll_wait)
9599 case TARGET_NR_epoll_wait:
9600 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9601 break;
9602 #endif
9603 default:
9604 ret = -TARGET_ENOSYS;
9606 if (!is_error(ret)) {
9607 int i;
9608 for (i = 0; i < ret; i++) {
9609 target_ep[i].events = tswap32(ep[i].events);
9610 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9613 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9614 break;
9616 #endif
9617 #endif
9618 #ifdef TARGET_NR_prlimit64
9619 case TARGET_NR_prlimit64:
9621 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9622 struct target_rlimit64 *target_rnew, *target_rold;
9623 struct host_rlimit64 rnew, rold, *rnewp = 0;
9624 int resource = target_to_host_resource(arg2);
9625 if (arg3) {
9626 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9627 goto efault;
9629 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9630 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9631 unlock_user_struct(target_rnew, arg3, 0);
9632 rnewp = &rnew;
9635 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
9636 if (!is_error(ret) && arg4) {
9637 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9638 goto efault;
9640 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9641 target_rold->rlim_max = tswap64(rold.rlim_max);
9642 unlock_user_struct(target_rold, arg4, 1);
9644 break;
9646 #endif
9647 #ifdef TARGET_NR_gethostname
9648 case TARGET_NR_gethostname:
9650 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9651 if (name) {
9652 ret = get_errno(gethostname(name, arg2));
9653 unlock_user(name, arg1, arg2);
9654 } else {
9655 ret = -TARGET_EFAULT;
9657 break;
9659 #endif
9660 #ifdef TARGET_NR_atomic_cmpxchg_32
9661 case TARGET_NR_atomic_cmpxchg_32:
9663 /* should use start_exclusive from main.c */
9664 abi_ulong mem_value;
9665 if (get_user_u32(mem_value, arg6)) {
9666 target_siginfo_t info;
9667 info.si_signo = SIGSEGV;
9668 info.si_errno = 0;
9669 info.si_code = TARGET_SEGV_MAPERR;
9670 info._sifields._sigfault._addr = arg6;
9671 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9672 ret = 0xdeadbeef;
9675 if (mem_value == arg2)
9676 put_user_u32(arg1, arg6);
9677 ret = mem_value;
9678 break;
9680 #endif
9681 #ifdef TARGET_NR_atomic_barrier
9682 case TARGET_NR_atomic_barrier:
9684 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9685 ret = 0;
9686 break;
9688 #endif
9690 #ifdef TARGET_NR_timer_create
9691 case TARGET_NR_timer_create:
9693 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9695 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
9697 int clkid = arg1;
9698 int timer_index = next_free_host_timer();
9700 if (timer_index < 0) {
9701 ret = -TARGET_EAGAIN;
9702 } else {
9703 timer_t *phtimer = g_posix_timers + timer_index;
9705 if (arg2) {
9706 phost_sevp = &host_sevp;
9707 ret = target_to_host_sigevent(phost_sevp, arg2);
9708 if (ret != 0) {
9709 break;
9713 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
9714 if (ret) {
9715 phtimer = NULL;
9716 } else {
9717 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
9718 goto efault;
9722 break;
9724 #endif
9726 #ifdef TARGET_NR_timer_settime
9727 case TARGET_NR_timer_settime:
9729 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9730 * struct itimerspec * old_value */
9731 target_timer_t timerid = get_timer_id(arg1);
9733 if (timerid < 0) {
9734 ret = timerid;
9735 } else if (arg3 == 0) {
9736 ret = -TARGET_EINVAL;
9737 } else {
9738 timer_t htimer = g_posix_timers[timerid];
9739 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
9741 target_to_host_itimerspec(&hspec_new, arg3);
9742 ret = get_errno(
9743 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
9744 host_to_target_itimerspec(arg2, &hspec_old);
9746 break;
9748 #endif
9750 #ifdef TARGET_NR_timer_gettime
9751 case TARGET_NR_timer_gettime:
9753 /* args: timer_t timerid, struct itimerspec *curr_value */
9754 target_timer_t timerid = get_timer_id(arg1);
9756 if (timerid < 0) {
9757 ret = timerid;
9758 } else if (!arg2) {
9759 ret = -TARGET_EFAULT;
9760 } else {
9761 timer_t htimer = g_posix_timers[timerid];
9762 struct itimerspec hspec;
9763 ret = get_errno(timer_gettime(htimer, &hspec));
9765 if (host_to_target_itimerspec(arg2, &hspec)) {
9766 ret = -TARGET_EFAULT;
9769 break;
9771 #endif
9773 #ifdef TARGET_NR_timer_getoverrun
9774 case TARGET_NR_timer_getoverrun:
9776 /* args: timer_t timerid */
9777 target_timer_t timerid = get_timer_id(arg1);
9779 if (timerid < 0) {
9780 ret = timerid;
9781 } else {
9782 timer_t htimer = g_posix_timers[timerid];
9783 ret = get_errno(timer_getoverrun(htimer));
9785 break;
9787 #endif
9789 #ifdef TARGET_NR_timer_delete
9790 case TARGET_NR_timer_delete:
9792 /* args: timer_t timerid */
9793 target_timer_t timerid = get_timer_id(arg1);
9795 if (timerid < 0) {
9796 ret = timerid;
9797 } else {
9798 timer_t htimer = g_posix_timers[timerid];
9799 ret = get_errno(timer_delete(htimer));
9800 g_posix_timers[timerid] = 0;
9802 break;
9804 #endif
9806 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
9807 case TARGET_NR_timerfd_create:
9808 ret = get_errno(timerfd_create(arg1,
9809 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
9810 break;
9811 #endif
9813 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
9814 case TARGET_NR_timerfd_gettime:
9816 struct itimerspec its_curr;
9818 ret = get_errno(timerfd_gettime(arg1, &its_curr));
9820 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
9821 goto efault;
9824 break;
9825 #endif
9827 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
9828 case TARGET_NR_timerfd_settime:
9830 struct itimerspec its_new, its_old, *p_new;
9832 if (arg3) {
9833 if (target_to_host_itimerspec(&its_new, arg3)) {
9834 goto efault;
9836 p_new = &its_new;
9837 } else {
9838 p_new = NULL;
9841 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
9843 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
9844 goto efault;
9847 break;
9848 #endif
9850 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
9851 case TARGET_NR_ioprio_get:
9852 ret = get_errno(ioprio_get(arg1, arg2));
9853 break;
9854 #endif
9856 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
9857 case TARGET_NR_ioprio_set:
9858 ret = get_errno(ioprio_set(arg1, arg2, arg3));
9859 break;
9860 #endif
9862 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
9863 case TARGET_NR_setns:
9864 ret = get_errno(setns(arg1, arg2));
9865 break;
9866 #endif
9867 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
9868 case TARGET_NR_unshare:
9869 ret = get_errno(unshare(arg1));
9870 break;
9871 #endif
9873 default:
9874 unimplemented:
9875 gemu_log("qemu: Unsupported syscall: %d\n", num);
9876 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9877 unimplemented_nowarn:
9878 #endif
9879 ret = -TARGET_ENOSYS;
9880 break;
9882 fail:
9883 #ifdef DEBUG
9884 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9885 #endif
9886 if(do_strace)
9887 print_syscall_ret(num, ret);
9888 return ret;
9889 efault:
9890 ret = -TARGET_EFAULT;
9891 goto fail;