Merge remote-tracking branch 'qemu/master'
[qemu/ar7.git] / linux-user / syscall.c
blob bcb9474de34b20e64dbdd664cc348440d5f7017d
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <linux/capability.h>
47 #include <signal.h>
48 #include <sched.h>
49 #ifdef __ia64__
50 int __clone2(int (*fn)(void *), void *child_stack_base,
51 size_t stack_size, int flags, void *arg, ...);
52 #endif
53 #include <sys/socket.h>
54 #include <sys/un.h>
55 #include <sys/uio.h>
56 #include <sys/poll.h>
57 #include <sys/times.h>
58 #include <sys/shm.h>
59 #include <sys/sem.h>
60 #include <sys/statfs.h>
61 #include <utime.h>
62 #include <sys/sysinfo.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #include "qemu/sockets.h"
70 #ifdef CONFIG_TIMERFD
71 #include <sys/timerfd.h>
72 #endif
73 #ifdef TARGET_GPROF
74 #include <sys/gmon.h>
75 #endif
76 #ifdef CONFIG_EVENTFD
77 #include <sys/eventfd.h>
78 #endif
79 #ifdef CONFIG_EPOLL
80 #include <sys/epoll.h>
81 #endif
82 #ifdef CONFIG_ATTR
83 #include "qemu/xattr.h"
84 #endif
85 #ifdef CONFIG_SENDFILE
86 #include <sys/sendfile.h>
87 #endif
89 #define termios host_termios
90 #define winsize host_winsize
91 #define termio host_termio
92 #define sgttyb host_sgttyb /* same as target */
93 #define tchars host_tchars /* same as target */
94 #define ltchars host_ltchars /* same as target */
96 #include <linux/termios.h>
97 #include <linux/unistd.h>
98 #include <linux/cdrom.h>
99 #include <linux/hdreg.h>
100 #include <linux/soundcard.h>
101 #include <linux/kd.h>
102 #include <linux/mtio.h>
103 #include <linux/fs.h>
104 #if defined(CONFIG_FIEMAP)
105 #include <linux/fiemap.h>
106 #endif
107 #include <linux/fb.h>
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include "linux_loop.h"
115 #include "uname.h"
117 #include "qemu.h"
119 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
120 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
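/* These are the clone(2) flags NPTL thread libraries use for TLS setup and
 * parent/child TID bookkeeping; grouping them in one mask lets the clone()
 * emulation later in this file treat them together. */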
122 //#define DEBUG
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
129 #undef _syscall0
130 #undef _syscall1
131 #undef _syscall2
132 #undef _syscall3
133 #undef _syscall4
134 #undef _syscall5
135 #undef _syscall6
137 #define _syscall0(type,name) \
138 static type name (void) \
140 return syscall(__NR_##name); \
143 #define _syscall1(type,name,type1,arg1) \
144 static type name (type1 arg1) \
146 return syscall(__NR_##name, arg1); \
149 #define _syscall2(type,name,type1,arg1,type2,arg2) \
150 static type name (type1 arg1,type2 arg2) \
152 return syscall(__NR_##name, arg1, arg2); \
155 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
156 static type name (type1 arg1,type2 arg2,type3 arg3) \
158 return syscall(__NR_##name, arg1, arg2, arg3); \
161 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
162 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
164 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
167 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
168 type5,arg5) \
169 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
171 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
175 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
176 type5,arg5,type6,arg6) \
177 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
178 type6 arg6) \
180 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
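/* For example, the declaration
 *     _syscall2(int, sys_tkill, int, tid, int, sig)
 * used further down expands to
 *     static int sys_tkill(int tid, int sig)
 *     {
 *         return syscall(__NR_sys_tkill, tid, sig);
 *     }
 * which is why each wrapper name gets a __NR_sys_* alias below, mapping it
 * back to the real syscall number. */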
184 #define __NR_sys_uname __NR_uname
185 #define __NR_sys_getcwd1 __NR_getcwd
186 #define __NR_sys_getdents __NR_getdents
187 #define __NR_sys_getdents64 __NR_getdents64
188 #define __NR_sys_getpriority __NR_getpriority
189 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_futex __NR_futex
194 #define __NR_sys_inotify_init __NR_inotify_init
195 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
196 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
198 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
199 defined(__s390x__)
200 #define __NR__llseek __NR_lseek
201 #endif
203 /* Newer kernel ports have llseek() instead of _llseek() */
204 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
205 #define TARGET_NR__llseek TARGET_NR_llseek
206 #endif
208 #ifdef __NR_gettid
209 _syscall0(int, gettid)
210 #else
211 /* This is a replacement for the host gettid() and must return a host
212 errno. */
213 static int gettid(void) {
214 return -ENOSYS;
216 #endif
217 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
218 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
219 #endif
220 #if !defined(__NR_getdents) || \
221 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
222 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
223 #endif
224 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
225 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
226 loff_t *, res, uint, wh);
227 #endif
228 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
229 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
230 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
231 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
232 #endif
233 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
234 _syscall2(int,sys_tkill,int,tid,int,sig)
235 #endif
236 #ifdef __NR_exit_group
237 _syscall1(int,exit_group,int,error_code)
238 #endif
239 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
240 _syscall1(int,set_tid_address,int *,tidptr)
241 #endif
242 #if defined(TARGET_NR_futex) && defined(__NR_futex)
243 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
244 const struct timespec *,timeout,int *,uaddr2,int,val3)
245 #endif
246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
247 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
250 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
251 unsigned long *, user_mask_ptr);
252 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
253 void *, arg);
254 _syscall2(int, capget, struct __user_cap_header_struct *, header,
255 struct __user_cap_data_struct *, data);
256 _syscall2(int, capset, struct __user_cap_header_struct *, header,
257 struct __user_cap_data_struct *, data);
258 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
259 _syscall2(int, ioprio_get, int, which, int, who)
260 #endif
261 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
262 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
263 #endif
265 static bitmask_transtbl fcntl_flags_tbl[] = {
266 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
267 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
268 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
269 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
270 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
271 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
272 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
273 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
274 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
275 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
276 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
277 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
278 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
279 #if defined(O_DIRECT)
280 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
281 #endif
282 #if defined(O_NOATIME)
283 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
284 #endif
285 #if defined(O_CLOEXEC)
286 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
287 #endif
288 #if defined(O_PATH)
289 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
290 #endif
291 /* Don't terminate the list prematurely on 64-bit host+guest. */
292 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
293 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
294 #endif
295 { 0, 0, 0, 0 }
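/* Each entry is { target_mask, target_bits, host_mask, host_bits }: the
 * generic bitmask translation helpers walk this table to convert open(2)
 * flag bits between guest and host encodings, stopping at the all-zero
 * terminator above. */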
298 static int sys_getcwd1(char *buf, size_t size)
300 if (getcwd(buf, size) == NULL) {
301 /* getcwd() sets errno */
302 return (-1);
304 return strlen(buf)+1;
307 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
310 * open(2) has extra parameter 'mode' when called with
311 * flag O_CREAT.
313 if ((flags & O_CREAT) != 0) {
314 return (openat(dirfd, pathname, flags, mode));
316 return (openat(dirfd, pathname, flags));
319 #ifdef TARGET_NR_utimensat
320 #ifdef CONFIG_UTIMENSAT
321 static int sys_utimensat(int dirfd, const char *pathname,
322 const struct timespec times[2], int flags)
324 if (pathname == NULL)
325 return futimens(dirfd, times);
326 else
327 return utimensat(dirfd, pathname, times, flags);
329 #elif defined(__NR_utimensat)
330 #define __NR_sys_utimensat __NR_utimensat
331 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
332 const struct timespec *,tsp,int,flags)
333 #else
334 static int sys_utimensat(int dirfd, const char *pathname,
335 const struct timespec times[2], int flags)
337 errno = ENOSYS;
338 return -1;
340 #endif
341 #endif /* TARGET_NR_utimensat */
343 #ifdef CONFIG_INOTIFY
344 #include <sys/inotify.h>
346 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
347 static int sys_inotify_init(void)
349 return (inotify_init());
351 #endif
352 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
353 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
355 return (inotify_add_watch(fd, pathname, mask));
357 #endif
358 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
359 static int sys_inotify_rm_watch(int fd, int32_t wd)
361 return (inotify_rm_watch(fd, wd));
363 #endif
364 #ifdef CONFIG_INOTIFY1
365 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
366 static int sys_inotify_init1(int flags)
368 return (inotify_init1(flags));
370 #endif
371 #endif
372 #else
373 /* Userspace can usually survive runtime without inotify */
374 #undef TARGET_NR_inotify_init
375 #undef TARGET_NR_inotify_init1
376 #undef TARGET_NR_inotify_add_watch
377 #undef TARGET_NR_inotify_rm_watch
378 #endif /* CONFIG_INOTIFY */
380 #if defined(TARGET_NR_ppoll)
381 #ifndef __NR_ppoll
382 # define __NR_ppoll -1
383 #endif
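/* Defining the number as -1 on hosts without ppoll keeps the wrapper
 * compiling; syscall(-1, ...) then simply fails at runtime with ENOSYS. */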
384 #define __NR_sys_ppoll __NR_ppoll
385 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
386 struct timespec *, timeout, const sigset_t *, sigmask,
387 size_t, sigsetsize)
388 #endif
390 #if defined(TARGET_NR_pselect6)
391 #ifndef __NR_pselect6
392 # define __NR_pselect6 -1
393 #endif
394 #define __NR_sys_pselect6 __NR_pselect6
395 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
396 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
397 #endif
399 #if defined(TARGET_NR_prlimit64)
400 #ifndef __NR_prlimit64
401 # define __NR_prlimit64 -1
402 #endif
403 #define __NR_sys_prlimit64 __NR_prlimit64
404 /* The glibc rlimit structure may not be the one used by the underlying syscall */
405 struct host_rlimit64 {
406 uint64_t rlim_cur;
407 uint64_t rlim_max;
409 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
410 const struct host_rlimit64 *, new_limit,
411 struct host_rlimit64 *, old_limit)
412 #endif
415 #if defined(TARGET_NR_timer_create)
416 /* Maximum of 32 active POSIX timers allowed at any one time. */
417 static timer_t g_posix_timers[32] = { 0, } ;
419 static inline int next_free_host_timer(void)
421 int k ;
422 /* FIXME: Does finding the next free slot require a lock? */
423 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
424 if (g_posix_timers[k] == 0) {
425 g_posix_timers[k] = (timer_t) 1;
426 return k;
429 return -1;
431 #endif
433 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
434 #ifdef TARGET_ARM
435 static inline int regpairs_aligned(void *cpu_env) {
436 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
438 #elif defined(TARGET_MIPS)
439 static inline int regpairs_aligned(void *cpu_env) { return 1; }
440 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
441 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
442 * of registers which translates to the same as ARM/MIPS, because we start with
443 * r3 as arg1 */
444 static inline int regpairs_aligned(void *cpu_env) { return 1; }
445 #else
446 static inline int regpairs_aligned(void *cpu_env) { return 0; }
447 #endif
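/* For example, on ARM EABI a 64-bit argument such as the offset of
 * truncate64() travels in an even/odd register pair, so the syscall
 * handlers use regpairs_aligned() to know when to skip the padding slot
 * before the 64-bit value. */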
449 #define ERRNO_TABLE_SIZE 1200
451 /* target_to_host_errno_table[] is initialized from
452 * host_to_target_errno_table[] in syscall_init(). */
453 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
457 * This list is the union of errno values overridden in asm-<arch>/errno.h
458 * minus the errnos that are not actually generic to all archs.
460 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
461 [EIDRM] = TARGET_EIDRM,
462 [ECHRNG] = TARGET_ECHRNG,
463 [EL2NSYNC] = TARGET_EL2NSYNC,
464 [EL3HLT] = TARGET_EL3HLT,
465 [EL3RST] = TARGET_EL3RST,
466 [ELNRNG] = TARGET_ELNRNG,
467 [EUNATCH] = TARGET_EUNATCH,
468 [ENOCSI] = TARGET_ENOCSI,
469 [EL2HLT] = TARGET_EL2HLT,
470 [EDEADLK] = TARGET_EDEADLK,
471 [ENOLCK] = TARGET_ENOLCK,
472 [EBADE] = TARGET_EBADE,
473 [EBADR] = TARGET_EBADR,
474 [EXFULL] = TARGET_EXFULL,
475 [ENOANO] = TARGET_ENOANO,
476 [EBADRQC] = TARGET_EBADRQC,
477 [EBADSLT] = TARGET_EBADSLT,
478 [EBFONT] = TARGET_EBFONT,
479 [ENOSTR] = TARGET_ENOSTR,
480 [ENODATA] = TARGET_ENODATA,
481 [ETIME] = TARGET_ETIME,
482 [ENOSR] = TARGET_ENOSR,
483 [ENONET] = TARGET_ENONET,
484 [ENOPKG] = TARGET_ENOPKG,
485 [EREMOTE] = TARGET_EREMOTE,
486 [ENOLINK] = TARGET_ENOLINK,
487 [EADV] = TARGET_EADV,
488 [ESRMNT] = TARGET_ESRMNT,
489 [ECOMM] = TARGET_ECOMM,
490 [EPROTO] = TARGET_EPROTO,
491 [EDOTDOT] = TARGET_EDOTDOT,
492 [EMULTIHOP] = TARGET_EMULTIHOP,
493 [EBADMSG] = TARGET_EBADMSG,
494 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
495 [EOVERFLOW] = TARGET_EOVERFLOW,
496 [ENOTUNIQ] = TARGET_ENOTUNIQ,
497 [EBADFD] = TARGET_EBADFD,
498 [EREMCHG] = TARGET_EREMCHG,
499 [ELIBACC] = TARGET_ELIBACC,
500 [ELIBBAD] = TARGET_ELIBBAD,
501 [ELIBSCN] = TARGET_ELIBSCN,
502 [ELIBMAX] = TARGET_ELIBMAX,
503 [ELIBEXEC] = TARGET_ELIBEXEC,
504 [EILSEQ] = TARGET_EILSEQ,
505 [ENOSYS] = TARGET_ENOSYS,
506 [ELOOP] = TARGET_ELOOP,
507 [ERESTART] = TARGET_ERESTART,
508 [ESTRPIPE] = TARGET_ESTRPIPE,
509 [ENOTEMPTY] = TARGET_ENOTEMPTY,
510 [EUSERS] = TARGET_EUSERS,
511 [ENOTSOCK] = TARGET_ENOTSOCK,
512 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
513 [EMSGSIZE] = TARGET_EMSGSIZE,
514 [EPROTOTYPE] = TARGET_EPROTOTYPE,
515 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
516 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
517 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
518 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
519 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
520 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
521 [EADDRINUSE] = TARGET_EADDRINUSE,
522 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
523 [ENETDOWN] = TARGET_ENETDOWN,
524 [ENETUNREACH] = TARGET_ENETUNREACH,
525 [ENETRESET] = TARGET_ENETRESET,
526 [ECONNABORTED] = TARGET_ECONNABORTED,
527 [ECONNRESET] = TARGET_ECONNRESET,
528 [ENOBUFS] = TARGET_ENOBUFS,
529 [EISCONN] = TARGET_EISCONN,
530 [ENOTCONN] = TARGET_ENOTCONN,
531 [EUCLEAN] = TARGET_EUCLEAN,
532 [ENOTNAM] = TARGET_ENOTNAM,
533 [ENAVAIL] = TARGET_ENAVAIL,
534 [EISNAM] = TARGET_EISNAM,
535 [EREMOTEIO] = TARGET_EREMOTEIO,
536 [ESHUTDOWN] = TARGET_ESHUTDOWN,
537 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
538 [ETIMEDOUT] = TARGET_ETIMEDOUT,
539 [ECONNREFUSED] = TARGET_ECONNREFUSED,
540 [EHOSTDOWN] = TARGET_EHOSTDOWN,
541 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
542 [EALREADY] = TARGET_EALREADY,
543 [EINPROGRESS] = TARGET_EINPROGRESS,
544 [ESTALE] = TARGET_ESTALE,
545 [ECANCELED] = TARGET_ECANCELED,
546 [ENOMEDIUM] = TARGET_ENOMEDIUM,
547 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
548 #ifdef ENOKEY
549 [ENOKEY] = TARGET_ENOKEY,
550 #endif
551 #ifdef EKEYEXPIRED
552 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
553 #endif
554 #ifdef EKEYREVOKED
555 [EKEYREVOKED] = TARGET_EKEYREVOKED,
556 #endif
557 #ifdef EKEYREJECTED
558 [EKEYREJECTED] = TARGET_EKEYREJECTED,
559 #endif
560 #ifdef EOWNERDEAD
561 [EOWNERDEAD] = TARGET_EOWNERDEAD,
562 #endif
563 #ifdef ENOTRECOVERABLE
564 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
565 #endif
568 static inline int host_to_target_errno(int err)
570 if(host_to_target_errno_table[err])
571 return host_to_target_errno_table[err];
572 return err;
575 static inline int target_to_host_errno(int err)
577 if (target_to_host_errno_table[err])
578 return target_to_host_errno_table[err];
579 return err;
582 static inline abi_long get_errno(abi_long ret)
584 if (ret == -1)
585 return -host_to_target_errno(errno);
586 else
587 return ret;
590 static inline int is_error(abi_long ret)
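/* Any value in [-4096, -1] (checked via an unsigned comparison) is treated
 * as a negative errno, following the kernel convention that syscalls encode
 * errors as small negative return values. */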
592 return (abi_ulong)ret >= (abi_ulong)(-4096);
595 char *target_strerror(int err)
597 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
598 return NULL;
600 return strerror(target_to_host_errno(err));
603 static inline int host_to_target_sock_type(int host_type)
605 int target_type;
607 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
608 case SOCK_DGRAM:
609 target_type = TARGET_SOCK_DGRAM;
610 break;
611 case SOCK_STREAM:
612 target_type = TARGET_SOCK_STREAM;
613 break;
614 default:
615 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
616 break;
619 #if defined(SOCK_CLOEXEC)
620 if (host_type & SOCK_CLOEXEC) {
621 target_type |= TARGET_SOCK_CLOEXEC;
623 #endif
625 #if defined(SOCK_NONBLOCK)
626 if (host_type & SOCK_NONBLOCK) {
627 target_type |= TARGET_SOCK_NONBLOCK;
629 #endif
631 return target_type;
634 static abi_ulong target_brk;
635 static abi_ulong target_original_brk;
636 static abi_ulong brk_page;
638 void target_set_brk(abi_ulong new_brk)
640 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
641 brk_page = HOST_PAGE_ALIGN(target_brk);
644 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
645 #define DEBUGF_BRK(message, args...)
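/* The active definition compiles the DEBUGF_BRK() calls below away; swap in
 * the commented-out variant above to trace do_brk() decisions on stderr. */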
647 /* do_brk() must return target values and target errnos. */
648 abi_long do_brk(abi_ulong new_brk)
650 abi_long mapped_addr;
651 int new_alloc_size;
653 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
655 if (!new_brk) {
656 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
657 return target_brk;
659 if (new_brk < target_original_brk) {
660 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
661 target_brk);
662 return target_brk;
665 /* If the new brk is less than the highest page reserved to the
666 * target heap allocation, set it and we're almost done... */
667 if (new_brk <= brk_page) {
668 /* Heap contents are initialized to zero, as for anonymous
669 * mapped pages. */
670 if (new_brk > target_brk) {
671 memset(g2h(target_brk), 0, new_brk - target_brk);
673 target_brk = new_brk;
674 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
675 return target_brk;
678 /* We need to allocate more memory after the brk... Note that
679 * we don't use MAP_FIXED because that will map over the top of
680 * any existing mapping (like the one with the host libc or qemu
681 * itself); instead we treat "mapped but at wrong address" as
682 * a failure and unmap again.
684 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
685 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
686 PROT_READ|PROT_WRITE,
687 MAP_ANON|MAP_PRIVATE, 0, 0));
689 if (mapped_addr == brk_page) {
690 /* Heap contents are initialized to zero, as for anonymous
691 * mapped pages. Technically the new pages are already
692 * initialized to zero since they *are* anonymous mapped
693 * pages, however we have to take care with the contents that
694 * come from the remaining part of the previous page: it may
695 * contain garbage data due to a previous heap usage (grown
696 * then shrunken). */
697 memset(g2h(target_brk), 0, brk_page - target_brk);
699 target_brk = new_brk;
700 brk_page = HOST_PAGE_ALIGN(target_brk);
701 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
702 target_brk);
703 return target_brk;
704 } else if (mapped_addr != -1) {
705 /* Mapped but at wrong address, meaning there wasn't actually
706 * enough space for this brk.
708 target_munmap(mapped_addr, new_alloc_size);
709 mapped_addr = -1;
710 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
712 else {
713 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
716 #if defined(TARGET_ALPHA)
717 /* We (partially) emulate OSF/1 on Alpha, which requires we
718 return a proper errno, not an unchanged brk value. */
719 return -TARGET_ENOMEM;
720 #endif
721 /* For everything else, return the previous break. */
722 return target_brk;
725 static inline abi_long copy_from_user_fdset(fd_set *fds,
726 abi_ulong target_fds_addr,
727 int n)
729 int i, nw, j, k;
730 abi_ulong b, *target_fds;
732 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
733 if (!(target_fds = lock_user(VERIFY_READ,
734 target_fds_addr,
735 sizeof(abi_ulong) * nw,
736 1)))
737 return -TARGET_EFAULT;
739 FD_ZERO(fds);
740 k = 0;
741 for (i = 0; i < nw; i++) {
742 /* grab the abi_ulong */
743 __get_user(b, &target_fds[i]);
744 for (j = 0; j < TARGET_ABI_BITS; j++) {
745 /* check the bit inside the abi_ulong */
746 if ((b >> j) & 1)
747 FD_SET(k, fds);
748 k++;
752 unlock_user(target_fds, target_fds_addr, 0);
754 return 0;
757 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
758 abi_ulong target_fds_addr,
759 int n)
761 if (target_fds_addr) {
762 if (copy_from_user_fdset(fds, target_fds_addr, n))
763 return -TARGET_EFAULT;
764 *fds_ptr = fds;
765 } else {
766 *fds_ptr = NULL;
768 return 0;
771 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
772 const fd_set *fds,
773 int n)
775 int i, nw, j, k;
776 abi_long v;
777 abi_ulong *target_fds;
779 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
780 if (!(target_fds = lock_user(VERIFY_WRITE,
781 target_fds_addr,
782 sizeof(abi_ulong) * nw,
783 0)))
784 return -TARGET_EFAULT;
786 k = 0;
787 for (i = 0; i < nw; i++) {
788 v = 0;
789 for (j = 0; j < TARGET_ABI_BITS; j++) {
790 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
791 k++;
793 __put_user(v, &target_fds[i]);
796 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
798 return 0;
801 #if defined(__alpha__)
802 #define HOST_HZ 1024
803 #else
804 #define HOST_HZ 100
805 #endif
807 static inline abi_long host_to_target_clock_t(long ticks)
809 #if HOST_HZ == TARGET_HZ
810 return ticks;
811 #else
812 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
813 #endif
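/* Example: on an Alpha host (HOST_HZ 1024) with a 100 Hz target, 2048 host
 * clock ticks are reported to the guest as (2048 * 100) / 1024 = 200. */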
816 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
817 const struct rusage *rusage)
819 struct target_rusage *target_rusage;
821 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
822 return -TARGET_EFAULT;
823 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
824 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
825 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
826 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
827 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
828 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
829 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
830 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
831 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
832 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
833 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
834 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
835 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
836 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
837 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
838 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
839 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
840 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
841 unlock_user_struct(target_rusage, target_addr, 1);
843 return 0;
846 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
848 abi_ulong target_rlim_swap;
849 rlim_t result;
851 target_rlim_swap = tswapal(target_rlim);
852 if (target_rlim_swap == TARGET_RLIM_INFINITY)
853 return RLIM_INFINITY;
855 result = target_rlim_swap;
856 if (target_rlim_swap != (rlim_t)result)
857 return RLIM_INFINITY;
859 return result;
862 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
864 abi_ulong target_rlim_swap;
865 abi_ulong result;
867 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
868 target_rlim_swap = TARGET_RLIM_INFINITY;
869 else
870 target_rlim_swap = rlim;
871 result = tswapal(target_rlim_swap);
873 return result;
876 static inline int target_to_host_resource(int code)
878 switch (code) {
879 case TARGET_RLIMIT_AS:
880 return RLIMIT_AS;
881 case TARGET_RLIMIT_CORE:
882 return RLIMIT_CORE;
883 case TARGET_RLIMIT_CPU:
884 return RLIMIT_CPU;
885 case TARGET_RLIMIT_DATA:
886 return RLIMIT_DATA;
887 case TARGET_RLIMIT_FSIZE:
888 return RLIMIT_FSIZE;
889 case TARGET_RLIMIT_LOCKS:
890 return RLIMIT_LOCKS;
891 case TARGET_RLIMIT_MEMLOCK:
892 return RLIMIT_MEMLOCK;
893 case TARGET_RLIMIT_MSGQUEUE:
894 return RLIMIT_MSGQUEUE;
895 case TARGET_RLIMIT_NICE:
896 return RLIMIT_NICE;
897 case TARGET_RLIMIT_NOFILE:
898 return RLIMIT_NOFILE;
899 case TARGET_RLIMIT_NPROC:
900 return RLIMIT_NPROC;
901 case TARGET_RLIMIT_RSS:
902 return RLIMIT_RSS;
903 case TARGET_RLIMIT_RTPRIO:
904 return RLIMIT_RTPRIO;
905 case TARGET_RLIMIT_SIGPENDING:
906 return RLIMIT_SIGPENDING;
907 case TARGET_RLIMIT_STACK:
908 return RLIMIT_STACK;
909 default:
910 return code;
914 static inline abi_long copy_from_user_timeval(struct timeval *tv,
915 abi_ulong target_tv_addr)
917 struct target_timeval *target_tv;
919 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
920 return -TARGET_EFAULT;
922 __get_user(tv->tv_sec, &target_tv->tv_sec);
923 __get_user(tv->tv_usec, &target_tv->tv_usec);
925 unlock_user_struct(target_tv, target_tv_addr, 0);
927 return 0;
930 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
931 const struct timeval *tv)
933 struct target_timeval *target_tv;
935 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
936 return -TARGET_EFAULT;
938 __put_user(tv->tv_sec, &target_tv->tv_sec);
939 __put_user(tv->tv_usec, &target_tv->tv_usec);
941 unlock_user_struct(target_tv, target_tv_addr, 1);
943 return 0;
946 static inline abi_long copy_from_user_timezone(struct timezone *tz,
947 abi_ulong target_tz_addr)
949 struct target_timezone *target_tz;
951 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
952 return -TARGET_EFAULT;
955 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
956 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
958 unlock_user_struct(target_tz, target_tz_addr, 0);
960 return 0;
963 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
964 #include <mqueue.h>
966 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
967 abi_ulong target_mq_attr_addr)
969 struct target_mq_attr *target_mq_attr;
971 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
972 target_mq_attr_addr, 1))
973 return -TARGET_EFAULT;
975 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
976 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
977 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
978 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
980 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
982 return 0;
985 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
986 const struct mq_attr *attr)
988 struct target_mq_attr *target_mq_attr;
990 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
991 target_mq_attr_addr, 0))
992 return -TARGET_EFAULT;
994 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
995 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
996 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
997 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
999 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1001 return 0;
1003 #endif
1005 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1006 /* do_select() must return target values and target errnos. */
1007 static abi_long do_select(int n,
1008 abi_ulong rfd_addr, abi_ulong wfd_addr,
1009 abi_ulong efd_addr, abi_ulong target_tv_addr)
1011 fd_set rfds, wfds, efds;
1012 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1013 struct timeval tv, *tv_ptr;
1014 abi_long ret;
1016 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1017 if (ret) {
1018 return ret;
1020 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1021 if (ret) {
1022 return ret;
1024 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1025 if (ret) {
1026 return ret;
1029 if (target_tv_addr) {
1030 if (copy_from_user_timeval(&tv, target_tv_addr))
1031 return -TARGET_EFAULT;
1032 tv_ptr = &tv;
1033 } else {
1034 tv_ptr = NULL;
1037 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1039 if (!is_error(ret)) {
1040 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1041 return -TARGET_EFAULT;
1042 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1043 return -TARGET_EFAULT;
1044 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1045 return -TARGET_EFAULT;
1047 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1048 return -TARGET_EFAULT;
1051 return ret;
1053 #endif
1055 static abi_long do_pipe2(int host_pipe[], int flags)
1057 #ifdef CONFIG_PIPE2
1058 return pipe2(host_pipe, flags);
1059 #else
1060 return -ENOSYS;
1061 #endif
1064 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1065 int flags, int is_pipe2)
1067 int host_pipe[2];
1068 abi_long ret;
1069 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1071 if (is_error(ret))
1072 return get_errno(ret);
1074 /* Several targets have special calling conventions for the original
1075 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1076 if (!is_pipe2) {
1077 #if defined(TARGET_ALPHA)
1078 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1079 return host_pipe[0];
1080 #elif defined(TARGET_MIPS)
1081 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1082 return host_pipe[0];
1083 #elif defined(TARGET_SH4)
1084 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1085 return host_pipe[0];
1086 #elif defined(TARGET_SPARC)
1087 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1088 return host_pipe[0];
1089 #endif
1092 if (put_user_s32(host_pipe[0], pipedes)
1093 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1094 return -TARGET_EFAULT;
1095 return get_errno(ret);
1098 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1099 abi_ulong target_addr,
1100 socklen_t len)
1102 struct target_ip_mreqn *target_smreqn;
1104 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1105 if (!target_smreqn)
1106 return -TARGET_EFAULT;
1107 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1108 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1109 if (len == sizeof(struct target_ip_mreqn))
1110 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1111 unlock_user(target_smreqn, target_addr, 0);
1113 return 0;
1116 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1117 abi_ulong target_addr,
1118 socklen_t len)
1120 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1121 sa_family_t sa_family;
1122 struct target_sockaddr *target_saddr;
1124 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1125 if (!target_saddr)
1126 return -TARGET_EFAULT;
1128 sa_family = tswap16(target_saddr->sa_family);
1130 /* Oops. The caller might send an incomplete sun_path; sun_path
1131 * must be terminated by \0 (see the manual page), but
1132 * unfortunately it is quite common to specify sockaddr_un
1133 * length as "strlen(x->sun_path)" while it should be
1134 * "strlen(...) + 1". We'll fix that here if needed.
1135 * Linux kernel has a similar feature.
1138 if (sa_family == AF_UNIX) {
1139 if (len < unix_maxlen && len > 0) {
1140 char *cp = (char*)target_saddr;
1142 if ( cp[len-1] && !cp[len] )
1143 len++;
1145 if (len > unix_maxlen)
1146 len = unix_maxlen;
1149 memcpy(addr, target_saddr, len);
1150 addr->sa_family = sa_family;
1151 if (sa_family == AF_PACKET) {
1152 struct target_sockaddr_ll *lladdr;
1154 lladdr = (struct target_sockaddr_ll *)addr;
1155 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1156 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1158 unlock_user(target_saddr, target_addr, 0);
1160 return 0;
1163 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1164 struct sockaddr *addr,
1165 socklen_t len)
1167 struct target_sockaddr *target_saddr;
1169 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1170 if (!target_saddr)
1171 return -TARGET_EFAULT;
1172 memcpy(target_saddr, addr, len);
1173 target_saddr->sa_family = tswap16(addr->sa_family);
1174 unlock_user(target_saddr, target_addr, len);
1176 return 0;
1179 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1180 struct target_msghdr *target_msgh)
1182 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1183 abi_long msg_controllen;
1184 abi_ulong target_cmsg_addr;
1185 struct target_cmsghdr *target_cmsg;
1186 socklen_t space = 0;
1188 msg_controllen = tswapal(target_msgh->msg_controllen);
1189 if (msg_controllen < sizeof (struct target_cmsghdr))
1190 goto the_end;
1191 target_cmsg_addr = tswapal(target_msgh->msg_control);
1192 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1193 if (!target_cmsg)
1194 return -TARGET_EFAULT;
1196 while (cmsg && target_cmsg) {
1197 void *data = CMSG_DATA(cmsg);
1198 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1200 int len = tswapal(target_cmsg->cmsg_len)
1201 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1203 space += CMSG_SPACE(len);
1204 if (space > msgh->msg_controllen) {
1205 space -= CMSG_SPACE(len);
1206 /* This is a QEMU bug, since we allocated the payload
1207 * area ourselves (unlike overflow in host-to-target
1208 * conversion, which is just the guest giving us a buffer
1209 * that's too small). It can't happen for the payload types
1210 * we currently support; if it becomes an issue in future
1211 * we would need to improve our allocation strategy to
1212 * something more intelligent than "twice the size of the
1213 * target buffer we're reading from".
1215 gemu_log("Host cmsg overflow\n");
1216 break;
1219 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1220 cmsg->cmsg_level = SOL_SOCKET;
1221 } else {
1222 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1224 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1225 cmsg->cmsg_len = CMSG_LEN(len);
1227 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1228 int *fd = (int *)data;
1229 int *target_fd = (int *)target_data;
1230 int i, numfds = len / sizeof(int);
1232 for (i = 0; i < numfds; i++) {
1233 __get_user(fd[i], target_fd + i);
1235 } else if (cmsg->cmsg_level == SOL_SOCKET
1236 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1237 struct ucred *cred = (struct ucred *)data;
1238 struct target_ucred *target_cred =
1239 (struct target_ucred *)target_data;
1241 __get_user(cred->pid, &target_cred->pid);
1242 __get_user(cred->uid, &target_cred->uid);
1243 __get_user(cred->gid, &target_cred->gid);
1244 } else {
1245 gemu_log("Unsupported ancillary data: %d/%d\n",
1246 cmsg->cmsg_level, cmsg->cmsg_type);
1247 memcpy(data, target_data, len);
1250 cmsg = CMSG_NXTHDR(msgh, cmsg);
1251 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1253 unlock_user(target_cmsg, target_cmsg_addr, 0);
1254 the_end:
1255 msgh->msg_controllen = space;
1256 return 0;
1259 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1260 struct msghdr *msgh)
1262 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1263 abi_long msg_controllen;
1264 abi_ulong target_cmsg_addr;
1265 struct target_cmsghdr *target_cmsg;
1266 socklen_t space = 0;
1268 msg_controllen = tswapal(target_msgh->msg_controllen);
1269 if (msg_controllen < sizeof (struct target_cmsghdr))
1270 goto the_end;
1271 target_cmsg_addr = tswapal(target_msgh->msg_control);
1272 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1273 if (!target_cmsg)
1274 return -TARGET_EFAULT;
1276 while (cmsg && target_cmsg) {
1277 void *data = CMSG_DATA(cmsg);
1278 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1280 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1281 int tgt_len, tgt_space;
1283 /* We never copy a half-header but may copy half-data;
1284 * this is Linux's behaviour in put_cmsg(). Note that
1285 * truncation here is a guest problem (which we report
1286 * to the guest via the CTRUNC bit), unlike truncation
1287 * in target_to_host_cmsg, which is a QEMU bug.
1289 if (msg_controllen < sizeof(struct cmsghdr)) {
1290 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1291 break;
1294 if (cmsg->cmsg_level == SOL_SOCKET) {
1295 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1296 } else {
1297 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1299 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1301 tgt_len = TARGET_CMSG_LEN(len);
1303 /* Payload types which need a different size of payload on
1304 * the target must adjust tgt_len here.
1306 switch (cmsg->cmsg_level) {
1307 case SOL_SOCKET:
1308 switch (cmsg->cmsg_type) {
1309 case SO_TIMESTAMP:
1310 tgt_len = sizeof(struct target_timeval);
1311 break;
1312 default:
1313 break;
1315 default:
1316 break;
1319 if (msg_controllen < tgt_len) {
1320 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1321 tgt_len = msg_controllen;
1324 /* We must now copy-and-convert len bytes of payload
1325 * into tgt_len bytes of destination space. Bear in mind
1326 * that in both source and destination we may be dealing
1327 * with a truncated value!
1329 switch (cmsg->cmsg_level) {
1330 case SOL_SOCKET:
1331 switch (cmsg->cmsg_type) {
1332 case SCM_RIGHTS:
1334 int *fd = (int *)data;
1335 int *target_fd = (int *)target_data;
1336 int i, numfds = tgt_len / sizeof(int);
1338 for (i = 0; i < numfds; i++) {
1339 __put_user(fd[i], target_fd + i);
1341 break;
1343 case SO_TIMESTAMP:
1345 struct timeval *tv = (struct timeval *)data;
1346 struct target_timeval *target_tv =
1347 (struct target_timeval *)target_data;
1349 if (len != sizeof(struct timeval) ||
1350 tgt_len != sizeof(struct target_timeval)) {
1351 goto unimplemented;
1354 /* copy struct timeval to target */
1355 __put_user(tv->tv_sec, &target_tv->tv_sec);
1356 __put_user(tv->tv_usec, &target_tv->tv_usec);
1357 break;
1359 case SCM_CREDENTIALS:
1361 struct ucred *cred = (struct ucred *)data;
1362 struct target_ucred *target_cred =
1363 (struct target_ucred *)target_data;
1365 __put_user(cred->pid, &target_cred->pid);
1366 __put_user(cred->uid, &target_cred->uid);
1367 __put_user(cred->gid, &target_cred->gid);
1368 break;
1370 default:
1371 goto unimplemented;
1373 break;
1375 default:
1376 unimplemented:
1377 gemu_log("Unsupported ancillary data: %d/%d\n",
1378 cmsg->cmsg_level, cmsg->cmsg_type);
1379 memcpy(target_data, data, MIN(len, tgt_len));
1380 if (tgt_len > len) {
1381 memset(target_data + len, 0, tgt_len - len);
1385 target_cmsg->cmsg_len = tswapal(tgt_len);
1386 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1387 if (msg_controllen < tgt_space) {
1388 tgt_space = msg_controllen;
1390 msg_controllen -= tgt_space;
1391 space += tgt_space;
1392 cmsg = CMSG_NXTHDR(msgh, cmsg);
1393 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1395 unlock_user(target_cmsg, target_cmsg_addr, space);
1396 the_end:
1397 target_msgh->msg_controllen = tswapal(space);
1398 return 0;
1401 /* do_setsockopt() must return target values and target errnos. */
1402 static abi_long do_setsockopt(int sockfd, int level, int optname,
1403 abi_ulong optval_addr, socklen_t optlen)
1405 abi_long ret;
1406 int val;
1407 struct ip_mreqn *ip_mreq;
1408 struct ip_mreq_source *ip_mreq_source;
1410 switch(level) {
1411 case SOL_TCP:
1412 /* TCP options all take an 'int' value. */
1413 if (optlen < sizeof(uint32_t))
1414 return -TARGET_EINVAL;
1416 if (get_user_u32(val, optval_addr))
1417 return -TARGET_EFAULT;
1418 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1419 break;
1420 case SOL_IP:
1421 switch(optname) {
1422 case IP_TOS:
1423 case IP_TTL:
1424 case IP_HDRINCL:
1425 case IP_ROUTER_ALERT:
1426 case IP_RECVOPTS:
1427 case IP_RETOPTS:
1428 case IP_PKTINFO:
1429 case IP_MTU_DISCOVER:
1430 case IP_RECVERR:
1431 case IP_RECVTOS:
1432 #ifdef IP_FREEBIND
1433 case IP_FREEBIND:
1434 #endif
1435 case IP_MULTICAST_TTL:
1436 case IP_MULTICAST_LOOP:
1437 val = 0;
1438 if (optlen >= sizeof(uint32_t)) {
1439 if (get_user_u32(val, optval_addr))
1440 return -TARGET_EFAULT;
1441 } else if (optlen >= 1) {
1442 if (get_user_u8(val, optval_addr))
1443 return -TARGET_EFAULT;
1445 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1446 break;
1447 case IP_ADD_MEMBERSHIP:
1448 case IP_DROP_MEMBERSHIP:
1449 if (optlen < sizeof (struct target_ip_mreq) ||
1450 optlen > sizeof (struct target_ip_mreqn))
1451 return -TARGET_EINVAL;
1453 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1454 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1455 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1456 break;
1458 case IP_BLOCK_SOURCE:
1459 case IP_UNBLOCK_SOURCE:
1460 case IP_ADD_SOURCE_MEMBERSHIP:
1461 case IP_DROP_SOURCE_MEMBERSHIP:
1462 if (optlen != sizeof (struct target_ip_mreq_source))
1463 return -TARGET_EINVAL;
1465 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1466 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1467 unlock_user (ip_mreq_source, optval_addr, 0);
1468 break;
1470 default:
1471 goto unimplemented;
1473 break;
1474 case SOL_IPV6:
1475 switch (optname) {
1476 case IPV6_MTU_DISCOVER:
1477 case IPV6_MTU:
1478 case IPV6_V6ONLY:
1479 case IPV6_RECVPKTINFO:
1480 val = 0;
1481 if (optlen < sizeof(uint32_t)) {
1482 return -TARGET_EINVAL;
1484 if (get_user_u32(val, optval_addr)) {
1485 return -TARGET_EFAULT;
1487 ret = get_errno(setsockopt(sockfd, level, optname,
1488 &val, sizeof(val)));
1489 break;
1490 default:
1491 goto unimplemented;
1493 break;
1494 case SOL_RAW:
1495 switch (optname) {
1496 case ICMP_FILTER:
1497 /* struct icmp_filter takes a u32 value */
1498 if (optlen < sizeof(uint32_t)) {
1499 return -TARGET_EINVAL;
1502 if (get_user_u32(val, optval_addr)) {
1503 return -TARGET_EFAULT;
1505 ret = get_errno(setsockopt(sockfd, level, optname,
1506 &val, sizeof(val)));
1507 break;
1509 default:
1510 goto unimplemented;
1512 break;
1513 case TARGET_SOL_SOCKET:
1514 switch (optname) {
1515 case TARGET_SO_RCVTIMEO:
1517 struct timeval tv;
1519 optname = SO_RCVTIMEO;
1521 set_timeout:
1522 if (optlen != sizeof(struct target_timeval)) {
1523 return -TARGET_EINVAL;
1526 if (copy_from_user_timeval(&tv, optval_addr)) {
1527 return -TARGET_EFAULT;
1530 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1531 &tv, sizeof(tv)));
1532 return ret;
1534 case TARGET_SO_SNDTIMEO:
1535 optname = SO_SNDTIMEO;
1536 goto set_timeout;
1537 case TARGET_SO_ATTACH_FILTER:
1539 struct target_sock_fprog *tfprog;
1540 struct target_sock_filter *tfilter;
1541 struct sock_fprog fprog;
1542 struct sock_filter *filter;
1543 int i;
1545 if (optlen != sizeof(*tfprog)) {
1546 return -TARGET_EINVAL;
1548 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1549 return -TARGET_EFAULT;
1551 if (!lock_user_struct(VERIFY_READ, tfilter,
1552 tswapal(tfprog->filter), 0)) {
1553 unlock_user_struct(tfprog, optval_addr, 1);
1554 return -TARGET_EFAULT;
1557 fprog.len = tswap16(tfprog->len);
1558 filter = malloc(fprog.len * sizeof(*filter));
1559 if (filter == NULL) {
1560 unlock_user_struct(tfilter, tfprog->filter, 1);
1561 unlock_user_struct(tfprog, optval_addr, 1);
1562 return -TARGET_ENOMEM;
1564 for (i = 0; i < fprog.len; i++) {
1565 filter[i].code = tswap16(tfilter[i].code);
1566 filter[i].jt = tfilter[i].jt;
1567 filter[i].jf = tfilter[i].jf;
1568 filter[i].k = tswap32(tfilter[i].k);
1570 fprog.filter = filter;
1572 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1573 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1574 free(filter);
1576 unlock_user_struct(tfilter, tfprog->filter, 1);
1577 unlock_user_struct(tfprog, optval_addr, 1);
1578 return ret;
1580 case TARGET_SO_BINDTODEVICE:
1582 char *dev_ifname, *addr_ifname;
1584 if (optlen > IFNAMSIZ - 1) {
1585 optlen = IFNAMSIZ - 1;
1587 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1588 if (!dev_ifname) {
1589 return -TARGET_EFAULT;
1591 optname = SO_BINDTODEVICE;
1592 addr_ifname = alloca(IFNAMSIZ);
1593 memcpy(addr_ifname, dev_ifname, optlen);
1594 addr_ifname[optlen] = 0;
1595 ret = get_errno(setsockopt(sockfd, level, optname, addr_ifname, optlen));
1596 unlock_user (dev_ifname, optval_addr, 0);
1597 return ret;
1599 /* Options with 'int' argument. */
1600 case TARGET_SO_DEBUG:
1601 optname = SO_DEBUG;
1602 break;
1603 case TARGET_SO_REUSEADDR:
1604 optname = SO_REUSEADDR;
1605 break;
1606 case TARGET_SO_TYPE:
1607 optname = SO_TYPE;
1608 break;
1609 case TARGET_SO_ERROR:
1610 optname = SO_ERROR;
1611 break;
1612 case TARGET_SO_DONTROUTE:
1613 optname = SO_DONTROUTE;
1614 break;
1615 case TARGET_SO_BROADCAST:
1616 optname = SO_BROADCAST;
1617 break;
1618 case TARGET_SO_SNDBUF:
1619 optname = SO_SNDBUF;
1620 break;
1621 case TARGET_SO_SNDBUFFORCE:
1622 optname = SO_SNDBUFFORCE;
1623 break;
1624 case TARGET_SO_RCVBUF:
1625 optname = SO_RCVBUF;
1626 break;
1627 case TARGET_SO_RCVBUFFORCE:
1628 optname = SO_RCVBUFFORCE;
1629 break;
1630 case TARGET_SO_KEEPALIVE:
1631 optname = SO_KEEPALIVE;
1632 break;
1633 case TARGET_SO_OOBINLINE:
1634 optname = SO_OOBINLINE;
1635 break;
1636 case TARGET_SO_NO_CHECK:
1637 optname = SO_NO_CHECK;
1638 break;
1639 case TARGET_SO_PRIORITY:
1640 optname = SO_PRIORITY;
1641 break;
1642 #ifdef SO_BSDCOMPAT
1643 case TARGET_SO_BSDCOMPAT:
1644 optname = SO_BSDCOMPAT;
1645 break;
1646 #endif
1647 case TARGET_SO_PASSCRED:
1648 optname = SO_PASSCRED;
1649 break;
1650 case TARGET_SO_PASSSEC:
1651 optname = SO_PASSSEC;
1652 break;
1653 case TARGET_SO_TIMESTAMP:
1654 optname = SO_TIMESTAMP;
1655 break;
1656 case TARGET_SO_RCVLOWAT:
1657 optname = SO_RCVLOWAT;
1658 break;
1660 default:
1661 goto unimplemented;
1663 if (optlen < sizeof(uint32_t))
1664 return -TARGET_EINVAL;
1666 if (get_user_u32(val, optval_addr))
1667 return -TARGET_EFAULT;
1668 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1669 break;
1670 default:
1671 unimplemented:
1672 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1673 ret = -TARGET_ENOPROTOOPT;
1675 return ret;
1678 /* do_getsockopt() must return target values and target errnos. */
1679 static abi_long do_getsockopt(int sockfd, int level, int optname,
1680 abi_ulong optval_addr, abi_ulong optlen)
1682 abi_long ret;
1683 int len, val;
1684 socklen_t lv;
1686 switch(level) {
1687 case TARGET_SOL_SOCKET:
1688 level = SOL_SOCKET;
1689 switch (optname) {
1690 /* These don't just return a single integer */
1691 case TARGET_SO_LINGER:
1692 case TARGET_SO_RCVTIMEO:
1693 case TARGET_SO_SNDTIMEO:
1694 case TARGET_SO_PEERNAME:
1695 goto unimplemented;
1696 case TARGET_SO_PEERCRED: {
1697 struct ucred cr;
1698 socklen_t crlen;
1699 struct target_ucred *tcr;
1701 if (get_user_u32(len, optlen)) {
1702 return -TARGET_EFAULT;
1704 if (len < 0) {
1705 return -TARGET_EINVAL;
1708 crlen = sizeof(cr);
1709 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1710 &cr, &crlen));
1711 if (ret < 0) {
1712 return ret;
1714 if (len > crlen) {
1715 len = crlen;
1717 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1718 return -TARGET_EFAULT;
1720 __put_user(cr.pid, &tcr->pid);
1721 __put_user(cr.uid, &tcr->uid);
1722 __put_user(cr.gid, &tcr->gid);
1723 unlock_user_struct(tcr, optval_addr, 1);
1724 if (put_user_u32(len, optlen)) {
1725 return -TARGET_EFAULT;
1727 break;
1729 /* Options with 'int' argument. */
1730 case TARGET_SO_DEBUG:
1731 optname = SO_DEBUG;
1732 goto int_case;
1733 case TARGET_SO_REUSEADDR:
1734 optname = SO_REUSEADDR;
1735 goto int_case;
1736 case TARGET_SO_TYPE:
1737 optname = SO_TYPE;
1738 goto int_case;
1739 case TARGET_SO_ERROR:
1740 optname = SO_ERROR;
1741 goto int_case;
1742 case TARGET_SO_DONTROUTE:
1743 optname = SO_DONTROUTE;
1744 goto int_case;
1745 case TARGET_SO_BROADCAST:
1746 optname = SO_BROADCAST;
1747 goto int_case;
1748 case TARGET_SO_SNDBUF:
1749 optname = SO_SNDBUF;
1750 goto int_case;
1751 case TARGET_SO_RCVBUF:
1752 optname = SO_RCVBUF;
1753 goto int_case;
1754 case TARGET_SO_KEEPALIVE:
1755 optname = SO_KEEPALIVE;
1756 goto int_case;
1757 case TARGET_SO_OOBINLINE:
1758 optname = SO_OOBINLINE;
1759 goto int_case;
1760 case TARGET_SO_NO_CHECK:
1761 optname = SO_NO_CHECK;
1762 goto int_case;
1763 case TARGET_SO_PRIORITY:
1764 optname = SO_PRIORITY;
1765 goto int_case;
1766 #ifdef SO_BSDCOMPAT
1767 case TARGET_SO_BSDCOMPAT:
1768 optname = SO_BSDCOMPAT;
1769 goto int_case;
1770 #endif
1771 case TARGET_SO_PASSCRED:
1772 optname = SO_PASSCRED;
1773 goto int_case;
1774 case TARGET_SO_TIMESTAMP:
1775 optname = SO_TIMESTAMP;
1776 goto int_case;
1777 case TARGET_SO_RCVLOWAT:
1778 optname = SO_RCVLOWAT;
1779 goto int_case;
1780 case TARGET_SO_ACCEPTCONN:
1781 optname = SO_ACCEPTCONN;
1782 goto int_case;
1783 default:
1784 goto int_case;
1786 break;
1787 case SOL_TCP:
1788 /* TCP options all take an 'int' value. */
1789 int_case:
1790 if (get_user_u32(len, optlen))
1791 return -TARGET_EFAULT;
1792 if (len < 0)
1793 return -TARGET_EINVAL;
1794 lv = sizeof(lv);
1795 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1796 if (ret < 0)
1797 return ret;
1798 if (optname == SO_TYPE) {
1799 val = host_to_target_sock_type(val);
1801 if (len > lv)
1802 len = lv;
1803 if (len == 4) {
1804 if (put_user_u32(val, optval_addr))
1805 return -TARGET_EFAULT;
1806 } else {
1807 if (put_user_u8(val, optval_addr))
1808 return -TARGET_EFAULT;
1810 if (put_user_u32(len, optlen))
1811 return -TARGET_EFAULT;
1812 break;
1813 case SOL_IP:
1814 switch(optname) {
1815 case IP_TOS:
1816 case IP_TTL:
1817 case IP_HDRINCL:
1818 case IP_ROUTER_ALERT:
1819 case IP_RECVOPTS:
1820 case IP_RETOPTS:
1821 case IP_PKTINFO:
1822 case IP_MTU_DISCOVER:
1823 case IP_RECVERR:
1824 case IP_RECVTOS:
1825 #ifdef IP_FREEBIND
1826 case IP_FREEBIND:
1827 #endif
1828 case IP_MULTICAST_TTL:
1829 case IP_MULTICAST_LOOP:
1830 if (get_user_u32(len, optlen))
1831 return -TARGET_EFAULT;
1832 if (len < 0)
1833 return -TARGET_EINVAL;
1834 lv = sizeof(lv);
1835 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1836 if (ret < 0)
1837 return ret;
1838 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1839 len = 1;
1840 if (put_user_u32(len, optlen)
1841 || put_user_u8(val, optval_addr))
1842 return -TARGET_EFAULT;
1843 } else {
1844 if (len > sizeof(int))
1845 len = sizeof(int);
1846 if (put_user_u32(len, optlen)
1847 || put_user_u32(val, optval_addr))
1848 return -TARGET_EFAULT;
1850 break;
1851 default:
1852 ret = -TARGET_ENOPROTOOPT;
1853 break;
1855 break;
1856 default:
1857 unimplemented:
1858 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1859 level, optname);
1860 ret = -TARGET_EOPNOTSUPP;
1861 break;
1863 return ret;
1866 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1867 int count, int copy)
1869 struct target_iovec *target_vec;
1870 struct iovec *vec;
1871 abi_ulong total_len, max_len;
1872 int i;
1873 int err = 0;
1874 bool bad_address = false;
1876 if (count == 0) {
1877 errno = 0;
1878 return NULL;
1880 if (count < 0 || count > IOV_MAX) {
1881 errno = EINVAL;
1882 return NULL;
1885 vec = calloc(count, sizeof(struct iovec));
1886 if (vec == NULL) {
1887 errno = ENOMEM;
1888 return NULL;
1891 target_vec = lock_user(VERIFY_READ, target_addr,
1892 count * sizeof(struct target_iovec), 1);
1893 if (target_vec == NULL) {
1894 err = EFAULT;
1895 goto fail2;
1898 /* ??? If host page size > target page size, this will result in a
1899 value larger than what we can actually support. */
1900 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1901 total_len = 0;
1903 for (i = 0; i < count; i++) {
1904 abi_ulong base = tswapal(target_vec[i].iov_base);
1905 abi_long len = tswapal(target_vec[i].iov_len);
1907 if (len < 0) {
1908 err = EINVAL;
1909 goto fail;
1910 } else if (len == 0) {
1911 /* Zero length pointer is ignored. */
1912 vec[i].iov_base = 0;
1913 } else {
1914 vec[i].iov_base = lock_user(type, base, len, copy);
1915 /* If the first buffer pointer is bad, this is a fault. But
1916 * subsequent bad buffers will result in a partial write; this
1917 * is realized by filling the vector with null pointers and
1918 * zero lengths. */
1919 if (!vec[i].iov_base) {
1920 if (i == 0) {
1921 err = EFAULT;
1922 goto fail;
1923 } else {
1924 bad_address = true;
1927 if (bad_address) {
1928 len = 0;
1930 if (len > max_len - total_len) {
1931 len = max_len - total_len;
1934 vec[i].iov_len = len;
1935 total_len += len;
1938 unlock_user(target_vec, target_addr, 0);
1939 return vec;
1941 fail:
1942 while (--i >= 0) {
1943 if (tswapal(target_vec[i].iov_len) > 0) {
1944 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
1947 unlock_user(target_vec, target_addr, 0);
1948 fail2:
1949 free(vec);
1950 errno = err;
1951 return NULL;
1954 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1955 int count, int copy)
1957 struct target_iovec *target_vec;
1958 int i;
1960 target_vec = lock_user(VERIFY_READ, target_addr,
1961 count * sizeof(struct target_iovec), 1);
1962 if (target_vec) {
1963 for (i = 0; i < count; i++) {
1964 abi_ulong base = tswapal(target_vec[i].iov_base);
1965 abi_long len = tswapal(target_vec[i].iov_len);
1966 if (len < 0) {
1967 break;
1969 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1971 unlock_user(target_vec, target_addr, 0);
1974 free(vec);
1977 static inline int target_to_host_sock_type(int *type)
1979 int host_type = 0;
1980 int target_type = *type;
1982 switch (target_type & TARGET_SOCK_TYPE_MASK) {
1983 case TARGET_SOCK_DGRAM:
1984 host_type = SOCK_DGRAM;
1985 break;
1986 case TARGET_SOCK_STREAM:
1987 host_type = SOCK_STREAM;
1988 break;
1989 default:
1990 host_type = target_type & TARGET_SOCK_TYPE_MASK;
1991 break;
1993 if (target_type & TARGET_SOCK_CLOEXEC) {
1994 #if defined(SOCK_CLOEXEC)
1995 host_type |= SOCK_CLOEXEC;
1996 #else
1997 return -TARGET_EINVAL;
1998 #endif
2000 if (target_type & TARGET_SOCK_NONBLOCK) {
2001 #if defined(SOCK_NONBLOCK)
2002 host_type |= SOCK_NONBLOCK;
2003 #elif !defined(O_NONBLOCK)
2004 return -TARGET_EINVAL;
2005 #endif
2007 *type = host_type;
2008 return 0;
2011 /* Try to emulate socket type flags after socket creation. */
2012 static int sock_flags_fixup(int fd, int target_type)
2014 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2015 if (target_type & TARGET_SOCK_NONBLOCK) {
2016 int flags = fcntl(fd, F_GETFL);
2017 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2018 close(fd);
2019 return -TARGET_EINVAL;
2022 #endif
2023 return fd;
2026 /* do_socket() Must return target values and target errnos. */
2027 static abi_long do_socket(int domain, int type, int protocol)
2029 int target_type = type;
2030 int ret;
2032 ret = target_to_host_sock_type(&type);
2033 if (ret) {
2034 return ret;
2037 if (domain == PF_NETLINK)
2038 return -TARGET_EAFNOSUPPORT;
2039 ret = get_errno(socket(domain, type, protocol));
2040 if (ret >= 0) {
2041 ret = sock_flags_fixup(ret, target_type);
2043 return ret;
2046 /* do_bind() Must return target values and target errnos. */
2047 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2048 socklen_t addrlen)
2050 void *addr;
2051 abi_long ret;
2053 if ((int)addrlen < 0) {
2054 return -TARGET_EINVAL;
2057 addr = alloca(addrlen+1);
2059 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2060 if (ret)
2061 return ret;
2063 return get_errno(bind(sockfd, addr, addrlen));
2066 /* do_connect() Must return target values and target errnos. */
2067 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2068 socklen_t addrlen)
2070 void *addr;
2071 abi_long ret;
2073 if ((int)addrlen < 0) {
2074 return -TARGET_EINVAL;
2077 addr = alloca(addrlen+1);
2079 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2080 if (ret)
2081 return ret;
2083 return get_errno(connect(sockfd, addr, addrlen));
2086 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2087 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2088 int flags, int send)
2090 abi_long ret, len;
2091 struct msghdr msg;
2092 int count;
2093 struct iovec *vec;
2094 abi_ulong target_vec;
2096 if (msgp->msg_name) {
2097 msg.msg_namelen = tswap32(msgp->msg_namelen);
2098 msg.msg_name = alloca(msg.msg_namelen+1);
2099 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
2100 msg.msg_namelen);
2101 if (ret) {
2102 goto out2;
2104 } else {
2105 msg.msg_name = NULL;
2106 msg.msg_namelen = 0;
2108 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2109 msg.msg_control = alloca(msg.msg_controllen);
2110 msg.msg_flags = tswap32(msgp->msg_flags);
2112 count = tswapal(msgp->msg_iovlen);
2113 target_vec = tswapal(msgp->msg_iov);
2114 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2115 target_vec, count, send);
2116 if (vec == NULL) {
2117 ret = -host_to_target_errno(errno);
2118 goto out2;
2120 msg.msg_iovlen = count;
2121 msg.msg_iov = vec;
2123 if (send) {
2124 ret = target_to_host_cmsg(&msg, msgp);
2125 if (ret == 0)
2126 ret = get_errno(sendmsg(fd, &msg, flags));
2127 } else {
2128 ret = get_errno(recvmsg(fd, &msg, flags));
2129 if (!is_error(ret)) {
2130 len = ret;
2131 ret = host_to_target_cmsg(msgp, &msg);
2132 if (!is_error(ret)) {
2133 msgp->msg_namelen = tswap32(msg.msg_namelen);
2134 if (msg.msg_name != NULL) {
2135 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2136 msg.msg_name, msg.msg_namelen);
2137 if (ret) {
2138 goto out;
2142 ret = len;
2147 out:
2148 unlock_iovec(vec, target_vec, count, !send);
2149 out2:
2150 return ret;
2153 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2154 int flags, int send)
2156 abi_long ret;
2157 struct target_msghdr *msgp;
2159 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2160 msgp,
2161 target_msg,
2162 send ? 1 : 0)) {
2163 return -TARGET_EFAULT;
2165 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2166 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2167 return ret;
2170 #ifdef TARGET_NR_sendmmsg
2171 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2172 * so it might not have this *mmsg-specific flag either.
2174 #ifndef MSG_WAITFORONE
2175 #define MSG_WAITFORONE 0x10000
2176 #endif
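/* Emulated sendmmsg()/recvmmsg(): vlen is clamped to UIO_MAXIOV and each
 * target_mmsghdr entry is handled by one do_sendrecvmsg_locked() call,
 * with the per-message byte count stored back into msg_len.
 */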
2178 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2179 unsigned int vlen, unsigned int flags,
2180 int send)
2182 struct target_mmsghdr *mmsgp;
2183 abi_long ret = 0;
2184 int i;
2186 if (vlen > UIO_MAXIOV) {
2187 vlen = UIO_MAXIOV;
2190 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2191 if (!mmsgp) {
2192 return -TARGET_EFAULT;
2195 for (i = 0; i < vlen; i++) {
2196 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2197 if (is_error(ret)) {
2198 break;
2200 mmsgp[i].msg_len = tswap32(ret);
2201 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2202 if (flags & MSG_WAITFORONE) {
2203 flags |= MSG_DONTWAIT;
2207 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2209 /* Return number of datagrams sent if we sent any at all;
2210 * otherwise return the error.
2212 if (i) {
2213 return i;
2215 return ret;
2217 #endif
2219 /* If we don't have a system accept4() then just call accept.
2220 * The callsites to do_accept4() will ensure that they don't
2221 * pass a non-zero flags argument in this config.
2223 #ifndef CONFIG_ACCEPT4
2224 static inline int accept4(int sockfd, struct sockaddr *addr,
2225 socklen_t *addrlen, int flags)
2227 assert(flags == 0);
2228 return accept(sockfd, addr, addrlen);
2230 #endif
2232 /* do_accept4() Must return target values and target errnos. */
2233 static abi_long do_accept4(int fd, abi_ulong target_addr,
2234 abi_ulong target_addrlen_addr, int flags)
2236 socklen_t addrlen;
2237 void *addr;
2238 abi_long ret;
2239 int host_flags;
2241 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2243 if (target_addr == 0) {
2244 return get_errno(accept4(fd, NULL, NULL, host_flags));
2247 /* linux returns EINVAL if addrlen pointer is invalid */
2248 if (get_user_u32(addrlen, target_addrlen_addr))
2249 return -TARGET_EINVAL;
2251 if ((int)addrlen < 0) {
2252 return -TARGET_EINVAL;
2255 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2256 return -TARGET_EINVAL;
2258 addr = alloca(addrlen);
2260 ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
2261 if (!is_error(ret)) {
2262 host_to_target_sockaddr(target_addr, addr, addrlen);
2263 if (put_user_u32(addrlen, target_addrlen_addr))
2264 ret = -TARGET_EFAULT;
2266 return ret;
2269 /* do_getpeername() Must return target values and target errnos. */
2270 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2271 abi_ulong target_addrlen_addr)
2273 socklen_t addrlen;
2274 void *addr;
2275 abi_long ret;
2277 if (get_user_u32(addrlen, target_addrlen_addr))
2278 return -TARGET_EFAULT;
2280 if ((int)addrlen < 0) {
2281 return -TARGET_EINVAL;
2284 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2285 return -TARGET_EFAULT;
2287 addr = alloca(addrlen);
2289 ret = get_errno(getpeername(fd, addr, &addrlen));
2290 if (!is_error(ret)) {
2291 host_to_target_sockaddr(target_addr, addr, addrlen);
2292 if (put_user_u32(addrlen, target_addrlen_addr))
2293 ret = -TARGET_EFAULT;
2295 return ret;
2298 /* do_getsockname() Must return target values and target errnos. */
2299 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2300 abi_ulong target_addrlen_addr)
2302 socklen_t addrlen;
2303 void *addr;
2304 abi_long ret;
2306 if (get_user_u32(addrlen, target_addrlen_addr))
2307 return -TARGET_EFAULT;
2309 if ((int)addrlen < 0) {
2310 return -TARGET_EINVAL;
2313 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2314 return -TARGET_EFAULT;
2316 addr = alloca(addrlen);
2318 ret = get_errno(getsockname(fd, addr, &addrlen));
2319 if (!is_error(ret)) {
2320 host_to_target_sockaddr(target_addr, addr, addrlen);
2321 if (put_user_u32(addrlen, target_addrlen_addr))
2322 ret = -TARGET_EFAULT;
2324 return ret;
2327 /* do_socketpair() Must return target values and target errnos. */
2328 static abi_long do_socketpair(int domain, int type, int protocol,
2329 abi_ulong target_tab_addr)
2331 int tab[2];
2332 abi_long ret;
2334 target_to_host_sock_type(&type);
2336 ret = get_errno(socketpair(domain, type, protocol, tab));
2337 if (!is_error(ret)) {
2338 if (put_user_s32(tab[0], target_tab_addr)
2339 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2340 ret = -TARGET_EFAULT;
2342 return ret;
2345 /* do_sendto() Must return target values and target errnos. */
2346 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2347 abi_ulong target_addr, socklen_t addrlen)
2349 void *addr;
2350 void *host_msg;
2351 abi_long ret;
2353 if ((int)addrlen < 0) {
2354 return -TARGET_EINVAL;
2357 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2358 if (!host_msg)
2359 return -TARGET_EFAULT;
2360 if (target_addr) {
2361 addr = alloca(addrlen+1);
2362 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2363 if (ret) {
2364 unlock_user(host_msg, msg, 0);
2365 return ret;
2367 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2368 } else {
2369 ret = get_errno(send(fd, host_msg, len, flags));
2371 unlock_user(host_msg, msg, 0);
2372 return ret;
2375 /* do_recvfrom() Must return target values and target errnos. */
2376 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2377 abi_ulong target_addr,
2378 abi_ulong target_addrlen)
2380 socklen_t addrlen;
2381 void *addr;
2382 void *host_msg;
2383 abi_long ret;
2385 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2386 if (!host_msg)
2387 return -TARGET_EFAULT;
2388 if (target_addr) {
2389 if (get_user_u32(addrlen, target_addrlen)) {
2390 ret = -TARGET_EFAULT;
2391 goto fail;
2393 if ((int)addrlen < 0) {
2394 ret = -TARGET_EINVAL;
2395 goto fail;
2397 addr = alloca(addrlen);
2398 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2399 } else {
2400 addr = NULL; /* To keep compiler quiet. */
2401 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2403 if (!is_error(ret)) {
2404 if (target_addr) {
2405 host_to_target_sockaddr(target_addr, addr, addrlen);
2406 if (put_user_u32(addrlen, target_addrlen)) {
2407 ret = -TARGET_EFAULT;
2408 goto fail;
2411 unlock_user(host_msg, msg, len);
2412 } else {
2413 fail:
2414 unlock_user(host_msg, msg, 0);
2416 return ret;
2419 #ifdef TARGET_NR_socketcall
2420 /* do_socketcall() Must return target values and target errnos. */
2421 static abi_long do_socketcall(int num, abi_ulong vptr)
2423 static const unsigned ac[] = { /* number of arguments per call */
2424 [SOCKOP_socket] = 3, /* domain, type, protocol */
2425 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
2426 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
2427 [SOCKOP_listen] = 2, /* sockfd, backlog */
2428 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
2429 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
2430 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
2431 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
2432 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
2433 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
2434 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
2435 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2436 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2437 [SOCKOP_shutdown] = 2, /* sockfd, how */
2438 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
2439 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
2440 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2441 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2443 abi_long a[6]; /* max 6 args */
2445 /* first, collect the arguments in a[] according to ac[] */
2446 if (num >= 0 && num < ARRAY_SIZE(ac)) {
2447 unsigned i;
2448 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
2449 for (i = 0; i < ac[num]; ++i) {
2450 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
2451 return -TARGET_EFAULT;
2456 /* now when we have the args, actually handle the call */
2457 switch (num) {
2458 case SOCKOP_socket: /* domain, type, protocol */
2459 return do_socket(a[0], a[1], a[2]);
2460 case SOCKOP_bind: /* sockfd, addr, addrlen */
2461 return do_bind(a[0], a[1], a[2]);
2462 case SOCKOP_connect: /* sockfd, addr, addrlen */
2463 return do_connect(a[0], a[1], a[2]);
2464 case SOCKOP_listen: /* sockfd, backlog */
2465 return get_errno(listen(a[0], a[1]));
2466 case SOCKOP_accept: /* sockfd, addr, addrlen */
2467 return do_accept4(a[0], a[1], a[2], 0);
2468 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
2469 return do_accept4(a[0], a[1], a[2], a[3]);
2470 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
2471 return do_getsockname(a[0], a[1], a[2]);
2472 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
2473 return do_getpeername(a[0], a[1], a[2]);
2474 case SOCKOP_socketpair: /* domain, type, protocol, tab */
2475 return do_socketpair(a[0], a[1], a[2], a[3]);
2476 case SOCKOP_send: /* sockfd, msg, len, flags */
2477 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
2478 case SOCKOP_recv: /* sockfd, msg, len, flags */
2479 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
2480 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
2481 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
2482 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
2483 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
2484 case SOCKOP_shutdown: /* sockfd, how */
2485 return get_errno(shutdown(a[0], a[1]));
2486 case SOCKOP_sendmsg: /* sockfd, msg, flags */
2487 return do_sendrecvmsg(a[0], a[1], a[2], 1);
2488 case SOCKOP_recvmsg: /* sockfd, msg, flags */
2489 return do_sendrecvmsg(a[0], a[1], a[2], 0);
2490 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
2491 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
2492 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
2493 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
2494 default:
2495 gemu_log("Unsupported socketcall: %d\n", num);
2496 return -TARGET_ENOSYS;
2499 #endif
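/* Book-keeping for SysV shared memory attachments: do_shmat() records the
 * guest address and size of every attached segment here so that
 * do_shmdt() can later clear the corresponding guest page flags.
 */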
2501 #define N_SHM_REGIONS 32
2503 static struct shm_region {
2504 abi_ulong start;
2505 abi_ulong size;
2506 } shm_regions[N_SHM_REGIONS];
2508 struct target_semid_ds
2510 struct target_ipc_perm sem_perm;
2511 abi_ulong sem_otime;
2512 #if !defined(TARGET_PPC64)
2513 abi_ulong __unused1;
2514 #endif
2515 abi_ulong sem_ctime;
2516 #if !defined(TARGET_PPC64)
2517 abi_ulong __unused2;
2518 #endif
2519 abi_ulong sem_nsems;
2520 abi_ulong __unused3;
2521 abi_ulong __unused4;
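/* struct ipc_perm is converted field by field.  Note the width
 * differences: mode is 32 bits on Alpha, MIPS and PPC (16 elsewhere),
 * and __seq is 32 bits on PPC only, hence the per-target tswap variants.
 */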
2524 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2525 abi_ulong target_addr)
2527 struct target_ipc_perm *target_ip;
2528 struct target_semid_ds *target_sd;
2530 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2531 return -TARGET_EFAULT;
2532 target_ip = &(target_sd->sem_perm);
2533 host_ip->__key = tswap32(target_ip->__key);
2534 host_ip->uid = tswap32(target_ip->uid);
2535 host_ip->gid = tswap32(target_ip->gid);
2536 host_ip->cuid = tswap32(target_ip->cuid);
2537 host_ip->cgid = tswap32(target_ip->cgid);
2538 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2539 host_ip->mode = tswap32(target_ip->mode);
2540 #else
2541 host_ip->mode = tswap16(target_ip->mode);
2542 #endif
2543 #if defined(TARGET_PPC)
2544 host_ip->__seq = tswap32(target_ip->__seq);
2545 #else
2546 host_ip->__seq = tswap16(target_ip->__seq);
2547 #endif
2548 unlock_user_struct(target_sd, target_addr, 0);
2549 return 0;
2552 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2553 struct ipc_perm *host_ip)
2555 struct target_ipc_perm *target_ip;
2556 struct target_semid_ds *target_sd;
2558 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2559 return -TARGET_EFAULT;
2560 target_ip = &(target_sd->sem_perm);
2561 target_ip->__key = tswap32(host_ip->__key);
2562 target_ip->uid = tswap32(host_ip->uid);
2563 target_ip->gid = tswap32(host_ip->gid);
2564 target_ip->cuid = tswap32(host_ip->cuid);
2565 target_ip->cgid = tswap32(host_ip->cgid);
2566 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2567 target_ip->mode = tswap32(host_ip->mode);
2568 #else
2569 target_ip->mode = tswap16(host_ip->mode);
2570 #endif
2571 #if defined(TARGET_PPC)
2572 target_ip->__seq = tswap32(host_ip->__seq);
2573 #else
2574 target_ip->__seq = tswap16(host_ip->__seq);
2575 #endif
2576 unlock_user_struct(target_sd, target_addr, 1);
2577 return 0;
2580 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2581 abi_ulong target_addr)
2583 struct target_semid_ds *target_sd;
2585 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2586 return -TARGET_EFAULT;
2587 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2588 return -TARGET_EFAULT;
2589 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2590 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2591 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2592 unlock_user_struct(target_sd, target_addr, 0);
2593 return 0;
2596 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2597 struct semid_ds *host_sd)
2599 struct target_semid_ds *target_sd;
2601 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2602 return -TARGET_EFAULT;
2603 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2604 return -TARGET_EFAULT;
2605 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2606 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2607 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2608 unlock_user_struct(target_sd, target_addr, 1);
2609 return 0;
2612 struct target_seminfo {
2613 int semmap;
2614 int semmni;
2615 int semmns;
2616 int semmnu;
2617 int semmsl;
2618 int semopm;
2619 int semume;
2620 int semusz;
2621 int semvmx;
2622 int semaem;
2625 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2626 struct seminfo *host_seminfo)
2628 struct target_seminfo *target_seminfo;
2629 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2630 return -TARGET_EFAULT;
2631 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2632 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2633 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2634 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2635 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2636 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2637 __put_user(host_seminfo->semume, &target_seminfo->semume);
2638 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2639 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2640 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2641 unlock_user_struct(target_seminfo, target_addr, 1);
2642 return 0;
2645 union semun {
2646 int val;
2647 struct semid_ds *buf;
2648 unsigned short *array;
2649 struct seminfo *__buf;
2652 union target_semun {
2653 int val;
2654 abi_ulong buf;
2655 abi_ulong array;
2656 abi_ulong __buf;
2659 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2660 abi_ulong target_addr)
2662 int nsems;
2663 unsigned short *array;
2664 union semun semun;
2665 struct semid_ds semid_ds;
2666 int i, ret;
2668 semun.buf = &semid_ds;
2670 ret = semctl(semid, 0, IPC_STAT, semun);
2671 if (ret == -1)
2672 return get_errno(ret);
2674 nsems = semid_ds.sem_nsems;
2676 *host_array = malloc(nsems*sizeof(unsigned short));
2677 if (!*host_array) {
2678 return -TARGET_ENOMEM;
2680 array = lock_user(VERIFY_READ, target_addr,
2681 nsems*sizeof(unsigned short), 1);
2682 if (!array) {
2683 free(*host_array);
2684 return -TARGET_EFAULT;
2687 for(i=0; i<nsems; i++) {
2688 __get_user((*host_array)[i], &array[i]);
2690 unlock_user(array, target_addr, 0);
2692 return 0;
2695 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2696 unsigned short **host_array)
2698 int nsems;
2699 unsigned short *array;
2700 union semun semun;
2701 struct semid_ds semid_ds;
2702 int i, ret;
2704 semun.buf = &semid_ds;
2706 ret = semctl(semid, 0, IPC_STAT, semun);
2707 if (ret == -1)
2708 return get_errno(ret);
2710 nsems = semid_ds.sem_nsems;
2712 array = lock_user(VERIFY_WRITE, target_addr,
2713 nsems*sizeof(unsigned short), 0);
2714 if (!array)
2715 return -TARGET_EFAULT;
2717 for(i=0; i<nsems; i++) {
2718 __put_user((*host_array)[i], &array[i]);
2720 free(*host_array);
2721 unlock_user(array, target_addr, 1);
2723 return 0;
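/* do_semctl(): unpack the target's semun argument as required by each
 * command, call the host semctl(), and convert any returned semid_ds,
 * seminfo or semaphore array back to guest format.  Must return target
 * values and target errnos.
 */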
2726 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2727 union target_semun target_su)
2729 union semun arg;
2730 struct semid_ds dsarg;
2731 unsigned short *array = NULL;
2732 struct seminfo seminfo;
2733 abi_long ret = -TARGET_EINVAL;
2734 abi_long err;
2735 cmd &= 0xff;
2737 switch( cmd ) {
2738 case GETVAL:
2739 case SETVAL:
2740 /* In 64 bit cross-endian situations, we will erroneously pick up
2741 * the wrong half of the union for the "val" element. To rectify
2742 * this, the entire 8-byte structure is byteswapped, followed by
2743 * a swap of the 4 byte val field. In other cases, the data is
2744 * already in proper host byte order. */
2745 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
2746 target_su.buf = tswapal(target_su.buf);
2747 arg.val = tswap32(target_su.val);
2748 } else {
2749 arg.val = target_su.val;
2751 ret = get_errno(semctl(semid, semnum, cmd, arg));
2752 break;
2753 case GETALL:
2754 case SETALL:
2755 err = target_to_host_semarray(semid, &array, target_su.array);
2756 if (err)
2757 return err;
2758 arg.array = array;
2759 ret = get_errno(semctl(semid, semnum, cmd, arg));
2760 err = host_to_target_semarray(semid, target_su.array, &array);
2761 if (err)
2762 return err;
2763 break;
2764 case IPC_STAT:
2765 case IPC_SET:
2766 case SEM_STAT:
2767 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2768 if (err)
2769 return err;
2770 arg.buf = &dsarg;
2771 ret = get_errno(semctl(semid, semnum, cmd, arg));
2772 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2773 if (err)
2774 return err;
2775 break;
2776 case IPC_INFO:
2777 case SEM_INFO:
2778 arg.__buf = &seminfo;
2779 ret = get_errno(semctl(semid, semnum, cmd, arg));
2780 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2781 if (err)
2782 return err;
2783 break;
2784 case IPC_RMID:
2785 case GETPID:
2786 case GETNCNT:
2787 case GETZCNT:
2788 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2789 break;
2792 return ret;
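/* semop(): the sembuf array is input-only, so it is simply converted from
 * guest to host layout element by element before the host call.
 */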
2795 struct target_sembuf {
2796 unsigned short sem_num;
2797 short sem_op;
2798 short sem_flg;
2801 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2802 abi_ulong target_addr,
2803 unsigned nsops)
2805 struct target_sembuf *target_sembuf;
2806 int i;
2808 target_sembuf = lock_user(VERIFY_READ, target_addr,
2809 nsops*sizeof(struct target_sembuf), 1);
2810 if (!target_sembuf)
2811 return -TARGET_EFAULT;
2813 for(i=0; i<nsops; i++) {
2814 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2815 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2816 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2819 unlock_user(target_sembuf, target_addr, 0);
2821 return 0;
2824 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2826 struct sembuf sops[nsops];
2828 if (target_to_host_sembuf(sops, ptr, nsops))
2829 return -TARGET_EFAULT;
2831 return get_errno(semop(semid, sops, nsops));
2834 struct target_msqid_ds
2836 struct target_ipc_perm msg_perm;
2837 abi_ulong msg_stime;
2838 #if TARGET_ABI_BITS == 32
2839 abi_ulong __unused1;
2840 #endif
2841 abi_ulong msg_rtime;
2842 #if TARGET_ABI_BITS == 32
2843 abi_ulong __unused2;
2844 #endif
2845 abi_ulong msg_ctime;
2846 #if TARGET_ABI_BITS == 32
2847 abi_ulong __unused3;
2848 #endif
2849 abi_ulong __msg_cbytes;
2850 abi_ulong msg_qnum;
2851 abi_ulong msg_qbytes;
2852 abi_ulong msg_lspid;
2853 abi_ulong msg_lrpid;
2854 abi_ulong __unused4;
2855 abi_ulong __unused5;
2858 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2859 abi_ulong target_addr)
2861 struct target_msqid_ds *target_md;
2863 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2864 return -TARGET_EFAULT;
2865 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2866 return -TARGET_EFAULT;
2867 host_md->msg_stime = tswapal(target_md->msg_stime);
2868 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2869 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2870 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2871 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2872 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2873 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2874 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2875 unlock_user_struct(target_md, target_addr, 0);
2876 return 0;
2879 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2880 struct msqid_ds *host_md)
2882 struct target_msqid_ds *target_md;
2884 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2885 return -TARGET_EFAULT;
2886 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2887 return -TARGET_EFAULT;
2888 target_md->msg_stime = tswapal(host_md->msg_stime);
2889 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2890 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2891 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2892 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2893 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2894 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2895 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2896 unlock_user_struct(target_md, target_addr, 1);
2897 return 0;
2900 struct target_msginfo {
2901 int msgpool;
2902 int msgmap;
2903 int msgmax;
2904 int msgmnb;
2905 int msgmni;
2906 int msgssz;
2907 int msgtql;
2908 unsigned short int msgseg;
2911 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2912 struct msginfo *host_msginfo)
2914 struct target_msginfo *target_msginfo;
2915 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2916 return -TARGET_EFAULT;
2917 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2918 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2919 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2920 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2921 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2922 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2923 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2924 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2925 unlock_user_struct(target_msginfo, target_addr, 1);
2926 return 0;
2929 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2931 struct msqid_ds dsarg;
2932 struct msginfo msginfo;
2933 abi_long ret = -TARGET_EINVAL;
2935 cmd &= 0xff;
2937 switch (cmd) {
2938 case IPC_STAT:
2939 case IPC_SET:
2940 case MSG_STAT:
2941 if (target_to_host_msqid_ds(&dsarg,ptr))
2942 return -TARGET_EFAULT;
2943 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2944 if (host_to_target_msqid_ds(ptr,&dsarg))
2945 return -TARGET_EFAULT;
2946 break;
2947 case IPC_RMID:
2948 ret = get_errno(msgctl(msgid, cmd, NULL));
2949 break;
2950 case IPC_INFO:
2951 case MSG_INFO:
2952 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2953 if (host_to_target_msginfo(ptr, &msginfo))
2954 return -TARGET_EFAULT;
2955 break;
2958 return ret;
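/* msgsnd()/msgrcv(): the message buffer is marshalled by hand because the
 * guest's mtype (an abi_long) may differ in size and byte order from the
 * host's long; mtext is copied verbatim.
 */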
2961 struct target_msgbuf {
2962 abi_long mtype;
2963 char mtext[1];
2966 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2967 ssize_t msgsz, int msgflg)
2969 struct target_msgbuf *target_mb;
2970 struct msgbuf *host_mb;
2971 abi_long ret = 0;
2973 if (msgsz < 0) {
2974 return -TARGET_EINVAL;
2977 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2978 return -TARGET_EFAULT;
2979 host_mb = malloc(msgsz+sizeof(long));
2980 if (!host_mb) {
2981 unlock_user_struct(target_mb, msgp, 0);
2982 return -TARGET_ENOMEM;
2984 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2985 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2986 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2987 free(host_mb);
2988 unlock_user_struct(target_mb, msgp, 0);
2990 return ret;
2993 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2994 unsigned int msgsz, abi_long msgtyp,
2995 int msgflg)
2997 struct target_msgbuf *target_mb;
2998 char *target_mtext;
2999 struct msgbuf *host_mb;
3000 abi_long ret = 0;
3002 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3003 return -TARGET_EFAULT;
3005 host_mb = g_malloc(msgsz+sizeof(long));
3006 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3008 if (ret > 0) {
3009 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3010 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3011 if (!target_mtext) {
3012 ret = -TARGET_EFAULT;
3013 goto end;
3015 memcpy(target_mb->mtext, host_mb->mtext, ret);
3016 unlock_user(target_mtext, target_mtext_addr, ret);
3019 target_mb->mtype = tswapal(host_mb->mtype);
3021 end:
3022 if (target_mb)
3023 unlock_user_struct(target_mb, msgp, 1);
3024 g_free(host_mb);
3025 return ret;
3028 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3029 abi_ulong target_addr)
3031 struct target_shmid_ds *target_sd;
3033 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3034 return -TARGET_EFAULT;
3035 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3036 return -TARGET_EFAULT;
3037 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3038 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3039 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3040 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3041 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3042 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3043 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3044 unlock_user_struct(target_sd, target_addr, 0);
3045 return 0;
3048 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3049 struct shmid_ds *host_sd)
3051 struct target_shmid_ds *target_sd;
3053 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3054 return -TARGET_EFAULT;
3055 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3056 return -TARGET_EFAULT;
3057 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3058 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3059 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3060 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3061 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3062 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3063 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3064 unlock_user_struct(target_sd, target_addr, 1);
3065 return 0;
3068 struct target_shminfo {
3069 abi_ulong shmmax;
3070 abi_ulong shmmin;
3071 abi_ulong shmmni;
3072 abi_ulong shmseg;
3073 abi_ulong shmall;
3076 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3077 struct shminfo *host_shminfo)
3079 struct target_shminfo *target_shminfo;
3080 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3081 return -TARGET_EFAULT;
3082 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3083 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3084 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3085 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3086 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3087 unlock_user_struct(target_shminfo, target_addr, 1);
3088 return 0;
3091 struct target_shm_info {
3092 int used_ids;
3093 abi_ulong shm_tot;
3094 abi_ulong shm_rss;
3095 abi_ulong shm_swp;
3096 abi_ulong swap_attempts;
3097 abi_ulong swap_successes;
3100 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3101 struct shm_info *host_shm_info)
3103 struct target_shm_info *target_shm_info;
3104 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3105 return -TARGET_EFAULT;
3106 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3107 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3108 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3109 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3110 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3111 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3112 unlock_user_struct(target_shm_info, target_addr, 1);
3113 return 0;
3116 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3118 struct shmid_ds dsarg;
3119 struct shminfo shminfo;
3120 struct shm_info shm_info;
3121 abi_long ret = -TARGET_EINVAL;
3123 cmd &= 0xff;
3125 switch(cmd) {
3126 case IPC_STAT:
3127 case IPC_SET:
3128 case SHM_STAT:
3129 if (target_to_host_shmid_ds(&dsarg, buf))
3130 return -TARGET_EFAULT;
3131 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3132 if (host_to_target_shmid_ds(buf, &dsarg))
3133 return -TARGET_EFAULT;
3134 break;
3135 case IPC_INFO:
3136 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3137 if (host_to_target_shminfo(buf, &shminfo))
3138 return -TARGET_EFAULT;
3139 break;
3140 case SHM_INFO:
3141 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3142 if (host_to_target_shm_info(buf, &shm_info))
3143 return -TARGET_EFAULT;
3144 break;
3145 case IPC_RMID:
3146 case SHM_LOCK:
3147 case SHM_UNLOCK:
3148 ret = get_errno(shmctl(shmid, cmd, NULL));
3149 break;
3152 return ret;
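/* do_shmat(): attach a SysV shared memory segment into guest address
 * space.  The segment size is obtained via IPC_STAT; when the guest does
 * not request an address, a free region is picked with mmap_find_vma()
 * and the host mapping is forced there with SHM_REMAP.  The new range is
 * marked readable (and writable unless SHM_RDONLY) and recorded in
 * shm_regions[] for do_shmdt().
 */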
3155 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3157 abi_long raddr;
3158 void *host_raddr;
3159 struct shmid_ds shm_info;
3160 int i,ret;
3162 /* find out the length of the shared memory segment */
3163 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3164 if (is_error(ret)) {
3165 /* can't get length, bail out */
3166 return ret;
3169 mmap_lock();
3171 if (shmaddr)
3172 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3173 else {
3174 abi_ulong mmap_start;
3176 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3178 if (mmap_start == -1) {
3179 errno = ENOMEM;
3180 host_raddr = (void *)-1;
3181 } else
3182 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3185 if (host_raddr == (void *)-1) {
3186 mmap_unlock();
3187 return get_errno((long)host_raddr);
3189 raddr=h2g((unsigned long)host_raddr);
3191 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3192 PAGE_VALID | PAGE_READ |
3193 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3195 for (i = 0; i < N_SHM_REGIONS; i++) {
3196 if (shm_regions[i].start == 0) {
3197 shm_regions[i].start = raddr;
3198 shm_regions[i].size = shm_info.shm_segsz;
3199 break;
3203 mmap_unlock();
3204 return raddr;
3208 static inline abi_long do_shmdt(abi_ulong shmaddr)
3210 int i;
3212 for (i = 0; i < N_SHM_REGIONS; ++i) {
3213 if (shm_regions[i].start == shmaddr) {
3214 shm_regions[i].start = 0;
3215 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3216 break;
3220 return get_errno(shmdt(g2h(shmaddr)));
3223 #ifdef TARGET_NR_ipc
3224 /* ??? This only works with linear mappings. */
3225 /* do_ipc() must return target values and target errnos. */
3226 static abi_long do_ipc(unsigned int call, abi_long first,
3227 abi_long second, abi_long third,
3228 abi_long ptr, abi_long fifth)
3230 int version;
3231 abi_long ret = 0;
3233 version = call >> 16;
3234 call &= 0xffff;
3236 switch (call) {
3237 case IPCOP_semop:
3238 ret = do_semop(first, ptr, second);
3239 break;
3241 case IPCOP_semget:
3242 ret = get_errno(semget(first, second, third));
3243 break;
3245 case IPCOP_semctl: {
3246 /* The semun argument to semctl is passed by value, so dereference the
3247 * ptr argument. */
3248 abi_ulong atptr;
3249 get_user_ual(atptr, ptr);
3250 ret = do_semctl(first, second, third,
3251 (union target_semun) atptr);
3252 break;
3255 case IPCOP_msgget:
3256 ret = get_errno(msgget(first, second));
3257 break;
3259 case IPCOP_msgsnd:
3260 ret = do_msgsnd(first, ptr, second, third);
3261 break;
3263 case IPCOP_msgctl:
3264 ret = do_msgctl(first, second, ptr);
3265 break;
3267 case IPCOP_msgrcv:
3268 switch (version) {
3269 case 0:
3271 struct target_ipc_kludge {
3272 abi_long msgp;
3273 abi_long msgtyp;
3274 } *tmp;
3276 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3277 ret = -TARGET_EFAULT;
3278 break;
3281 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3283 unlock_user_struct(tmp, ptr, 0);
3284 break;
3286 default:
3287 ret = do_msgrcv(first, ptr, second, fifth, third);
3289 break;
3291 case IPCOP_shmat:
3292 switch (version) {
3293 default:
3295 abi_ulong raddr;
3296 raddr = do_shmat(first, ptr, second);
3297 if (is_error(raddr))
3298 return get_errno(raddr);
3299 if (put_user_ual(raddr, third))
3300 return -TARGET_EFAULT;
3301 break;
3303 case 1:
3304 ret = -TARGET_EINVAL;
3305 break;
3307 break;
3308 case IPCOP_shmdt:
3309 ret = do_shmdt(ptr);
3310 break;
3312 case IPCOP_shmget:
3313 /* IPC_* flag values are the same on all linux platforms */
3314 ret = get_errno(shmget(first, second, third));
3315 break;
3317 /* IPC_* and SHM_* command values are the same on all linux platforms */
3318 case IPCOP_shmctl:
3319 ret = do_shmctl(first, second, ptr);
3320 break;
3321 default:
3322 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3323 ret = -TARGET_ENOSYS;
3324 break;
3326 return ret;
3328 #endif
3330 /* kernel structure types definitions */
3332 #define STRUCT(name, ...) STRUCT_ ## name,
3333 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3334 enum {
3335 #include "syscall_types.h"
3336 STRUCT_MAX
3338 #undef STRUCT
3339 #undef STRUCT_SPECIAL
3341 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3342 #define STRUCT_SPECIAL(name)
3343 #include "syscall_types.h"
3344 #undef STRUCT
3345 #undef STRUCT_SPECIAL
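/* syscall_types.h is included twice with different STRUCT() definitions:
 * the first pass generates the STRUCT_name enumerators used as argtype
 * tags, the second emits a struct_name_def[] argtype array describing
 * each structure's fields for the thunk conversion code.
 */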
3347 typedef struct IOCTLEntry IOCTLEntry;
3349 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3350 int fd, int cmd, abi_long arg);
3352 struct IOCTLEntry {
3353 int target_cmd;
3354 unsigned int host_cmd;
3355 const char *name;
3356 int access;
3357 do_ioctl_fn *do_ioctl;
3358 const argtype arg_type[5];
3361 #define IOC_R 0x0001
3362 #define IOC_W 0x0002
3363 #define IOC_RW (IOC_R | IOC_W)
3365 #define MAX_STRUCT_SIZE 4096
3367 #ifdef CONFIG_FIEMAP
3368 /* So fiemap access checks don't overflow on 32 bit systems.
3369 * This is very slightly smaller than the limit imposed by
3370 * the underlying kernel.
3372 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3373 / sizeof(struct fiemap_extent))
3375 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3376 int fd, int cmd, abi_long arg)
3378 /* The parameter for this ioctl is a struct fiemap followed
3379 * by an array of struct fiemap_extent whose size is set
3380 * in fiemap->fm_extent_count. The array is filled in by the
3381 * ioctl.
3383 int target_size_in, target_size_out;
3384 struct fiemap *fm;
3385 const argtype *arg_type = ie->arg_type;
3386 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3387 void *argptr, *p;
3388 abi_long ret;
3389 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3390 uint32_t outbufsz;
3391 int free_fm = 0;
3393 assert(arg_type[0] == TYPE_PTR);
3394 assert(ie->access == IOC_RW);
3395 arg_type++;
3396 target_size_in = thunk_type_size(arg_type, 0);
3397 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3398 if (!argptr) {
3399 return -TARGET_EFAULT;
3401 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3402 unlock_user(argptr, arg, 0);
3403 fm = (struct fiemap *)buf_temp;
3404 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3405 return -TARGET_EINVAL;
3408 outbufsz = sizeof (*fm) +
3409 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3411 if (outbufsz > MAX_STRUCT_SIZE) {
3412 /* We can't fit all the extents into the fixed size buffer.
3413 * Allocate one that is large enough and use it instead.
3415 fm = malloc(outbufsz);
3416 if (!fm) {
3417 return -TARGET_ENOMEM;
3419 memcpy(fm, buf_temp, sizeof(struct fiemap));
3420 free_fm = 1;
3422 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3423 if (!is_error(ret)) {
3424 target_size_out = target_size_in;
3425 /* An extent_count of 0 means we were only counting the extents
3426 * so there are no structs to copy
3428 if (fm->fm_extent_count != 0) {
3429 target_size_out += fm->fm_mapped_extents * extent_size;
3431 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3432 if (!argptr) {
3433 ret = -TARGET_EFAULT;
3434 } else {
3435 /* Convert the struct fiemap */
3436 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3437 if (fm->fm_extent_count != 0) {
3438 p = argptr + target_size_in;
3439 /* ...and then all the struct fiemap_extents */
3440 for (i = 0; i < fm->fm_mapped_extents; i++) {
3441 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3442 THUNK_TARGET);
3443 p += extent_size;
3446 unlock_user(argptr, arg, target_size_out);
3449 if (free_fm) {
3450 free(fm);
3452 return ret;
3454 #endif
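/* The ifconf ioctl (SIOCGIFCONF): the guest's struct ifconf points at a
 * guest buffer of target-format ifreq entries.  Convert the request,
 * size a host buffer for the equivalent number of host struct ifreq
 * entries, issue the ioctl, then translate ifc_len and each returned
 * ifreq back to the guest layout.
 */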
3456 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3457 int fd, int cmd, abi_long arg)
3459 const argtype *arg_type = ie->arg_type;
3460 int target_size;
3461 void *argptr;
3462 int ret;
3463 struct ifconf *host_ifconf;
3464 uint32_t outbufsz;
3465 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3466 int target_ifreq_size;
3467 int nb_ifreq;
3468 int free_buf = 0;
3469 int i;
3470 int target_ifc_len;
3471 abi_long target_ifc_buf;
3472 int host_ifc_len;
3473 char *host_ifc_buf;
3475 assert(arg_type[0] == TYPE_PTR);
3476 assert(ie->access == IOC_RW);
3478 arg_type++;
3479 target_size = thunk_type_size(arg_type, 0);
3481 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3482 if (!argptr)
3483 return -TARGET_EFAULT;
3484 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3485 unlock_user(argptr, arg, 0);
3487 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3488 target_ifc_len = host_ifconf->ifc_len;
3489 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3491 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3492 nb_ifreq = target_ifc_len / target_ifreq_size;
3493 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3495 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3496 if (outbufsz > MAX_STRUCT_SIZE) {
3497 /* We can't fit all the ifreq entries into the fixed size buffer.
3498 * Allocate one that is large enough and use it instead.
3500 host_ifconf = malloc(outbufsz);
3501 if (!host_ifconf) {
3502 return -TARGET_ENOMEM;
3504 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3505 free_buf = 1;
3507 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3509 host_ifconf->ifc_len = host_ifc_len;
3510 host_ifconf->ifc_buf = host_ifc_buf;
3512 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3513 if (!is_error(ret)) {
3514 /* convert host ifc_len to target ifc_len */
3516 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3517 target_ifc_len = nb_ifreq * target_ifreq_size;
3518 host_ifconf->ifc_len = target_ifc_len;
3520 /* restore target ifc_buf */
3522 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3524 /* copy struct ifconf to target user */
3526 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3527 if (!argptr)
3528 return -TARGET_EFAULT;
3529 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3530 unlock_user(argptr, arg, target_size);
3532 /* copy ifreq[] to target user */
3534 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3535 for (i = 0; i < nb_ifreq ; i++) {
3536 thunk_convert(argptr + i * target_ifreq_size,
3537 host_ifc_buf + i * sizeof(struct ifreq),
3538 ifreq_arg_type, THUNK_TARGET);
3540 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3543 if (free_buf) {
3544 free(host_ifconf);
3547 return ret;
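/* Device-mapper ioctls: struct dm_ioctl carries a variable-sized data
 * area whose layout depends on the command, so each DM_* request is
 * converted explicitly in both directions around the host ioctl() call.
 */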
3550 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3551 int cmd, abi_long arg)
3553 void *argptr;
3554 struct dm_ioctl *host_dm;
3555 abi_long guest_data;
3556 uint32_t guest_data_size;
3557 int target_size;
3558 const argtype *arg_type = ie->arg_type;
3559 abi_long ret;
3560 void *big_buf = NULL;
3561 char *host_data;
3563 arg_type++;
3564 target_size = thunk_type_size(arg_type, 0);
3565 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3566 if (!argptr) {
3567 ret = -TARGET_EFAULT;
3568 goto out;
3570 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3571 unlock_user(argptr, arg, 0);
3573 /* buf_temp is too small, so fetch things into a bigger buffer */
3574 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3575 memcpy(big_buf, buf_temp, target_size);
3576 buf_temp = big_buf;
3577 host_dm = big_buf;
3579 guest_data = arg + host_dm->data_start;
3580 if ((guest_data - arg) < 0) {
3581 ret = -EINVAL;
3582 goto out;
3584 guest_data_size = host_dm->data_size - host_dm->data_start;
3585 host_data = (char*)host_dm + host_dm->data_start;
3587 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3588 switch (ie->host_cmd) {
3589 case DM_REMOVE_ALL:
3590 case DM_LIST_DEVICES:
3591 case DM_DEV_CREATE:
3592 case DM_DEV_REMOVE:
3593 case DM_DEV_SUSPEND:
3594 case DM_DEV_STATUS:
3595 case DM_DEV_WAIT:
3596 case DM_TABLE_STATUS:
3597 case DM_TABLE_CLEAR:
3598 case DM_TABLE_DEPS:
3599 case DM_LIST_VERSIONS:
3600 /* no input data */
3601 break;
3602 case DM_DEV_RENAME:
3603 case DM_DEV_SET_GEOMETRY:
3604 /* data contains only strings */
3605 memcpy(host_data, argptr, guest_data_size);
3606 break;
3607 case DM_TARGET_MSG:
3608 memcpy(host_data, argptr, guest_data_size);
3609 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3610 break;
3611 case DM_TABLE_LOAD:
3613 void *gspec = argptr;
3614 void *cur_data = host_data;
3615 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3616 int spec_size = thunk_type_size(arg_type, 0);
3617 int i;
3619 for (i = 0; i < host_dm->target_count; i++) {
3620 struct dm_target_spec *spec = cur_data;
3621 uint32_t next;
3622 int slen;
3624 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3625 slen = strlen((char*)gspec + spec_size) + 1;
3626 next = spec->next;
3627 spec->next = sizeof(*spec) + slen;
3628 strcpy((char*)&spec[1], gspec + spec_size);
3629 gspec += next;
3630 cur_data += spec->next;
3632 break;
3634 default:
3635 ret = -TARGET_EINVAL;
3636 unlock_user(argptr, guest_data, 0);
3637 goto out;
3639 unlock_user(argptr, guest_data, 0);
3641 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3642 if (!is_error(ret)) {
3643 guest_data = arg + host_dm->data_start;
3644 guest_data_size = host_dm->data_size - host_dm->data_start;
3645 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3646 switch (ie->host_cmd) {
3647 case DM_REMOVE_ALL:
3648 case DM_DEV_CREATE:
3649 case DM_DEV_REMOVE:
3650 case DM_DEV_RENAME:
3651 case DM_DEV_SUSPEND:
3652 case DM_DEV_STATUS:
3653 case DM_TABLE_LOAD:
3654 case DM_TABLE_CLEAR:
3655 case DM_TARGET_MSG:
3656 case DM_DEV_SET_GEOMETRY:
3657 /* no return data */
3658 break;
3659 case DM_LIST_DEVICES:
3661 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3662 uint32_t remaining_data = guest_data_size;
3663 void *cur_data = argptr;
3664 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3665 int nl_size = 12; /* can't use thunk_size due to alignment */
3667 while (1) {
3668 uint32_t next = nl->next;
3669 if (next) {
3670 nl->next = nl_size + (strlen(nl->name) + 1);
3672 if (remaining_data < nl->next) {
3673 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3674 break;
3676 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3677 strcpy(cur_data + nl_size, nl->name);
3678 cur_data += nl->next;
3679 remaining_data -= nl->next;
3680 if (!next) {
3681 break;
3683 nl = (void*)nl + next;
3685 break;
3687 case DM_DEV_WAIT:
3688 case DM_TABLE_STATUS:
3690 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3691 void *cur_data = argptr;
3692 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3693 int spec_size = thunk_type_size(arg_type, 0);
3694 int i;
3696 for (i = 0; i < host_dm->target_count; i++) {
3697 uint32_t next = spec->next;
3698 int slen = strlen((char*)&spec[1]) + 1;
3699 spec->next = (cur_data - argptr) + spec_size + slen;
3700 if (guest_data_size < spec->next) {
3701 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3702 break;
3704 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3705 strcpy(cur_data + spec_size, (char*)&spec[1]);
3706 cur_data = argptr + spec->next;
3707 spec = (void*)host_dm + host_dm->data_start + next;
3709 break;
3711 case DM_TABLE_DEPS:
3713 void *hdata = (void*)host_dm + host_dm->data_start;
3714 int count = *(uint32_t*)hdata;
3715 uint64_t *hdev = hdata + 8;
3716 uint64_t *gdev = argptr + 8;
3717 int i;
3719 *(uint32_t*)argptr = tswap32(count);
3720 for (i = 0; i < count; i++) {
3721 *gdev = tswap64(*hdev);
3722 gdev++;
3723 hdev++;
3725 break;
3727 case DM_LIST_VERSIONS:
3729 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3730 uint32_t remaining_data = guest_data_size;
3731 void *cur_data = argptr;
3732 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3733 int vers_size = thunk_type_size(arg_type, 0);
3735 while (1) {
3736 uint32_t next = vers->next;
3737 if (next) {
3738 vers->next = vers_size + (strlen(vers->name) + 1);
3740 if (remaining_data < vers->next) {
3741 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3742 break;
3744 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3745 strcpy(cur_data + vers_size, vers->name);
3746 cur_data += vers->next;
3747 remaining_data -= vers->next;
3748 if (!next) {
3749 break;
3751 vers = (void*)vers + next;
3753 break;
3755 default:
3756 unlock_user(argptr, guest_data, 0);
3757 ret = -TARGET_EINVAL;
3758 goto out;
3760 unlock_user(argptr, guest_data, guest_data_size);
3762 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3763 if (!argptr) {
3764 ret = -TARGET_EFAULT;
3765 goto out;
3767 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3768 unlock_user(argptr, arg, target_size);
3770 out:
3771 g_free(big_buf);
3772 return ret;
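/* BLKPG: the argument embeds a pointer to a struct blkpg_partition, so
 * convert the outer blkpg_ioctl_arg, fetch and convert the pointed-to
 * partition record, then point the host structure at the local copy
 * before issuing the host ioctl.
 */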
3775 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3776 int cmd, abi_long arg)
3778 void *argptr;
3779 int target_size;
3780 const argtype *arg_type = ie->arg_type;
3781 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
3782 abi_long ret;
3784 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
3785 struct blkpg_partition host_part;
3787 /* Read and convert blkpg */
3788 arg_type++;
3789 target_size = thunk_type_size(arg_type, 0);
3790 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3791 if (!argptr) {
3792 ret = -TARGET_EFAULT;
3793 goto out;
3795 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3796 unlock_user(argptr, arg, 0);
3798 switch (host_blkpg->op) {
3799 case BLKPG_ADD_PARTITION:
3800 case BLKPG_DEL_PARTITION:
3801 /* payload is struct blkpg_partition */
3802 break;
3803 default:
3804 /* Unknown opcode */
3805 ret = -TARGET_EINVAL;
3806 goto out;
3809 /* Read and convert blkpg->data */
3810 arg = (abi_long)(uintptr_t)host_blkpg->data;
3811 target_size = thunk_type_size(part_arg_type, 0);
3812 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3813 if (!argptr) {
3814 ret = -TARGET_EFAULT;
3815 goto out;
3817 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
3818 unlock_user(argptr, arg, 0);
3820 /* Swizzle the data pointer to our local copy and call! */
3821 host_blkpg->data = &host_part;
3822 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
3824 out:
3825 return ret;
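/* Routing ioctls taking a struct rtentry: the structure contains a string
 * pointer (rt_dev) that the generic thunk conversion cannot follow, so
 * the fields are converted one by one here and the device name is locked
 * separately.
 */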
3828 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3829 int fd, int cmd, abi_long arg)
3831 const argtype *arg_type = ie->arg_type;
3832 const StructEntry *se;
3833 const argtype *field_types;
3834 const int *dst_offsets, *src_offsets;
3835 int target_size;
3836 void *argptr;
3837 abi_ulong *target_rt_dev_ptr;
3838 unsigned long *host_rt_dev_ptr;
3839 abi_long ret;
3840 int i;
3842 assert(ie->access == IOC_W);
3843 assert(*arg_type == TYPE_PTR);
3844 arg_type++;
3845 assert(*arg_type == TYPE_STRUCT);
3846 target_size = thunk_type_size(arg_type, 0);
3847 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3848 if (!argptr) {
3849 return -TARGET_EFAULT;
3851 arg_type++;
3852 assert(*arg_type == (int)STRUCT_rtentry);
3853 se = struct_entries + *arg_type++;
3854 assert(se->convert[0] == NULL);
3855 /* convert struct here to be able to catch rt_dev string */
3856 field_types = se->field_types;
3857 dst_offsets = se->field_offsets[THUNK_HOST];
3858 src_offsets = se->field_offsets[THUNK_TARGET];
3859 for (i = 0; i < se->nb_fields; i++) {
3860 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3861 assert(*field_types == TYPE_PTRVOID);
3862 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3863 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3864 if (*target_rt_dev_ptr != 0) {
3865 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3866 tswapal(*target_rt_dev_ptr));
3867 if (!*host_rt_dev_ptr) {
3868 unlock_user(argptr, arg, 0);
3869 return -TARGET_EFAULT;
3871 } else {
3872 *host_rt_dev_ptr = 0;
3874 field_types++;
3875 continue;
3877 field_types = thunk_convert(buf_temp + dst_offsets[i],
3878 argptr + src_offsets[i],
3879 field_types, THUNK_HOST);
3881 unlock_user(argptr, arg, 0);
3883 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3884 if (*host_rt_dev_ptr != 0) {
3885 unlock_user((void *)*host_rt_dev_ptr,
3886 *target_rt_dev_ptr, 0);
3888 return ret;
3891 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
3892 int fd, int cmd, abi_long arg)
3894 int sig = target_to_host_signal(arg);
3895 return get_errno(ioctl(fd, ie->host_cmd, sig));
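/* The ioctl translation table is generated from ioctls.h: plain IOCTL()
 * entries take the generic conversion path in do_ioctl(), while
 * IOCTL_SPECIAL() entries provide their own do_ioctl_fn, such as the
 * helpers above.
 */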
3898 static IOCTLEntry ioctl_entries[] = {
3899 #define IOCTL(cmd, access, ...) \
3900 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3901 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3902 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3903 #include "ioctls.h"
3904 { 0, 0, },
3907 /* ??? Implement proper locking for ioctls. */
3908 /* do_ioctl() Must return target values and target errnos. */
3909 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
3911 const IOCTLEntry *ie;
3912 const argtype *arg_type;
3913 abi_long ret;
3914 uint8_t buf_temp[MAX_STRUCT_SIZE];
3915 int target_size;
3916 void *argptr;
3918 ie = ioctl_entries;
3919 for(;;) {
3920 if (ie->target_cmd == 0) {
3921 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3922 return -TARGET_ENOSYS;
3924 if (ie->target_cmd == cmd)
3925 break;
3926 ie++;
3928 arg_type = ie->arg_type;
3929 #if defined(DEBUG)
3930 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3931 #endif
3932 if (ie->do_ioctl) {
3933 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3936 switch(arg_type[0]) {
3937 case TYPE_NULL:
3938 /* no argument */
3939 ret = get_errno(ioctl(fd, ie->host_cmd));
3940 break;
3941 case TYPE_PTRVOID:
3942 case TYPE_INT:
3943 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3944 break;
3945 case TYPE_PTR:
3946 arg_type++;
3947 target_size = thunk_type_size(arg_type, 0);
3948 switch(ie->access) {
3949 case IOC_R:
3950 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3951 if (!is_error(ret)) {
3952 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3953 if (!argptr)
3954 return -TARGET_EFAULT;
3955 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3956 unlock_user(argptr, arg, target_size);
3958 break;
3959 case IOC_W:
3960 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3961 if (!argptr)
3962 return -TARGET_EFAULT;
3963 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3964 unlock_user(argptr, arg, 0);
3965 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3966 break;
3967 default:
3968 case IOC_RW:
3969 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3970 if (!argptr)
3971 return -TARGET_EFAULT;
3972 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3973 unlock_user(argptr, arg, 0);
3974 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3975 if (!is_error(ret)) {
3976 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3977 if (!argptr)
3978 return -TARGET_EFAULT;
3979 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3980 unlock_user(argptr, arg, target_size);
3982 break;
3984 break;
3985 default:
3986 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3987 (long)cmd, arg_type[0]);
3988 ret = -TARGET_ENOSYS;
3989 break;
3991 return ret;
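/* Termios flag translation tables: each entry maps a masked target value
 * to the corresponding masked host value, so the c_iflag/c_oflag/
 * c_cflag/c_lflag words can be translated field by field.
 */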
3994 static const bitmask_transtbl iflag_tbl[] = {
3995 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3996 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3997 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3998 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3999 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4000 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4001 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4002 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4003 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4004 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4005 { TARGET_IXON, TARGET_IXON, IXON, IXON },
4006 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4007 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4008 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4009 { 0, 0, 0, 0 }
4012 static const bitmask_transtbl oflag_tbl[] = {
4013 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4014 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4015 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4016 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4017 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4018 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4019 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4020 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4021 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4022 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4023 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4024 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4025 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4026 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4027 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4028 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4029 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4030 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4031 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4032 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4033 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4034 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4035 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4036 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4037 { 0, 0, 0, 0 }
4040 static const bitmask_transtbl cflag_tbl[] = {
4041 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4042 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4043 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4044 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4045 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4046 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4047 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4048 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4049 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4050 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4051 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4052 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4053 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4054 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4055 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4056 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4057 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4058 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4059 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4060 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4061 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4062 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4063 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4064 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4065 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4066 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4067 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4068 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4069 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4070 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4071 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4072 { 0, 0, 0, 0 }
4075 static const bitmask_transtbl lflag_tbl[] = {
4076 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4077 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4078 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4079 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4080 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4081 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4082 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4083 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4084 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4085 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4086 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4087 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4088 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4089 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4090 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4091 { 0, 0, 0, 0 }
4094 static void target_to_host_termios (void *dst, const void *src)
4096 struct host_termios *host = dst;
4097 const struct target_termios *target = src;
4099 host->c_iflag =
4100 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4101 host->c_oflag =
4102 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4103 host->c_cflag =
4104 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4105 host->c_lflag =
4106 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4107 host->c_line = target->c_line;
4109 memset(host->c_cc, 0, sizeof(host->c_cc));
4110 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4111 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4112 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4113 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4114 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4115 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4116 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4117 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4118 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4119 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4120 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4121 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4122 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4123 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4124 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4125 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4126 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
4129 static void host_to_target_termios (void *dst, const void *src)
4131 struct target_termios *target = dst;
4132 const struct host_termios *host = src;
4134 target->c_iflag =
4135 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4136 target->c_oflag =
4137 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4138 target->c_cflag =
4139 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4140 target->c_lflag =
4141 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4142 target->c_line = host->c_line;
4144 memset(target->c_cc, 0, sizeof(target->c_cc));
4145 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4146 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4147 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4148 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4149 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4150 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4151 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4152 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4153 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4154 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4155 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4156 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4157 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4158 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4159 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4160 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4161 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
4164 static const StructEntry struct_termios_def = {
4165 .convert = { host_to_target_termios, target_to_host_termios },
4166 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4167 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
4170 static bitmask_transtbl mmap_flags_tbl[] = {
4171 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4172 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4173 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4174 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4175 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4176 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4177 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4178 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4179 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4180 MAP_NORESERVE },
4181 { 0, 0, 0, 0 }
4184 #if defined(TARGET_I386)
4186 /* NOTE: there is really only one LDT shared by all the threads */
4187 static uint8_t *ldt_table;
4189 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4191 int size;
4192 void *p;
4194 if (!ldt_table)
4195 return 0;
4196 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4197 if (size > bytecount)
4198 size = bytecount;
4199 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4200 if (!p)
4201 return -TARGET_EFAULT;
4202 /* ??? Should this be byteswapped? */
4203 memcpy(p, ldt_table, size);
4204 unlock_user(p, ptr, size);
4205 return size;
4208 /* XXX: add locking support */
4209 static abi_long write_ldt(CPUX86State *env,
4210 abi_ulong ptr, unsigned long bytecount, int oldmode)
4212 struct target_modify_ldt_ldt_s ldt_info;
4213 struct target_modify_ldt_ldt_s *target_ldt_info;
4214 int seg_32bit, contents, read_exec_only, limit_in_pages;
4215 int seg_not_present, useable, lm;
4216 uint32_t *lp, entry_1, entry_2;
4218 if (bytecount != sizeof(ldt_info))
4219 return -TARGET_EINVAL;
4220 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4221 return -TARGET_EFAULT;
4222 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4223 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4224 ldt_info.limit = tswap32(target_ldt_info->limit);
4225 ldt_info.flags = tswap32(target_ldt_info->flags);
4226 unlock_user_struct(target_ldt_info, ptr, 0);
4228 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4229 return -TARGET_EINVAL;
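/* Decode the modify_ldt flags word, which packs the descriptor attributes
   as: bit 0 seg_32bit, bits 1-2 contents, bit 3 read_exec_only,
   bit 4 limit_in_pages, bit 5 seg_not_present, bit 6 useable and, for
   64-bit ABIs, bit 7 lm. */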
4230 seg_32bit = ldt_info.flags & 1;
4231 contents = (ldt_info.flags >> 1) & 3;
4232 read_exec_only = (ldt_info.flags >> 3) & 1;
4233 limit_in_pages = (ldt_info.flags >> 4) & 1;
4234 seg_not_present = (ldt_info.flags >> 5) & 1;
4235 useable = (ldt_info.flags >> 6) & 1;
4236 #ifdef TARGET_ABI32
4237 lm = 0;
4238 #else
4239 lm = (ldt_info.flags >> 7) & 1;
4240 #endif
4241 if (contents == 3) {
4242 if (oldmode)
4243 return -TARGET_EINVAL;
4244 if (seg_not_present == 0)
4245 return -TARGET_EINVAL;
4247 /* allocate the LDT */
4248 if (!ldt_table) {
4249 env->ldt.base = target_mmap(0,
4250 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4251 PROT_READ|PROT_WRITE,
4252 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4253 if (env->ldt.base == -1)
4254 return -TARGET_ENOMEM;
4255 memset(g2h(env->ldt.base), 0,
4256 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4257 env->ldt.limit = 0xffff;
4258 ldt_table = g2h(env->ldt.base);
4261 /* NOTE: same code as Linux kernel */
4262 /* Allow LDTs to be cleared by the user. */
4263 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4264 if (oldmode ||
4265 (contents == 0 &&
4266 read_exec_only == 1 &&
4267 seg_32bit == 0 &&
4268 limit_in_pages == 0 &&
4269 seg_not_present == 1 &&
4270 useable == 0 )) {
4271 entry_1 = 0;
4272 entry_2 = 0;
4273 goto install;
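/* Build the two 32-bit words of the x86 descriptor: entry_1 holds
   base[15:0] and limit[15:0]; entry_2 holds the remaining base and limit
   bits plus the attribute bits, with 0x7000 setting the S bit and DPL=3. */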
4277 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4278 (ldt_info.limit & 0x0ffff);
4279 entry_2 = (ldt_info.base_addr & 0xff000000) |
4280 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4281 (ldt_info.limit & 0xf0000) |
4282 ((read_exec_only ^ 1) << 9) |
4283 (contents << 10) |
4284 ((seg_not_present ^ 1) << 15) |
4285 (seg_32bit << 22) |
4286 (limit_in_pages << 23) |
4287 (lm << 21) |
4288 0x7000;
4289 if (!oldmode)
4290 entry_2 |= (useable << 20);
4292 /* Install the new entry ... */
4293 install:
4294 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4295 lp[0] = tswap32(entry_1);
4296 lp[1] = tswap32(entry_2);
4297 return 0;
4300 /* specific and weird i386 syscalls */
4301 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4302 unsigned long bytecount)
4304 abi_long ret;
4306 switch (func) {
4307 case 0:
4308 ret = read_ldt(ptr, bytecount);
4309 break;
4310 case 1:
4311 ret = write_ldt(env, ptr, bytecount, 1);
4312 break;
4313 case 0x11:
4314 ret = write_ldt(env, ptr, bytecount, 0);
4315 break;
4316 default:
4317 ret = -TARGET_ENOSYS;
4318 break;
4320 return ret;
4323 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4324 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4326 uint64_t *gdt_table = g2h(env->gdt.base);
4327 struct target_modify_ldt_ldt_s ldt_info;
4328 struct target_modify_ldt_ldt_s *target_ldt_info;
4329 int seg_32bit, contents, read_exec_only, limit_in_pages;
4330 int seg_not_present, useable, lm;
4331 uint32_t *lp, entry_1, entry_2;
4332 int i;
4334 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4335 if (!target_ldt_info)
4336 return -TARGET_EFAULT;
4337 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4338 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4339 ldt_info.limit = tswap32(target_ldt_info->limit);
4340 ldt_info.flags = tswap32(target_ldt_info->flags);
4341 if (ldt_info.entry_number == -1) {
4342 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4343 if (gdt_table[i] == 0) {
4344 ldt_info.entry_number = i;
4345 target_ldt_info->entry_number = tswap32(i);
4346 break;
4350 unlock_user_struct(target_ldt_info, ptr, 1);
4352 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4353 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4354 return -TARGET_EINVAL;
4355 seg_32bit = ldt_info.flags & 1;
4356 contents = (ldt_info.flags >> 1) & 3;
4357 read_exec_only = (ldt_info.flags >> 3) & 1;
4358 limit_in_pages = (ldt_info.flags >> 4) & 1;
4359 seg_not_present = (ldt_info.flags >> 5) & 1;
4360 useable = (ldt_info.flags >> 6) & 1;
4361 #ifdef TARGET_ABI32
4362 lm = 0;
4363 #else
4364 lm = (ldt_info.flags >> 7) & 1;
4365 #endif
4367 if (contents == 3) {
4368 if (seg_not_present == 0)
4369 return -TARGET_EINVAL;
4372 /* NOTE: same code as Linux kernel */
4373 /* Allow LDTs to be cleared by the user. */
4374 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4375 if ((contents == 0 &&
4376 read_exec_only == 1 &&
4377 seg_32bit == 0 &&
4378 limit_in_pages == 0 &&
4379 seg_not_present == 1 &&
4380 useable == 0 )) {
4381 entry_1 = 0;
4382 entry_2 = 0;
4383 goto install;
4387 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4388 (ldt_info.limit & 0x0ffff);
4389 entry_2 = (ldt_info.base_addr & 0xff000000) |
4390 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4391 (ldt_info.limit & 0xf0000) |
4392 ((read_exec_only ^ 1) << 9) |
4393 (contents << 10) |
4394 ((seg_not_present ^ 1) << 15) |
4395 (seg_32bit << 22) |
4396 (limit_in_pages << 23) |
4397 (useable << 20) |
4398 (lm << 21) |
4399 0x7000;
4401 /* Install the new entry ... */
4402 install:
4403 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4404 lp[0] = tswap32(entry_1);
4405 lp[1] = tswap32(entry_2);
4406 return 0;
4409 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4411 struct target_modify_ldt_ldt_s *target_ldt_info;
4412 uint64_t *gdt_table = g2h(env->gdt.base);
4413 uint32_t base_addr, limit, flags;
4414 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4415 int seg_not_present, useable, lm;
4416 uint32_t *lp, entry_1, entry_2;
4418 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4419 if (!target_ldt_info)
4420 return -TARGET_EFAULT;
4421 idx = tswap32(target_ldt_info->entry_number);
4422 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4423 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4424 unlock_user_struct(target_ldt_info, ptr, 1);
4425 return -TARGET_EINVAL;
4427 lp = (uint32_t *)(gdt_table + idx);
4428 entry_1 = tswap32(lp[0]);
4429 entry_2 = tswap32(lp[1]);
4431 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4432 contents = (entry_2 >> 10) & 3;
4433 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4434 seg_32bit = (entry_2 >> 22) & 1;
4435 limit_in_pages = (entry_2 >> 23) & 1;
4436 useable = (entry_2 >> 20) & 1;
4437 #ifdef TARGET_ABI32
4438 lm = 0;
4439 #else
4440 lm = (entry_2 >> 21) & 1;
4441 #endif
4442 flags = (seg_32bit << 0) | (contents << 1) |
4443 (read_exec_only << 3) | (limit_in_pages << 4) |
4444 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4445 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4446 base_addr = (entry_1 >> 16) |
4447 (entry_2 & 0xff000000) |
4448 ((entry_2 & 0xff) << 16);
4449 target_ldt_info->base_addr = tswapal(base_addr);
4450 target_ldt_info->limit = tswap32(limit);
4451 target_ldt_info->flags = tswap32(flags);
4452 unlock_user_struct(target_ldt_info, ptr, 1);
4453 return 0;
4455 #endif /* TARGET_I386 && TARGET_ABI32 */
4457 #ifndef TARGET_ABI32
4458 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4460 abi_long ret = 0;
4461 abi_ulong val;
4462 int idx;
4464 switch(code) {
4465 case TARGET_ARCH_SET_GS:
4466 case TARGET_ARCH_SET_FS:
4467 if (code == TARGET_ARCH_SET_GS)
4468 idx = R_GS;
4469 else
4470 idx = R_FS;
4471 cpu_x86_load_seg(env, idx, 0);
4472 env->segs[idx].base = addr;
4473 break;
4474 case TARGET_ARCH_GET_GS:
4475 case TARGET_ARCH_GET_FS:
4476 if (code == TARGET_ARCH_GET_GS)
4477 idx = R_GS;
4478 else
4479 idx = R_FS;
4480 val = env->segs[idx].base;
4481 if (put_user(val, addr, abi_ulong))
4482 ret = -TARGET_EFAULT;
4483 break;
4484 default:
4485 ret = -TARGET_EINVAL;
4486 break;
4488 return ret;
4490 #endif
4492 #endif /* defined(TARGET_I386) */
4494 #define NEW_STACK_SIZE 0x40000
4497 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4498 typedef struct {
4499 CPUArchState *env;
4500 pthread_mutex_t mutex;
4501 pthread_cond_t cond;
4502 pthread_t thread;
4503 uint32_t tid;
4504 abi_ulong child_tidptr;
4505 abi_ulong parent_tidptr;
4506 sigset_t sigmask;
4507 } new_thread_info;
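/* new_thread_info is shared between do_fork() and the child running
   clone_func(): the parent blocks on .cond until the child has published
   its TID, while clone_lock keeps the child out of cpu_loop() until the
   parent has finished initializing the new CPU/TLS state. */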
4509 static void * QEMU_NORETURN clone_func(void *arg)
4511 new_thread_info *info = arg;
4512 CPUArchState *env;
4513 CPUState *cpu;
4514 TaskState *ts;
4516 rcu_register_thread();
4517 env = info->env;
4518 cpu = ENV_GET_CPU(env);
4519 thread_cpu = cpu;
4520 ts = (TaskState *)cpu->opaque;
4521 info->tid = gettid();
4522 cpu->host_tid = info->tid;
4523 task_settid(ts);
4524 if (info->child_tidptr)
4525 put_user_u32(info->tid, info->child_tidptr);
4526 if (info->parent_tidptr)
4527 put_user_u32(info->tid, info->parent_tidptr);
4528 /* Enable signals. */
4529 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4530 /* Signal to the parent that we're ready. */
4531 pthread_mutex_lock(&info->mutex);
4532 pthread_cond_broadcast(&info->cond);
4533 pthread_mutex_unlock(&info->mutex);
4534 /* Wait until the parent has finished initializing the TLS state. */
4535 pthread_mutex_lock(&clone_lock);
4536 pthread_mutex_unlock(&clone_lock);
4537 cpu_loop(env);
4538 /* never exits */
4541 /* do_fork() must return host values and target errnos (unlike most
4542 do_*() functions). */
4543 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4544 abi_ulong parent_tidptr, target_ulong newtls,
4545 abi_ulong child_tidptr)
4547 CPUState *cpu = ENV_GET_CPU(env);
4548 int ret;
4549 TaskState *ts;
4550 CPUState *new_cpu;
4551 CPUArchState *new_env;
4552 unsigned int nptl_flags;
4553 sigset_t sigmask;
4555 /* Emulate vfork() with fork() */
4556 if (flags & CLONE_VFORK)
4557 flags &= ~(CLONE_VFORK | CLONE_VM);
4559 if (flags & CLONE_VM) {
4560 TaskState *parent_ts = (TaskState *)cpu->opaque;
4561 new_thread_info info;
4562 pthread_attr_t attr;
4564 ts = g_malloc0(sizeof(TaskState));
4565 init_task_state(ts);
4566 /* we create a new CPU instance. */
4567 new_env = cpu_copy(env);
4568 /* Init regs that differ from the parent. */
4569 cpu_clone_regs(new_env, newsp);
4570 new_cpu = ENV_GET_CPU(new_env);
4571 new_cpu->opaque = ts;
4572 ts->bprm = parent_ts->bprm;
4573 ts->info = parent_ts->info;
4574 nptl_flags = flags;
4575 flags &= ~CLONE_NPTL_FLAGS2;
4577 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4578 ts->child_tidptr = child_tidptr;
4581 if (nptl_flags & CLONE_SETTLS)
4582 cpu_set_tls (new_env, newtls);
4584 /* Grab a mutex so that thread setup appears atomic. */
4585 pthread_mutex_lock(&clone_lock);
4587 memset(&info, 0, sizeof(info));
4588 pthread_mutex_init(&info.mutex, NULL);
4589 pthread_mutex_lock(&info.mutex);
4590 pthread_cond_init(&info.cond, NULL);
4591 info.env = new_env;
4592 if (nptl_flags & CLONE_CHILD_SETTID)
4593 info.child_tidptr = child_tidptr;
4594 if (nptl_flags & CLONE_PARENT_SETTID)
4595 info.parent_tidptr = parent_tidptr;
4597 ret = pthread_attr_init(&attr);
4598 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4599 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4600 /* It is not safe to deliver signals until the child has finished
4601 initializing, so temporarily block all signals. */
4602 sigfillset(&sigmask);
4603 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4605 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4606 /* TODO: Free new CPU state if thread creation failed. */
4608 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4609 pthread_attr_destroy(&attr);
4610 if (ret == 0) {
4611 /* Wait for the child to initialize. */
4612 pthread_cond_wait(&info.cond, &info.mutex);
4613 ret = info.tid;
4614 if (flags & CLONE_PARENT_SETTID)
4615 put_user_u32(ret, parent_tidptr);
4616 } else {
4617 ret = -1;
4619 pthread_mutex_unlock(&info.mutex);
4620 pthread_cond_destroy(&info.cond);
4621 pthread_mutex_destroy(&info.mutex);
4622 pthread_mutex_unlock(&clone_lock);
4623 } else {
4624 /* if CLONE_VM is not set, we consider it a fork */
4625 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4626 return -EINVAL;
4627 fork_start();
4628 ret = fork();
4629 if (ret == 0) {
4630 /* Child Process. */
4631 rcu_after_fork();
4632 cpu_clone_regs(env, newsp);
4633 fork_end(1);
4634 /* There is a race condition here. The parent process could
4635 theoretically read the TID in the child process before the child
4636 TID is set. Avoiding this would require either using ptrace
4637 (not implemented) or having *_tidptr point at a shared memory
4638 mapping. We can't repeat the spinlock hack used above because
4639 the child process gets its own copy of the lock. */
4640 if (flags & CLONE_CHILD_SETTID)
4641 put_user_u32(gettid(), child_tidptr);
4642 if (flags & CLONE_PARENT_SETTID)
4643 put_user_u32(gettid(), parent_tidptr);
4644 ts = (TaskState *)cpu->opaque;
4645 if (flags & CLONE_SETTLS)
4646 cpu_set_tls (env, newtls);
4647 if (flags & CLONE_CHILD_CLEARTID)
4648 ts->child_tidptr = child_tidptr;
4649 } else {
4650 fork_end(0);
4653 return ret;
4656 /* warning: doesn't handle Linux-specific flags... */
4657 static int target_to_host_fcntl_cmd(int cmd)
4659 switch(cmd) {
4660 case TARGET_F_DUPFD:
4661 case TARGET_F_GETFD:
4662 case TARGET_F_SETFD:
4663 case TARGET_F_GETFL:
4664 case TARGET_F_SETFL:
4665 return cmd;
4666 case TARGET_F_GETLK:
4667 return F_GETLK;
4668 case TARGET_F_SETLK:
4669 return F_SETLK;
4670 case TARGET_F_SETLKW:
4671 return F_SETLKW;
4672 case TARGET_F_GETOWN:
4673 return F_GETOWN;
4674 case TARGET_F_SETOWN:
4675 return F_SETOWN;
4676 case TARGET_F_GETSIG:
4677 return F_GETSIG;
4678 case TARGET_F_SETSIG:
4679 return F_SETSIG;
4680 #if TARGET_ABI_BITS == 32
4681 case TARGET_F_GETLK64:
4682 return F_GETLK64;
4683 case TARGET_F_SETLK64:
4684 return F_SETLK64;
4685 case TARGET_F_SETLKW64:
4686 return F_SETLKW64;
4687 #endif
4688 case TARGET_F_SETLEASE:
4689 return F_SETLEASE;
4690 case TARGET_F_GETLEASE:
4691 return F_GETLEASE;
4692 #ifdef F_DUPFD_CLOEXEC
4693 case TARGET_F_DUPFD_CLOEXEC:
4694 return F_DUPFD_CLOEXEC;
4695 #endif
4696 case TARGET_F_NOTIFY:
4697 return F_NOTIFY;
4698 #ifdef F_GETOWN_EX
4699 case TARGET_F_GETOWN_EX:
4700 return F_GETOWN_EX;
4701 #endif
4702 #ifdef F_SETOWN_EX
4703 case TARGET_F_SETOWN_EX:
4704 return F_SETOWN_EX;
4705 #endif
4706 default:
4707 return -TARGET_EINVAL;
4709 return -TARGET_EINVAL;
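/* The flock l_type values are small enumerations rather than independent
   flag bits, so the table below uses an all-ones mask to make each entry
   match the whole value when converting between target and host. */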
4712 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4713 static const bitmask_transtbl flock_tbl[] = {
4714 TRANSTBL_CONVERT(F_RDLCK),
4715 TRANSTBL_CONVERT(F_WRLCK),
4716 TRANSTBL_CONVERT(F_UNLCK),
4717 TRANSTBL_CONVERT(F_EXLCK),
4718 TRANSTBL_CONVERT(F_SHLCK),
4719 { 0, 0, 0, 0 }
4722 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4724 struct flock fl;
4725 struct target_flock *target_fl;
4726 struct flock64 fl64;
4727 struct target_flock64 *target_fl64;
4728 #ifdef F_GETOWN_EX
4729 struct f_owner_ex fox;
4730 struct target_f_owner_ex *target_fox;
4731 #endif
4732 abi_long ret;
4733 int host_cmd = target_to_host_fcntl_cmd(cmd);
4735 if (host_cmd == -TARGET_EINVAL)
4736 return host_cmd;
4738 switch(cmd) {
4739 case TARGET_F_GETLK:
4740 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4741 return -TARGET_EFAULT;
4742 fl.l_type =
4743 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4744 fl.l_whence = tswap16(target_fl->l_whence);
4745 fl.l_start = tswapal(target_fl->l_start);
4746 fl.l_len = tswapal(target_fl->l_len);
4747 fl.l_pid = tswap32(target_fl->l_pid);
4748 unlock_user_struct(target_fl, arg, 0);
4749 ret = get_errno(fcntl(fd, host_cmd, &fl));
4750 if (ret == 0) {
4751 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4752 return -TARGET_EFAULT;
4753 target_fl->l_type =
4754 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4755 target_fl->l_whence = tswap16(fl.l_whence);
4756 target_fl->l_start = tswapal(fl.l_start);
4757 target_fl->l_len = tswapal(fl.l_len);
4758 target_fl->l_pid = tswap32(fl.l_pid);
4759 unlock_user_struct(target_fl, arg, 1);
4761 break;
4763 case TARGET_F_SETLK:
4764 case TARGET_F_SETLKW:
4765 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4766 return -TARGET_EFAULT;
4767 fl.l_type =
4768 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4769 fl.l_whence = tswap16(target_fl->l_whence);
4770 fl.l_start = tswapal(target_fl->l_start);
4771 fl.l_len = tswapal(target_fl->l_len);
4772 fl.l_pid = tswap32(target_fl->l_pid);
4773 unlock_user_struct(target_fl, arg, 0);
4774 ret = get_errno(fcntl(fd, host_cmd, &fl));
4775 break;
4777 case TARGET_F_GETLK64:
4778 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4779 return -TARGET_EFAULT;
4780 fl64.l_type =
4781 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4782 fl64.l_whence = tswap16(target_fl64->l_whence);
4783 fl64.l_start = tswap64(target_fl64->l_start);
4784 fl64.l_len = tswap64(target_fl64->l_len);
4785 fl64.l_pid = tswap32(target_fl64->l_pid);
4786 unlock_user_struct(target_fl64, arg, 0);
4787 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4788 if (ret == 0) {
4789 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4790 return -TARGET_EFAULT;
4791 target_fl64->l_type =
4792 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4793 target_fl64->l_whence = tswap16(fl64.l_whence);
4794 target_fl64->l_start = tswap64(fl64.l_start);
4795 target_fl64->l_len = tswap64(fl64.l_len);
4796 target_fl64->l_pid = tswap32(fl64.l_pid);
4797 unlock_user_struct(target_fl64, arg, 1);
4799 break;
4800 case TARGET_F_SETLK64:
4801 case TARGET_F_SETLKW64:
4802 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4803 return -TARGET_EFAULT;
4804 fl64.l_type =
4805 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4806 fl64.l_whence = tswap16(target_fl64->l_whence);
4807 fl64.l_start = tswap64(target_fl64->l_start);
4808 fl64.l_len = tswap64(target_fl64->l_len);
4809 fl64.l_pid = tswap32(target_fl64->l_pid);
4810 unlock_user_struct(target_fl64, arg, 0);
4811 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4812 break;
4814 case TARGET_F_GETFL:
4815 ret = get_errno(fcntl(fd, host_cmd, arg));
4816 if (ret >= 0) {
4817 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4819 break;
4821 case TARGET_F_SETFL:
4822 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4823 break;
4825 #ifdef F_GETOWN_EX
4826 case TARGET_F_GETOWN_EX:
4827 ret = get_errno(fcntl(fd, host_cmd, &fox));
4828 if (ret >= 0) {
4829 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
4830 return -TARGET_EFAULT;
4831 target_fox->type = tswap32(fox.type);
4832 target_fox->pid = tswap32(fox.pid);
4833 unlock_user_struct(target_fox, arg, 1);
4835 break;
4836 #endif
4838 #ifdef F_SETOWN_EX
4839 case TARGET_F_SETOWN_EX:
4840 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
4841 return -TARGET_EFAULT;
4842 fox.type = tswap32(target_fox->type);
4843 fox.pid = tswap32(target_fox->pid);
4844 unlock_user_struct(target_fox, arg, 0);
4845 ret = get_errno(fcntl(fd, host_cmd, &fox));
4846 break;
4847 #endif
4849 case TARGET_F_SETOWN:
4850 case TARGET_F_GETOWN:
4851 case TARGET_F_SETSIG:
4852 case TARGET_F_GETSIG:
4853 case TARGET_F_SETLEASE:
4854 case TARGET_F_GETLEASE:
4855 ret = get_errno(fcntl(fd, host_cmd, arg));
4856 break;
4858 default:
4859 ret = get_errno(fcntl(fd, cmd, arg));
4860 break;
4862 return ret;
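/* Helpers for the legacy 16-bit UID/GID syscalls: host IDs above 65535 are
   clamped to the overflow ID 65534 on the way to the guest, and a 16-bit
   -1 coming from the guest is sign-extended so that "no change" arguments
   keep their meaning. */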
4865 #ifdef USE_UID16
4867 static inline int high2lowuid(int uid)
4869 if (uid > 65535)
4870 return 65534;
4871 else
4872 return uid;
4875 static inline int high2lowgid(int gid)
4877 if (gid > 65535)
4878 return 65534;
4879 else
4880 return gid;
4883 static inline int low2highuid(int uid)
4885 if ((int16_t)uid == -1)
4886 return -1;
4887 else
4888 return uid;
4891 static inline int low2highgid(int gid)
4893 if ((int16_t)gid == -1)
4894 return -1;
4895 else
4896 return gid;
4898 static inline int tswapid(int id)
4900 return tswap16(id);
4903 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4905 #else /* !USE_UID16 */
4906 static inline int high2lowuid(int uid)
4908 return uid;
4910 static inline int high2lowgid(int gid)
4912 return gid;
4914 static inline int low2highuid(int uid)
4916 return uid;
4918 static inline int low2highgid(int gid)
4920 return gid;
4922 static inline int tswapid(int id)
4924 return tswap32(id);
4927 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4929 #endif /* USE_UID16 */
4931 void syscall_init(void)
4933 IOCTLEntry *ie;
4934 const argtype *arg_type;
4935 int size;
4936 int i;
4938 thunk_init(STRUCT_MAX);
4940 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4941 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4942 #include "syscall_types.h"
4943 #undef STRUCT
4944 #undef STRUCT_SPECIAL
4946 /* Build target_to_host_errno_table[] table from
4947 * host_to_target_errno_table[]. */
4948 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4949 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4952 /* We patch the ioctl size if necessary. We rely on the fact that
4953 no ioctl has all the bits set to '1' in the size field. */
4954 ie = ioctl_entries;
4955 while (ie->target_cmd != 0) {
4956 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4957 TARGET_IOC_SIZEMASK) {
4958 arg_type = ie->arg_type;
4959 if (arg_type[0] != TYPE_PTR) {
4960 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4961 ie->target_cmd);
4962 exit(1);
4964 arg_type++;
4965 size = thunk_type_size(arg_type, 0);
4966 ie->target_cmd = (ie->target_cmd &
4967 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4968 (size << TARGET_IOC_SIZESHIFT);
4971 /* automatic consistency check when host and target arch are the same */
4972 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4973 (defined(__x86_64__) && defined(TARGET_X86_64))
4974 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4975 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4976 ie->name, ie->target_cmd, ie->host_cmd);
4978 #endif
4979 ie++;
4983 #if TARGET_ABI_BITS == 32
4984 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4986 #ifdef TARGET_WORDS_BIGENDIAN
4987 return ((uint64_t)word0 << 32) | word1;
4988 #else
4989 return ((uint64_t)word1 << 32) | word0;
4990 #endif
4992 #else /* TARGET_ABI_BITS == 32 */
4993 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4995 return word0;
4997 #endif /* TARGET_ABI_BITS != 32 */
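/* On 32-bit ABIs a 64-bit file offset arrives split across two syscall
   arguments; target_offset64() reassembles it respecting the target's
   endianness, and regpairs_aligned() is used below to skip one slot on
   ABIs that pass such pairs in aligned register pairs. */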
4999 #ifdef TARGET_NR_truncate64
5000 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
5001 abi_long arg2,
5002 abi_long arg3,
5003 abi_long arg4)
5005 if (regpairs_aligned(cpu_env)) {
5006 arg2 = arg3;
5007 arg3 = arg4;
5009 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
5011 #endif
5013 #ifdef TARGET_NR_ftruncate64
5014 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
5015 abi_long arg2,
5016 abi_long arg3,
5017 abi_long arg4)
5019 if (regpairs_aligned(cpu_env)) {
5020 arg2 = arg3;
5021 arg3 = arg4;
5023 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
5025 #endif
5027 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
5028 abi_ulong target_addr)
5030 struct target_timespec *target_ts;
5032 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
5033 return -TARGET_EFAULT;
5034 host_ts->tv_sec = tswapal(target_ts->tv_sec);
5035 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
5036 unlock_user_struct(target_ts, target_addr, 0);
5037 return 0;
5040 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
5041 struct timespec *host_ts)
5043 struct target_timespec *target_ts;
5045 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
5046 return -TARGET_EFAULT;
5047 target_ts->tv_sec = tswapal(host_ts->tv_sec);
5048 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
5049 unlock_user_struct(target_ts, target_addr, 1);
5050 return 0;
5053 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5054 abi_ulong target_addr)
5056 struct target_itimerspec *target_itspec;
5058 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5059 return -TARGET_EFAULT;
5062 host_itspec->it_interval.tv_sec =
5063 tswapal(target_itspec->it_interval.tv_sec);
5064 host_itspec->it_interval.tv_nsec =
5065 tswapal(target_itspec->it_interval.tv_nsec);
5066 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5067 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5069 unlock_user_struct(target_itspec, target_addr, 1);
5070 return 0;
5073 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5074 struct itimerspec *host_its)
5076 struct target_itimerspec *target_itspec;
5078 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5079 return -TARGET_EFAULT;
5082 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5083 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5085 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5086 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5088 unlock_user_struct(target_itspec, target_addr, 0);
5089 return 0;
5092 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5093 abi_ulong target_addr)
5095 struct target_sigevent *target_sevp;
5097 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5098 return -TARGET_EFAULT;
5101 /* This union is awkward on 64 bit systems because it has a 32 bit
5102 * integer and a pointer in it; we follow the conversion approach
5103 * used for handling sigval types in signal.c so the guest should get
5104 * the correct value back even if we did a 64 bit byteswap and it's
5105 * using the 32 bit integer.
5107 host_sevp->sigev_value.sival_ptr =
5108 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5109 host_sevp->sigev_signo =
5110 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5111 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5112 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5114 unlock_user_struct(target_sevp, target_addr, 1);
5115 return 0;
5118 #if defined(TARGET_NR_mlockall)
5119 static inline int target_to_host_mlockall_arg(int arg)
5121 int result = 0;
5123 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
5124 result |= MCL_CURRENT;
5126 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
5127 result |= MCL_FUTURE;
5129 return result;
5131 #endif
5133 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
5134 static inline abi_long host_to_target_stat64(void *cpu_env,
5135 abi_ulong target_addr,
5136 struct stat *host_st)
5138 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5139 if (((CPUARMState *)cpu_env)->eabi) {
5140 struct target_eabi_stat64 *target_st;
5142 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5143 return -TARGET_EFAULT;
5144 memset(target_st, 0, sizeof(struct target_eabi_stat64));
5145 __put_user(host_st->st_dev, &target_st->st_dev);
5146 __put_user(host_st->st_ino, &target_st->st_ino);
5147 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5148 __put_user(host_st->st_ino, &target_st->__st_ino);
5149 #endif
5150 __put_user(host_st->st_mode, &target_st->st_mode);
5151 __put_user(host_st->st_nlink, &target_st->st_nlink);
5152 __put_user(host_st->st_uid, &target_st->st_uid);
5153 __put_user(host_st->st_gid, &target_st->st_gid);
5154 __put_user(host_st->st_rdev, &target_st->st_rdev);
5155 __put_user(host_st->st_size, &target_st->st_size);
5156 __put_user(host_st->st_blksize, &target_st->st_blksize);
5157 __put_user(host_st->st_blocks, &target_st->st_blocks);
5158 __put_user(host_st->st_atime, &target_st->target_st_atime);
5159 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5160 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5161 unlock_user_struct(target_st, target_addr, 1);
5162 } else
5163 #endif
5165 #if defined(TARGET_HAS_STRUCT_STAT64)
5166 struct target_stat64 *target_st;
5167 #else
5168 struct target_stat *target_st;
5169 #endif
5171 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5172 return -TARGET_EFAULT;
5173 memset(target_st, 0, sizeof(*target_st));
5174 __put_user(host_st->st_dev, &target_st->st_dev);
5175 __put_user(host_st->st_ino, &target_st->st_ino);
5176 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5177 __put_user(host_st->st_ino, &target_st->__st_ino);
5178 #endif
5179 __put_user(host_st->st_mode, &target_st->st_mode);
5180 __put_user(host_st->st_nlink, &target_st->st_nlink);
5181 __put_user(host_st->st_uid, &target_st->st_uid);
5182 __put_user(host_st->st_gid, &target_st->st_gid);
5183 __put_user(host_st->st_rdev, &target_st->st_rdev);
5184 /* XXX: better use of kernel struct */
5185 __put_user(host_st->st_size, &target_st->st_size);
5186 __put_user(host_st->st_blksize, &target_st->st_blksize);
5187 __put_user(host_st->st_blocks, &target_st->st_blocks);
5188 __put_user(host_st->st_atime, &target_st->target_st_atime);
5189 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5190 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5191 unlock_user_struct(target_st, target_addr, 1);
5194 return 0;
5196 #endif
5198 /* ??? Using host futex calls even when target atomic operations
5199 are not really atomic probably breaks things. However, implementing
5200 futexes locally would make futexes shared between multiple processes
5201 tricky. In any case they're probably useless, because guest atomic
5202 operations won't work either. */
5203 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5204 target_ulong uaddr2, int val3)
5206 struct timespec ts, *pts;
5207 int base_op;
5209 /* ??? We assume FUTEX_* constants are the same on both host
5210 and target. */
5211 #ifdef FUTEX_CMD_MASK
5212 base_op = op & FUTEX_CMD_MASK;
5213 #else
5214 base_op = op;
5215 #endif
5216 switch (base_op) {
5217 case FUTEX_WAIT:
5218 case FUTEX_WAIT_BITSET:
5219 if (timeout) {
5220 pts = &ts;
5221 target_to_host_timespec(pts, timeout);
5222 } else {
5223 pts = NULL;
5225 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
5226 pts, NULL, val3));
5227 case FUTEX_WAKE:
5228 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5229 case FUTEX_FD:
5230 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5231 case FUTEX_REQUEUE:
5232 case FUTEX_CMP_REQUEUE:
5233 case FUTEX_WAKE_OP:
5234 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5235 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5236 But the prototype takes a `struct timespec *'; insert casts
5237 to satisfy the compiler. We do not need to tswap TIMEOUT
5238 since it's not compared to guest memory. */
5239 pts = (struct timespec *)(uintptr_t) timeout;
5240 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
5241 g2h(uaddr2),
5242 (base_op == FUTEX_CMP_REQUEUE
5243 ? tswap32(val3)
5244 : val3)));
5245 default:
5246 return -TARGET_ENOSYS;
5250 /* Map host to target signal numbers for the wait family of syscalls.
5251 Assume all other status bits are the same. */
5252 int host_to_target_waitstatus(int status)
5254 if (WIFSIGNALED(status)) {
5255 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
5257 if (WIFSTOPPED(status)) {
5258 return (host_to_target_signal(WSTOPSIG(status)) << 8)
5259 | (status & 0xff);
5261 return status;
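/* The open_self_*() helpers below synthesize the contents of selected
   /proc/self files so the guest sees values that describe the emulated
   process rather than the QEMU binary itself. */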
5264 static int open_self_cmdline(void *cpu_env, int fd)
5266 int fd_orig = -1;
5267 bool word_skipped = false;
5269 fd_orig = open("/proc/self/cmdline", O_RDONLY);
5270 if (fd_orig < 0) {
5271 return fd_orig;
5274 while (true) {
5275 ssize_t nb_read;
5276 char buf[128];
5277 char *cp_buf = buf;
5279 nb_read = read(fd_orig, buf, sizeof(buf));
5280 if (nb_read < 0) {
5281 fd_orig = close(fd_orig);
5282 return -1;
5283 } else if (nb_read == 0) {
5284 break;
5287 if (!word_skipped) {
5288 /* Skip the first string, which is the path to qemu-*-static
5289 instead of the actual command. */
5290 cp_buf = memchr(buf, 0, sizeof(buf));
5291 if (cp_buf) {
5292 /* Null byte found, skip one string */
5293 cp_buf++;
5294 nb_read -= cp_buf - buf;
5295 word_skipped = true;
5299 if (word_skipped) {
5300 if (write(fd, cp_buf, nb_read) != nb_read) {
5301 close(fd_orig);
5302 return -1;
5307 return close(fd_orig);
5310 static int open_self_maps(void *cpu_env, int fd)
5312 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5313 TaskState *ts = cpu->opaque;
5314 FILE *fp;
5315 char *line = NULL;
5316 size_t len = 0;
5317 ssize_t read;
5319 fp = fopen("/proc/self/maps", "r");
5320 if (fp == NULL) {
5321 return -EACCES;
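/* Walk the host's /proc/self/maps and re-emit only the ranges that map
   back into the guest address space, rewriting the addresses with h2g()
   and tagging the guest stack. */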
5324 while ((read = getline(&line, &len, fp)) != -1) {
5325 int fields, dev_maj, dev_min, inode;
5326 uint64_t min, max, offset;
5327 char flag_r, flag_w, flag_x, flag_p;
5328 char path[512] = "";
5329 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5330 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5331 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5333 if ((fields < 10) || (fields > 11)) {
5334 continue;
5336 if (h2g_valid(min)) {
5337 int flags = page_get_flags(h2g(min));
5338 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5339 if (page_check_range(h2g(min), max - min, flags) == -1) {
5340 continue;
5342 if (h2g(min) == ts->info->stack_limit) {
5343 pstrcpy(path, sizeof(path), " [stack]");
5345 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5346 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5347 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5348 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5349 path[0] ? " " : "", path);
5353 free(line);
5354 fclose(fp);
5356 return 0;
5359 static int open_self_stat(void *cpu_env, int fd)
5361 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5362 TaskState *ts = cpu->opaque;
5363 abi_ulong start_stack = ts->info->start_stack;
5364 int i;
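/* Emit the 44 space-separated fields of /proc/self/stat; only pid
   (field 0), comm (field 1) and the start-of-stack address (field 27)
   are given real values, the rest are reported as 0. */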
5366 for (i = 0; i < 44; i++) {
5367 char buf[128];
5368 int len;
5369 uint64_t val = 0;
5371 if (i == 0) {
5372 /* pid */
5373 val = getpid();
5374 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5375 } else if (i == 1) {
5376 /* app name */
5377 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5378 } else if (i == 27) {
5379 /* stack bottom */
5380 val = start_stack;
5381 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5382 } else {
5383 /* for the rest, there is MasterCard */
5384 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5387 len = strlen(buf);
5388 if (write(fd, buf, len) != len) {
5389 return -1;
5393 return 0;
5396 static int open_self_auxv(void *cpu_env, int fd)
5398 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5399 TaskState *ts = cpu->opaque;
5400 abi_ulong auxv = ts->info->saved_auxv;
5401 abi_ulong len = ts->info->auxv_len;
5402 char *ptr;
5405 * The auxiliary vector is stored on the target process stack.
5406 * Read in the whole auxv vector and copy it to the file.
5408 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5409 if (ptr != NULL) {
5410 while (len > 0) {
5411 ssize_t r;
5412 r = write(fd, ptr, len);
5413 if (r <= 0) {
5414 break;
5416 len -= r;
5417 ptr += r;
5419 lseek(fd, 0, SEEK_SET);
5420 unlock_user(ptr, auxv, len);
5423 return 0;
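/* is_proc_myself() returns non-zero when filename refers to the given
   entry under /proc/self/ or /proc/<pid>/ for our own PID. */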
5426 static int is_proc_myself(const char *filename, const char *entry)
5428 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5429 filename += strlen("/proc/");
5430 if (!strncmp(filename, "self/", strlen("self/"))) {
5431 filename += strlen("self/");
5432 } else if (*filename >= '1' && *filename <= '9') {
5433 char myself[80];
5434 snprintf(myself, sizeof(myself), "%d/", getpid());
5435 if (!strncmp(filename, myself, strlen(myself))) {
5436 filename += strlen(myself);
5437 } else {
5438 return 0;
5440 } else {
5441 return 0;
5443 if (!strcmp(filename, entry)) {
5444 return 1;
5447 return 0;
5450 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5451 static int is_proc(const char *filename, const char *entry)
5453 return strcmp(filename, entry) == 0;
5456 static int open_net_route(void *cpu_env, int fd)
5458 FILE *fp;
5459 char *line = NULL;
5460 size_t len = 0;
5461 ssize_t read;
5463 fp = fopen("/proc/net/route", "r");
5464 if (fp == NULL) {
5465 return -EACCES;
5468 /* read header */
5470 read = getline(&line, &len, fp);
5471 dprintf(fd, "%s", line);
5473 /* read routes */
5475 while ((read = getline(&line, &len, fp)) != -1) {
5476 char iface[16];
5477 uint32_t dest, gw, mask;
5478 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5479 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5480 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5481 &mask, &mtu, &window, &irtt);
5482 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5483 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5484 metric, tswap32(mask), mtu, window, irtt);
5487 free(line);
5488 fclose(fp);
5490 return 0;
5492 #endif
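/* do_openat() intercepts opens of the /proc files emulated above: the
   matching fill() callback writes the synthesized contents into an
   unlinked temporary file, whose descriptor is then returned to the guest
   in place of the real file. */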
5494 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
5496 struct fake_open {
5497 const char *filename;
5498 int (*fill)(void *cpu_env, int fd);
5499 int (*cmp)(const char *s1, const char *s2);
5501 const struct fake_open *fake_open;
5502 static const struct fake_open fakes[] = {
5503 { "maps", open_self_maps, is_proc_myself },
5504 { "stat", open_self_stat, is_proc_myself },
5505 { "auxv", open_self_auxv, is_proc_myself },
5506 { "cmdline", open_self_cmdline, is_proc_myself },
5507 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5508 { "/proc/net/route", open_net_route, is_proc },
5509 #endif
5510 { NULL, NULL, NULL }
5513 if (is_proc_myself(pathname, "exe")) {
5514 int execfd = qemu_getauxval(AT_EXECFD);
5515 return execfd ? execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode));
5518 for (fake_open = fakes; fake_open->filename; fake_open++) {
5519 if (fake_open->cmp(pathname, fake_open->filename)) {
5520 break;
5524 if (fake_open->filename) {
5525 const char *tmpdir;
5526 char filename[PATH_MAX];
5527 int fd, r;
5529 /* create a temporary file to hold the synthesized contents */
5530 tmpdir = getenv("TMPDIR");
5531 if (!tmpdir)
5532 tmpdir = "/tmp";
5533 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5534 fd = mkstemp(filename);
5535 if (fd < 0) {
5536 return fd;
5538 unlink(filename);
5540 if ((r = fake_open->fill(cpu_env, fd))) {
5541 close(fd);
5542 return r;
5544 lseek(fd, 0, SEEK_SET);
5546 return fd;
5549 return get_errno(sys_openat(dirfd, path(pathname), flags, mode));
5552 #define TIMER_MAGIC 0x0caf0000
5553 #define TIMER_MAGIC_MASK 0xffff0000
5555 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
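/* Guest-visible timer IDs are the index into g_posix_timers with
   TIMER_MAGIC folded into the upper 16 bits, so stray or stale values can
   be rejected here before the index is used. */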
5556 static target_timer_t get_timer_id(abi_long arg)
5558 target_timer_t timerid = arg;
5560 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
5561 return -TARGET_EINVAL;
5564 timerid &= 0xffff;
5566 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
5567 return -TARGET_EINVAL;
5570 return timerid;
5573 /* do_syscall() should always have a single exit point at the end so
5574 that actions, such as logging of syscall results, can be performed.
5575 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
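/* do_syscall() always receives eight argument slots; syscalls that take
   fewer simply ignore the extras, and on 32-bit ABIs 64-bit arguments
   arrive split across two consecutive slots. */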
5576 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5577 abi_long arg2, abi_long arg3, abi_long arg4,
5578 abi_long arg5, abi_long arg6, abi_long arg7,
5579 abi_long arg8)
5581 CPUState *cpu = ENV_GET_CPU(cpu_env);
5582 abi_long ret;
5583 struct stat st;
5584 struct statfs stfs;
5585 void *p;
5587 #ifdef DEBUG
5588 gemu_log("syscall %d", num);
5589 #endif
5590 if(do_strace)
5591 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5593 switch(num) {
5594 case TARGET_NR_exit:
5595 /* In old applications this may be used to implement _exit(2).
5596 However, in threaded applications it is used for thread termination,
5597 and _exit_group is used for application termination.
5598 Do thread termination if we have more than one thread. */
5599 /* FIXME: This probably breaks if a signal arrives. We should probably
5600 be disabling signals. */
5601 if (CPU_NEXT(first_cpu)) {
5602 TaskState *ts;
5604 cpu_list_lock();
5605 /* Remove the CPU from the list. */
5606 QTAILQ_REMOVE(&cpus, cpu, node);
5607 cpu_list_unlock();
5608 ts = cpu->opaque;
5609 if (ts->child_tidptr) {
5610 put_user_u32(0, ts->child_tidptr);
5611 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5612 NULL, NULL, 0);
5614 thread_cpu = NULL;
5615 object_unref(OBJECT(cpu));
5616 g_free(ts);
5617 rcu_unregister_thread();
5618 pthread_exit(NULL);
5620 #ifdef TARGET_GPROF
5621 _mcleanup();
5622 #endif
5623 gdb_exit(cpu_env, arg1);
5624 _exit(arg1);
5625 ret = 0; /* avoid warning */
5626 break;
5627 case TARGET_NR_read:
5628 if (arg3 == 0)
5629 ret = 0;
5630 else {
5631 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5632 goto efault;
5633 ret = get_errno(read(arg1, p, arg3));
5634 unlock_user(p, arg2, ret);
5636 break;
5637 case TARGET_NR_write:
5638 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5639 goto efault;
5640 ret = get_errno(write(arg1, p, arg3));
5641 unlock_user(p, arg2, 0);
5642 break;
5643 #ifdef TARGET_NR_open
5644 case TARGET_NR_open:
5645 if (!(p = lock_user_string(arg1)))
5646 goto efault;
5647 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
5648 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5649 arg3));
5650 unlock_user(p, arg1, 0);
5651 break;
5652 #endif
5653 case TARGET_NR_openat:
5654 if (!(p = lock_user_string(arg2)))
5655 goto efault;
5656 ret = get_errno(do_openat(cpu_env, arg1, p,
5657 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5658 arg4));
5659 unlock_user(p, arg2, 0);
5660 break;
5661 case TARGET_NR_close:
5662 ret = get_errno(close(arg1));
5663 break;
5664 case TARGET_NR_brk:
5665 ret = do_brk(arg1);
5666 break;
5667 #ifdef TARGET_NR_fork
5668 case TARGET_NR_fork:
5669 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5670 break;
5671 #endif
5672 #ifdef TARGET_NR_waitpid
5673 case TARGET_NR_waitpid:
5675 int status;
5676 ret = get_errno(waitpid(arg1, &status, arg3));
5677 if (!is_error(ret) && arg2 && ret
5678 && put_user_s32(host_to_target_waitstatus(status), arg2))
5679 goto efault;
5681 break;
5682 #endif
5683 #ifdef TARGET_NR_waitid
5684 case TARGET_NR_waitid:
5686 siginfo_t info;
5687 info.si_pid = 0;
5688 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5689 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5690 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5691 goto efault;
5692 host_to_target_siginfo(p, &info);
5693 unlock_user(p, arg3, sizeof(target_siginfo_t));
5696 break;
5697 #endif
5698 #ifdef TARGET_NR_creat /* not on alpha */
5699 case TARGET_NR_creat:
5700 if (!(p = lock_user_string(arg1)))
5701 goto efault;
5702 ret = get_errno(creat(p, arg2));
5703 unlock_user(p, arg1, 0);
5704 break;
5705 #endif
5706 #ifdef TARGET_NR_link
5707 case TARGET_NR_link:
5709 void * p2;
5710 p = lock_user_string(arg1);
5711 p2 = lock_user_string(arg2);
5712 if (!p || !p2)
5713 ret = -TARGET_EFAULT;
5714 else
5715 ret = get_errno(link(p, p2));
5716 unlock_user(p2, arg2, 0);
5717 unlock_user(p, arg1, 0);
5719 break;
5720 #endif
5721 #if defined(TARGET_NR_linkat)
5722 case TARGET_NR_linkat:
5724 void * p2 = NULL;
5725 if (!arg2 || !arg4)
5726 goto efault;
5727 p = lock_user_string(arg2);
5728 p2 = lock_user_string(arg4);
5729 if (!p || !p2)
5730 ret = -TARGET_EFAULT;
5731 else
5732 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5733 unlock_user(p, arg2, 0);
5734 unlock_user(p2, arg4, 0);
5736 break;
5737 #endif
5738 #ifdef TARGET_NR_unlink
5739 case TARGET_NR_unlink:
5740 if (!(p = lock_user_string(arg1)))
5741 goto efault;
5742 ret = get_errno(unlink(p));
5743 unlock_user(p, arg1, 0);
5744 break;
5745 #endif
5746 #if defined(TARGET_NR_unlinkat)
5747 case TARGET_NR_unlinkat:
5748 if (!(p = lock_user_string(arg2)))
5749 goto efault;
5750 ret = get_errno(unlinkat(arg1, p, arg3));
5751 unlock_user(p, arg2, 0);
5752 break;
5753 #endif
5754 case TARGET_NR_execve:
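/* Build host argv/envp arrays from the guest pointer arrays, locking each
   string into host memory; every string that was locked is unlocked again
   at execve_end on the failure path. */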
5756 char **argp, **envp;
5757 int argc, envc;
5758 abi_ulong gp;
5759 abi_ulong guest_argp;
5760 abi_ulong guest_envp;
5761 abi_ulong addr;
5762 char **q;
5763 int total_size = 0;
5765 argc = 0;
5766 guest_argp = arg2;
5767 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5768 if (get_user_ual(addr, gp))
5769 goto efault;
5770 if (!addr)
5771 break;
5772 argc++;
5774 envc = 0;
5775 guest_envp = arg3;
5776 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5777 if (get_user_ual(addr, gp))
5778 goto efault;
5779 if (!addr)
5780 break;
5781 envc++;
5784 argp = alloca((argc + 1) * sizeof(void *));
5785 envp = alloca((envc + 1) * sizeof(void *));
5787 for (gp = guest_argp, q = argp; gp;
5788 gp += sizeof(abi_ulong), q++) {
5789 if (get_user_ual(addr, gp))
5790 goto execve_efault;
5791 if (!addr)
5792 break;
5793 if (!(*q = lock_user_string(addr)))
5794 goto execve_efault;
5795 total_size += strlen(*q) + 1;
5797 *q = NULL;
5799 for (gp = guest_envp, q = envp; gp;
5800 gp += sizeof(abi_ulong), q++) {
5801 if (get_user_ual(addr, gp))
5802 goto execve_efault;
5803 if (!addr)
5804 break;
5805 if (!(*q = lock_user_string(addr)))
5806 goto execve_efault;
5807 total_size += strlen(*q) + 1;
5809 *q = NULL;
5811 /* This case will not be caught by the host's execve() if its
5812 page size is bigger than the target's. */
5813 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5814 ret = -TARGET_E2BIG;
5815 goto execve_end;
5817 if (!(p = lock_user_string(arg1)))
5818 goto execve_efault;
5819 ret = get_errno(execve(p, argp, envp));
5820 unlock_user(p, arg1, 0);
5822 goto execve_end;
5824 execve_efault:
5825 ret = -TARGET_EFAULT;
5827 execve_end:
5828 for (gp = guest_argp, q = argp; *q;
5829 gp += sizeof(abi_ulong), q++) {
5830 if (get_user_ual(addr, gp)
5831 || !addr)
5832 break;
5833 unlock_user(*q, addr, 0);
5835 for (gp = guest_envp, q = envp; *q;
5836 gp += sizeof(abi_ulong), q++) {
5837 if (get_user_ual(addr, gp)
5838 || !addr)
5839 break;
5840 unlock_user(*q, addr, 0);
5843 break;
5844 case TARGET_NR_chdir:
5845 if (!(p = lock_user_string(arg1)))
5846 goto efault;
5847 ret = get_errno(chdir(p));
5848 unlock_user(p, arg1, 0);
5849 break;
5850 #ifdef TARGET_NR_time
5851 case TARGET_NR_time:
5853 time_t host_time;
5854 ret = get_errno(time(&host_time));
5855 if (!is_error(ret)
5856 && arg1
5857 && put_user_sal(host_time, arg1))
5858 goto efault;
5860 break;
5861 #endif
5862 #ifdef TARGET_NR_mknod
5863 case TARGET_NR_mknod:
5864 if (!(p = lock_user_string(arg1)))
5865 goto efault;
5866 ret = get_errno(mknod(p, arg2, arg3));
5867 unlock_user(p, arg1, 0);
5868 break;
5869 #endif
5870 #if defined(TARGET_NR_mknodat)
5871 case TARGET_NR_mknodat:
5872 if (!(p = lock_user_string(arg2)))
5873 goto efault;
5874 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5875 unlock_user(p, arg2, 0);
5876 break;
5877 #endif
5878 #ifdef TARGET_NR_chmod
5879 case TARGET_NR_chmod:
5880 if (!(p = lock_user_string(arg1)))
5881 goto efault;
5882 ret = get_errno(chmod(p, arg2));
5883 unlock_user(p, arg1, 0);
5884 break;
5885 #endif
5886 #ifdef TARGET_NR_break
5887 case TARGET_NR_break:
5888 goto unimplemented;
5889 #endif
5890 #ifdef TARGET_NR_oldstat
5891 case TARGET_NR_oldstat:
5892 goto unimplemented;
5893 #endif
5894 case TARGET_NR_lseek:
5895 ret = get_errno(lseek(arg1, arg2, arg3));
5896 break;
5897 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5898 /* Alpha specific */
5899 case TARGET_NR_getxpid:
5900 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5901 ret = get_errno(getpid());
5902 break;
5903 #endif
5904 #ifdef TARGET_NR_getpid
5905 case TARGET_NR_getpid:
5906 ret = get_errno(getpid());
5907 break;
5908 #endif
5909 case TARGET_NR_mount:
5911 /* need to look at the data field */
5912 void *p2, *p3;
5914 if (arg1) {
5915 p = lock_user_string(arg1);
5916 if (!p) {
5917 goto efault;
5919 } else {
5920 p = NULL;
5923 p2 = lock_user_string(arg2);
5924 if (!p2) {
5925 if (arg1) {
5926 unlock_user(p, arg1, 0);
5928 goto efault;
5931 if (arg3) {
5932 p3 = lock_user_string(arg3);
5933 if (!p3) {
5934 if (arg1) {
5935 unlock_user(p, arg1, 0);
5937 unlock_user(p2, arg2, 0);
5938 goto efault;
5940 } else {
5941 p3 = NULL;
5944 /* FIXME - arg5 should be locked, but it isn't clear how to
5945 * do that since it's not guaranteed to be a NULL-terminated
5946 * string. */
5948 if (!arg5) {
5949 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
5950 } else {
5951 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
5953 ret = get_errno(ret);
5955 if (arg1) {
5956 unlock_user(p, arg1, 0);
5958 unlock_user(p2, arg2, 0);
5959 if (arg3) {
5960 unlock_user(p3, arg3, 0);
5963 break;
5964 #ifdef TARGET_NR_umount
5965 case TARGET_NR_umount:
5966 if (!(p = lock_user_string(arg1)))
5967 goto efault;
5968 ret = get_errno(umount(p));
5969 unlock_user(p, arg1, 0);
5970 break;
5971 #endif
5972 #ifdef TARGET_NR_stime /* not on alpha */
5973 case TARGET_NR_stime:
5975 time_t host_time;
5976 if (get_user_sal(host_time, arg1))
5977 goto efault;
5978 ret = get_errno(stime(&host_time));
5980 break;
5981 #endif
5982 case TARGET_NR_ptrace:
5983 goto unimplemented;
5984 #ifdef TARGET_NR_alarm /* not on alpha */
5985 case TARGET_NR_alarm:
5986 ret = alarm(arg1);
5987 break;
5988 #endif
5989 #ifdef TARGET_NR_oldfstat
5990 case TARGET_NR_oldfstat:
5991 goto unimplemented;
5992 #endif
5993 #ifdef TARGET_NR_pause /* not on alpha */
5994 case TARGET_NR_pause:
5995 ret = get_errno(pause());
5996 break;
5997 #endif
5998 #ifdef TARGET_NR_utime
5999 case TARGET_NR_utime:
6001 struct utimbuf tbuf, *host_tbuf;
6002 struct target_utimbuf *target_tbuf;
6003 if (arg2) {
6004 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
6005 goto efault;
6006 tbuf.actime = tswapal(target_tbuf->actime);
6007 tbuf.modtime = tswapal(target_tbuf->modtime);
6008 unlock_user_struct(target_tbuf, arg2, 0);
6009 host_tbuf = &tbuf;
6010 } else {
6011 host_tbuf = NULL;
6013 if (!(p = lock_user_string(arg1)))
6014 goto efault;
6015 ret = get_errno(utime(p, host_tbuf));
6016 unlock_user(p, arg1, 0);
6018 break;
6019 #endif
6020 #ifdef TARGET_NR_utimes
6021 case TARGET_NR_utimes:
6023 struct timeval *tvp, tv[2];
6024 if (arg2) {
6025 if (copy_from_user_timeval(&tv[0], arg2)
6026 || copy_from_user_timeval(&tv[1],
6027 arg2 + sizeof(struct target_timeval)))
6028 goto efault;
6029 tvp = tv;
6030 } else {
6031 tvp = NULL;
6033 if (!(p = lock_user_string(arg1)))
6034 goto efault;
6035 ret = get_errno(utimes(p, tvp));
6036 unlock_user(p, arg1, 0);
6038 break;
6039 #endif
6040 #if defined(TARGET_NR_futimesat)
6041 case TARGET_NR_futimesat:
6043 struct timeval *tvp, tv[2];
6044 if (arg3) {
6045 if (copy_from_user_timeval(&tv[0], arg3)
6046 || copy_from_user_timeval(&tv[1],
6047 arg3 + sizeof(struct target_timeval)))
6048 goto efault;
6049 tvp = tv;
6050 } else {
6051 tvp = NULL;
6053 if (!(p = lock_user_string(arg2)))
6054 goto efault;
6055 ret = get_errno(futimesat(arg1, path(p), tvp));
6056 unlock_user(p, arg2, 0);
6058 break;
6059 #endif
6060 #ifdef TARGET_NR_stty
6061 case TARGET_NR_stty:
6062 goto unimplemented;
6063 #endif
6064 #ifdef TARGET_NR_gtty
6065 case TARGET_NR_gtty:
6066 goto unimplemented;
6067 #endif
6068 #ifdef TARGET_NR_access
6069 case TARGET_NR_access:
6070 if (!(p = lock_user_string(arg1)))
6071 goto efault;
6072 ret = get_errno(access(path(p), arg2));
6073 unlock_user(p, arg1, 0);
6074 break;
6075 #endif
6076 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6077 case TARGET_NR_faccessat:
6078 if (!(p = lock_user_string(arg2)))
6079 goto efault;
6080 ret = get_errno(faccessat(arg1, p, arg3, 0));
6081 unlock_user(p, arg2, 0);
6082 break;
6083 #endif
6084 #ifdef TARGET_NR_nice /* not on alpha */
6085 case TARGET_NR_nice:
6086 ret = get_errno(nice(arg1));
6087 break;
6088 #endif
6089 #ifdef TARGET_NR_ftime
6090 case TARGET_NR_ftime:
6091 goto unimplemented;
6092 #endif
6093 case TARGET_NR_sync:
6094 sync();
6095 ret = 0;
6096 break;
6097 case TARGET_NR_kill:
6098 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
6099 break;
6100 #ifdef TARGET_NR_rename
6101 case TARGET_NR_rename:
6103 void *p2;
6104 p = lock_user_string(arg1);
6105 p2 = lock_user_string(arg2);
6106 if (!p || !p2)
6107 ret = -TARGET_EFAULT;
6108 else
6109 ret = get_errno(rename(p, p2));
6110 unlock_user(p2, arg2, 0);
6111 unlock_user(p, arg1, 0);
6113 break;
6114 #endif
6115 #if defined(TARGET_NR_renameat)
6116 case TARGET_NR_renameat:
6118 void *p2;
6119 p = lock_user_string(arg2);
6120 p2 = lock_user_string(arg4);
6121 if (!p || !p2)
6122 ret = -TARGET_EFAULT;
6123 else
6124 ret = get_errno(renameat(arg1, p, arg3, p2));
6125 unlock_user(p2, arg4, 0);
6126 unlock_user(p, arg2, 0);
6128 break;
6129 #endif
6130 #ifdef TARGET_NR_mkdir
6131 case TARGET_NR_mkdir:
6132 if (!(p = lock_user_string(arg1)))
6133 goto efault;
6134 ret = get_errno(mkdir(p, arg2));
6135 unlock_user(p, arg1, 0);
6136 break;
6137 #endif
6138 #if defined(TARGET_NR_mkdirat)
6139 case TARGET_NR_mkdirat:
6140 if (!(p = lock_user_string(arg2)))
6141 goto efault;
6142 ret = get_errno(mkdirat(arg1, p, arg3));
6143 unlock_user(p, arg2, 0);
6144 break;
6145 #endif
6146 #ifdef TARGET_NR_rmdir
6147 case TARGET_NR_rmdir:
6148 if (!(p = lock_user_string(arg1)))
6149 goto efault;
6150 ret = get_errno(rmdir(p));
6151 unlock_user(p, arg1, 0);
6152 break;
6153 #endif
6154 case TARGET_NR_dup:
6155 ret = get_errno(dup(arg1));
6156 break;
6157 #ifdef TARGET_NR_pipe
6158 case TARGET_NR_pipe:
6159 ret = do_pipe(cpu_env, arg1, 0, 0);
6160 break;
6161 #endif
6162 #ifdef TARGET_NR_pipe2
6163 case TARGET_NR_pipe2:
6164 ret = do_pipe(cpu_env, arg1,
6165 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6166 break;
6167 #endif
6168 case TARGET_NR_times:
6170 struct target_tms *tmsp;
6171 struct tms tms;
6172 ret = get_errno(times(&tms));
6173 if (arg1) {
6174 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6175 if (!tmsp)
6176 goto efault;
6177 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6178 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6179 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6180 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6182 if (!is_error(ret))
6183 ret = host_to_target_clock_t(ret);
6185 break;
6186 #ifdef TARGET_NR_prof
6187 case TARGET_NR_prof:
6188 goto unimplemented;
6189 #endif
6190 #ifdef TARGET_NR_signal
6191 case TARGET_NR_signal:
6192 goto unimplemented;
6193 #endif
6194 case TARGET_NR_acct:
6195 if (arg1 == 0) {
6196 ret = get_errno(acct(NULL));
6197 } else {
6198 if (!(p = lock_user_string(arg1)))
6199 goto efault;
6200 ret = get_errno(acct(path(p)));
6201 unlock_user(p, arg1, 0);
6203 break;
6204 #ifdef TARGET_NR_umount2
6205 case TARGET_NR_umount2:
6206 if (!(p = lock_user_string(arg1)))
6207 goto efault;
6208 ret = get_errno(umount2(p, arg2));
6209 unlock_user(p, arg1, 0);
6210 break;
6211 #endif
6212 #ifdef TARGET_NR_lock
6213 case TARGET_NR_lock:
6214 goto unimplemented;
6215 #endif
6216 case TARGET_NR_ioctl:
6217 ret = do_ioctl(arg1, arg2, arg3);
6218 break;
6219 case TARGET_NR_fcntl:
6220 ret = do_fcntl(arg1, arg2, arg3);
6221 break;
6222 #ifdef TARGET_NR_mpx
6223 case TARGET_NR_mpx:
6224 goto unimplemented;
6225 #endif
6226 case TARGET_NR_setpgid:
6227 ret = get_errno(setpgid(arg1, arg2));
6228 break;
6229 #ifdef TARGET_NR_ulimit
6230 case TARGET_NR_ulimit:
6231 goto unimplemented;
6232 #endif
6233 #ifdef TARGET_NR_oldolduname
6234 case TARGET_NR_oldolduname:
6235 goto unimplemented;
6236 #endif
6237 case TARGET_NR_umask:
6238 ret = get_errno(umask(arg1));
6239 break;
6240 case TARGET_NR_chroot:
6241 if (!(p = lock_user_string(arg1)))
6242 goto efault;
6243 ret = get_errno(chroot(p));
6244 unlock_user(p, arg1, 0);
6245 break;
6246 #ifdef TARGET_NR_ustat
6247 case TARGET_NR_ustat:
6248 goto unimplemented;
6249 #endif
6250 #ifdef TARGET_NR_dup2
6251 case TARGET_NR_dup2:
6252 ret = get_errno(dup2(arg1, arg2));
6253 break;
6254 #endif
6255 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6256 case TARGET_NR_dup3:
6257 ret = get_errno(dup3(arg1, arg2, arg3));
6258 break;
6259 #endif
6260 #ifdef TARGET_NR_getppid /* not on alpha */
6261 case TARGET_NR_getppid:
6262 ret = get_errno(getppid());
6263 break;
6264 #endif
6265 #ifdef TARGET_NR_getpgrp
6266 case TARGET_NR_getpgrp:
6267 ret = get_errno(getpgrp());
6268 break;
6269 #endif
6270 case TARGET_NR_setsid:
6271 ret = get_errno(setsid());
6272 break;
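/* The legacy (non-rt) sigaction interface uses a different "old" sigaction
 * layout per target: Alpha and the generic case read a target_old_sigaction
 * (Alpha without a restorer), while MIPS reuses target_sigaction with a full
 * sigset, so each branch converts handler, mask and flags separately before
 * calling do_sigaction().
 */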
6273 #ifdef TARGET_NR_sigaction
6274 case TARGET_NR_sigaction:
6276 #if defined(TARGET_ALPHA)
6277 struct target_sigaction act, oact, *pact = 0;
6278 struct target_old_sigaction *old_act;
6279 if (arg2) {
6280 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6281 goto efault;
6282 act._sa_handler = old_act->_sa_handler;
6283 target_siginitset(&act.sa_mask, old_act->sa_mask);
6284 act.sa_flags = old_act->sa_flags;
6285 act.sa_restorer = 0;
6286 unlock_user_struct(old_act, arg2, 0);
6287 pact = &act;
6289 ret = get_errno(do_sigaction(arg1, pact, &oact));
6290 if (!is_error(ret) && arg3) {
6291 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6292 goto efault;
6293 old_act->_sa_handler = oact._sa_handler;
6294 old_act->sa_mask = oact.sa_mask.sig[0];
6295 old_act->sa_flags = oact.sa_flags;
6296 unlock_user_struct(old_act, arg3, 1);
6298 #elif defined(TARGET_MIPS)
6299 struct target_sigaction act, oact, *pact, *old_act;
6301 if (arg2) {
6302 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6303 goto efault;
6304 act._sa_handler = old_act->_sa_handler;
6305 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6306 act.sa_flags = old_act->sa_flags;
6307 unlock_user_struct(old_act, arg2, 0);
6308 pact = &act;
6309 } else {
6310 pact = NULL;
6313 ret = get_errno(do_sigaction(arg1, pact, &oact));
6315 if (!is_error(ret) && arg3) {
6316 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6317 goto efault;
6318 old_act->_sa_handler = oact._sa_handler;
6319 old_act->sa_flags = oact.sa_flags;
6320 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6321 old_act->sa_mask.sig[1] = 0;
6322 old_act->sa_mask.sig[2] = 0;
6323 old_act->sa_mask.sig[3] = 0;
6324 unlock_user_struct(old_act, arg3, 1);
6326 #else
6327 struct target_old_sigaction *old_act;
6328 struct target_sigaction act, oact, *pact;
6329 if (arg2) {
6330 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6331 goto efault;
6332 act._sa_handler = old_act->_sa_handler;
6333 target_siginitset(&act.sa_mask, old_act->sa_mask);
6334 act.sa_flags = old_act->sa_flags;
6335 act.sa_restorer = old_act->sa_restorer;
6336 unlock_user_struct(old_act, arg2, 0);
6337 pact = &act;
6338 } else {
6339 pact = NULL;
6341 ret = get_errno(do_sigaction(arg1, pact, &oact));
6342 if (!is_error(ret) && arg3) {
6343 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6344 goto efault;
6345 old_act->_sa_handler = oact._sa_handler;
6346 old_act->sa_mask = oact.sa_mask.sig[0];
6347 old_act->sa_flags = oact.sa_flags;
6348 old_act->sa_restorer = oact.sa_restorer;
6349 unlock_user_struct(old_act, arg3, 1);
6351 #endif
6353 break;
6354 #endif
6355 case TARGET_NR_rt_sigaction:
6357 #if defined(TARGET_ALPHA)
6358 struct target_sigaction act, oact, *pact = 0;
6359 struct target_rt_sigaction *rt_act;
6360 /* ??? arg4 == sizeof(sigset_t). */
6361 if (arg2) {
6362 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6363 goto efault;
6364 act._sa_handler = rt_act->_sa_handler;
6365 act.sa_mask = rt_act->sa_mask;
6366 act.sa_flags = rt_act->sa_flags;
6367 act.sa_restorer = arg5;
6368 unlock_user_struct(rt_act, arg2, 0);
6369 pact = &act;
6371 ret = get_errno(do_sigaction(arg1, pact, &oact));
6372 if (!is_error(ret) && arg3) {
6373 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6374 goto efault;
6375 rt_act->_sa_handler = oact._sa_handler;
6376 rt_act->sa_mask = oact.sa_mask;
6377 rt_act->sa_flags = oact.sa_flags;
6378 unlock_user_struct(rt_act, arg3, 1);
6380 #else
6381 struct target_sigaction *act;
6382 struct target_sigaction *oact;
6384 if (arg2) {
6385 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6386 goto efault;
6387 } else
6388 act = NULL;
6389 if (arg3) {
6390 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6391 ret = -TARGET_EFAULT;
6392 goto rt_sigaction_fail;
6394 } else
6395 oact = NULL;
6396 ret = get_errno(do_sigaction(arg1, act, oact));
6397 rt_sigaction_fail:
6398 if (act)
6399 unlock_user_struct(act, arg2, 0);
6400 if (oact)
6401 unlock_user_struct(oact, arg3, 1);
6402 #endif
6404 break;
6405 #ifdef TARGET_NR_sgetmask /* not on alpha */
6406 case TARGET_NR_sgetmask:
6408 sigset_t cur_set;
6409 abi_ulong target_set;
6410 do_sigprocmask(0, NULL, &cur_set);
6411 host_to_target_old_sigset(&target_set, &cur_set);
6412 ret = target_set;
6414 break;
6415 #endif
6416 #ifdef TARGET_NR_ssetmask /* not on alpha */
6417 case TARGET_NR_ssetmask:
6419 sigset_t set, oset, cur_set;
6420 abi_ulong target_set = arg1;
6421 do_sigprocmask(0, NULL, &cur_set);
6422 target_to_host_old_sigset(&set, &target_set);
6423 sigorset(&set, &set, &cur_set);
6424 do_sigprocmask(SIG_SETMASK, &set, &oset);
6425 host_to_target_old_sigset(&target_set, &oset);
6426 ret = target_set;
6428 break;
6429 #endif
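/* sigprocmask and rt_sigprocmask both map TARGET_SIG_BLOCK/UNBLOCK/SETMASK
 * onto the host SIG_* constants; the old-style call goes through the
 * old_sigset conversion helpers, whereas rt_sigprocmask converts a full
 * target_sigset_t in both directions.
 */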
6430 #ifdef TARGET_NR_sigprocmask
6431 case TARGET_NR_sigprocmask:
6433 #if defined(TARGET_ALPHA)
6434 sigset_t set, oldset;
6435 abi_ulong mask;
6436 int how;
6438 switch (arg1) {
6439 case TARGET_SIG_BLOCK:
6440 how = SIG_BLOCK;
6441 break;
6442 case TARGET_SIG_UNBLOCK:
6443 how = SIG_UNBLOCK;
6444 break;
6445 case TARGET_SIG_SETMASK:
6446 how = SIG_SETMASK;
6447 break;
6448 default:
6449 ret = -TARGET_EINVAL;
6450 goto fail;
6452 mask = arg2;
6453 target_to_host_old_sigset(&set, &mask);
6455 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6456 if (!is_error(ret)) {
6457 host_to_target_old_sigset(&mask, &oldset);
6458 ret = mask;
6459 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6461 #else
6462 sigset_t set, oldset, *set_ptr;
6463 int how;
6465 if (arg2) {
6466 switch (arg1) {
6467 case TARGET_SIG_BLOCK:
6468 how = SIG_BLOCK;
6469 break;
6470 case TARGET_SIG_UNBLOCK:
6471 how = SIG_UNBLOCK;
6472 break;
6473 case TARGET_SIG_SETMASK:
6474 how = SIG_SETMASK;
6475 break;
6476 default:
6477 ret = -TARGET_EINVAL;
6478 goto fail;
6480 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6481 goto efault;
6482 target_to_host_old_sigset(&set, p);
6483 unlock_user(p, arg2, 0);
6484 set_ptr = &set;
6485 } else {
6486 how = 0;
6487 set_ptr = NULL;
6489 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6490 if (!is_error(ret) && arg3) {
6491 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6492 goto efault;
6493 host_to_target_old_sigset(p, &oldset);
6494 unlock_user(p, arg3, sizeof(target_sigset_t));
6496 #endif
6498 break;
6499 #endif
6500 case TARGET_NR_rt_sigprocmask:
6502 int how = arg1;
6503 sigset_t set, oldset, *set_ptr;
6505 if (arg2) {
6506 switch(how) {
6507 case TARGET_SIG_BLOCK:
6508 how = SIG_BLOCK;
6509 break;
6510 case TARGET_SIG_UNBLOCK:
6511 how = SIG_UNBLOCK;
6512 break;
6513 case TARGET_SIG_SETMASK:
6514 how = SIG_SETMASK;
6515 break;
6516 default:
6517 ret = -TARGET_EINVAL;
6518 goto fail;
6520 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6521 goto efault;
6522 target_to_host_sigset(&set, p);
6523 unlock_user(p, arg2, 0);
6524 set_ptr = &set;
6525 } else {
6526 how = 0;
6527 set_ptr = NULL;
6529 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6530 if (!is_error(ret) && arg3) {
6531 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6532 goto efault;
6533 host_to_target_sigset(p, &oldset);
6534 unlock_user(p, arg3, sizeof(target_sigset_t));
6537 break;
6538 #ifdef TARGET_NR_sigpending
6539 case TARGET_NR_sigpending:
6541 sigset_t set;
6542 ret = get_errno(sigpending(&set));
6543 if (!is_error(ret)) {
6544 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6545 goto efault;
6546 host_to_target_old_sigset(p, &set);
6547 unlock_user(p, arg1, sizeof(target_sigset_t));
6550 break;
6551 #endif
6552 case TARGET_NR_rt_sigpending:
6554 sigset_t set;
6555 ret = get_errno(sigpending(&set));
6556 if (!is_error(ret)) {
6557 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6558 goto efault;
6559 host_to_target_sigset(p, &set);
6560 unlock_user(p, arg1, sizeof(target_sigset_t));
6563 break;
6564 #ifdef TARGET_NR_sigsuspend
6565 case TARGET_NR_sigsuspend:
6567 sigset_t set;
6568 #if defined(TARGET_ALPHA)
6569 abi_ulong mask = arg1;
6570 target_to_host_old_sigset(&set, &mask);
6571 #else
6572 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6573 goto efault;
6574 target_to_host_old_sigset(&set, p);
6575 unlock_user(p, arg1, 0);
6576 #endif
6577 ret = get_errno(sigsuspend(&set));
6579 break;
6580 #endif
6581 case TARGET_NR_rt_sigsuspend:
6583 sigset_t set;
6584 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6585 goto efault;
6586 target_to_host_sigset(&set, p);
6587 unlock_user(p, arg1, 0);
6588 ret = get_errno(sigsuspend(&set));
6590 break;
6591 case TARGET_NR_rt_sigtimedwait:
6593 sigset_t set;
6594 struct timespec uts, *puts;
6595 siginfo_t uinfo;
6597 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6598 goto efault;
6599 target_to_host_sigset(&set, p);
6600 unlock_user(p, arg1, 0);
6601 if (arg3) {
6602 puts = &uts;
6603 target_to_host_timespec(puts, arg3);
6604 } else {
6605 puts = NULL;
6607 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6608 if (!is_error(ret)) {
6609 if (arg2) {
6610 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6612 if (!p) {
6613 goto efault;
6615 host_to_target_siginfo(p, &uinfo);
6616 unlock_user(p, arg2, sizeof(target_siginfo_t));
6618 ret = host_to_target_signal(ret);
6621 break;
6622 case TARGET_NR_rt_sigqueueinfo:
6624 siginfo_t uinfo;
6625 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6626 goto efault;
6627 target_to_host_siginfo(&uinfo, p);
6628 unlock_user(p, arg3, 0);
6629 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6631 break;
6632 #ifdef TARGET_NR_sigreturn
6633 case TARGET_NR_sigreturn:
6634 /* NOTE: ret is eax, so no transcoding needs to be done */
6635 ret = do_sigreturn(cpu_env);
6636 break;
6637 #endif
6638 case TARGET_NR_rt_sigreturn:
6639 /* NOTE: ret is eax, so no transcoding needs to be done */
6640 ret = do_rt_sigreturn(cpu_env);
6641 break;
6642 case TARGET_NR_sethostname:
6643 if (!(p = lock_user_string(arg1)))
6644 goto efault;
6645 ret = get_errno(sethostname(p, arg2));
6646 unlock_user(p, arg1, 0);
6647 break;
6648 case TARGET_NR_setrlimit:
6650 int resource = target_to_host_resource(arg1);
6651 struct target_rlimit *target_rlim;
6652 struct rlimit rlim;
6653 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6654 goto efault;
6655 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6656 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6657 unlock_user_struct(target_rlim, arg2, 0);
6658 ret = get_errno(setrlimit(resource, &rlim));
6660 break;
6661 case TARGET_NR_getrlimit:
6663 int resource = target_to_host_resource(arg1);
6664 struct target_rlimit *target_rlim;
6665 struct rlimit rlim;
6667 ret = get_errno(getrlimit(resource, &rlim));
6668 if (!is_error(ret)) {
6669 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6670 goto efault;
6671 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6672 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6673 unlock_user_struct(target_rlim, arg2, 1);
6676 break;
6677 case TARGET_NR_getrusage:
6679 struct rusage rusage;
6680 ret = get_errno(getrusage(arg1, &rusage));
6681 if (!is_error(ret)) {
6682 ret = host_to_target_rusage(arg2, &rusage);
6685 break;
6686 case TARGET_NR_gettimeofday:
6688 struct timeval tv;
6689 ret = get_errno(gettimeofday(&tv, NULL));
6690 if (!is_error(ret)) {
6691 if (copy_to_user_timeval(arg1, &tv))
6692 goto efault;
6695 break;
6696 case TARGET_NR_settimeofday:
6698 struct timeval tv, *ptv = NULL;
6699 struct timezone tz, *ptz = NULL;
6701 if (arg1) {
6702 if (copy_from_user_timeval(&tv, arg1)) {
6703 goto efault;
6705 ptv = &tv;
6708 if (arg2) {
6709 if (copy_from_user_timezone(&tz, arg2)) {
6710 goto efault;
6712 ptz = &tz;
6715 ret = get_errno(settimeofday(ptv, ptz));
6717 break;
6718 #if defined(TARGET_NR_select)
6719 case TARGET_NR_select:
6720 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6721 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6722 #else
6724 struct target_sel_arg_struct *sel;
6725 abi_ulong inp, outp, exp, tvp;
6726 long nsel;
6728 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6729 goto efault;
6730 nsel = tswapal(sel->n);
6731 inp = tswapal(sel->inp);
6732 outp = tswapal(sel->outp);
6733 exp = tswapal(sel->exp);
6734 tvp = tswapal(sel->tvp);
6735 unlock_user_struct(sel, arg1, 0);
6736 ret = do_select(nsel, inp, outp, exp, tvp);
6738 #endif
6739 break;
6740 #endif
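/* pselect6 passes its sigset as a pair { sigset pointer, sigset size }
 * packed at arg6 in guest memory; the two abi_ulongs are fetched and
 * converted by hand before calling sys_pselect6().
 */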
6741 #ifdef TARGET_NR_pselect6
6742 case TARGET_NR_pselect6:
6744 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6745 fd_set rfds, wfds, efds;
6746 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6747 struct timespec ts, *ts_ptr;
6750 /* The 6th arg is actually two args smashed together,
6751 * so we cannot use the C library. */
6753 sigset_t set;
6754 struct {
6755 sigset_t *set;
6756 size_t size;
6757 } sig, *sig_ptr;
6759 abi_ulong arg_sigset, arg_sigsize, *arg7;
6760 target_sigset_t *target_sigset;
6762 n = arg1;
6763 rfd_addr = arg2;
6764 wfd_addr = arg3;
6765 efd_addr = arg4;
6766 ts_addr = arg5;
6768 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6769 if (ret) {
6770 goto fail;
6772 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6773 if (ret) {
6774 goto fail;
6776 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6777 if (ret) {
6778 goto fail;
6782 /* This takes a timespec, and not a timeval, so we cannot
6783 * use the do_select() helper ... */
6785 if (ts_addr) {
6786 if (target_to_host_timespec(&ts, ts_addr)) {
6787 goto efault;
6789 ts_ptr = &ts;
6790 } else {
6791 ts_ptr = NULL;
6794 /* Extract the two packed args for the sigset */
6795 if (arg6) {
6796 sig_ptr = &sig;
6797 sig.size = _NSIG / 8;
6799 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6800 if (!arg7) {
6801 goto efault;
6803 arg_sigset = tswapal(arg7[0]);
6804 arg_sigsize = tswapal(arg7[1]);
6805 unlock_user(arg7, arg6, 0);
6807 if (arg_sigset) {
6808 sig.set = &set;
6809 if (arg_sigsize != sizeof(*target_sigset)) {
6810 /* Like the kernel, we enforce correct size sigsets */
6811 ret = -TARGET_EINVAL;
6812 goto fail;
6814 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6815 sizeof(*target_sigset), 1);
6816 if (!target_sigset) {
6817 goto efault;
6819 target_to_host_sigset(&set, target_sigset);
6820 unlock_user(target_sigset, arg_sigset, 0);
6821 } else {
6822 sig.set = NULL;
6824 } else {
6825 sig_ptr = NULL;
6828 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6829 ts_ptr, sig_ptr));
6831 if (!is_error(ret)) {
6832 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6833 goto efault;
6834 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6835 goto efault;
6836 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6837 goto efault;
6839 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6840 goto efault;
6843 break;
6844 #endif
6845 #ifdef TARGET_NR_symlink
6846 case TARGET_NR_symlink:
6848 void *p2;
6849 p = lock_user_string(arg1);
6850 p2 = lock_user_string(arg2);
6851 if (!p || !p2)
6852 ret = -TARGET_EFAULT;
6853 else
6854 ret = get_errno(symlink(p, p2));
6855 unlock_user(p2, arg2, 0);
6856 unlock_user(p, arg1, 0);
6858 break;
6859 #endif
6860 #if defined(TARGET_NR_symlinkat)
6861 case TARGET_NR_symlinkat:
6863 void *p2;
6864 p = lock_user_string(arg1);
6865 p2 = lock_user_string(arg3);
6866 if (!p || !p2)
6867 ret = -TARGET_EFAULT;
6868 else
6869 ret = get_errno(symlinkat(p, arg2, p2));
6870 unlock_user(p2, arg3, 0);
6871 unlock_user(p, arg1, 0);
6873 break;
6874 #endif
6875 #ifdef TARGET_NR_oldlstat
6876 case TARGET_NR_oldlstat:
6877 goto unimplemented;
6878 #endif
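/* readlink and readlinkat special-case /proc/self/exe (via is_proc_myself())
 * so that the guest sees the path of the emulated binary rather than the
 * path of the QEMU executable itself.
 */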
6879 #ifdef TARGET_NR_readlink
6880 case TARGET_NR_readlink:
6882 void *p2;
6883 p = lock_user_string(arg1);
6884 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6885 if (!p || !p2) {
6886 ret = -TARGET_EFAULT;
6887 } else if (!arg3) {
6888 /* Short circuit this for the magic exe check. */
6889 ret = -TARGET_EINVAL;
6890 } else if (is_proc_myself((const char *)p, "exe")) {
6891 char real[PATH_MAX], *temp;
6892 temp = realpath(exec_path, real);
6893 /* Return value is # of bytes that we wrote to the buffer. */
6894 if (temp == NULL) {
6895 ret = get_errno(-1);
6896 } else {
6897 /* Don't worry about sign mismatch as earlier mapping
6898 * logic would have thrown a bad address error. */
6899 ret = MIN(strlen(real), arg3);
6900 /* We cannot NUL terminate the string. */
6901 memcpy(p2, real, ret);
6903 } else {
6904 ret = get_errno(readlink(path(p), p2, arg3));
6906 unlock_user(p2, arg2, ret);
6907 unlock_user(p, arg1, 0);
6909 break;
6910 #endif
6911 #if defined(TARGET_NR_readlinkat)
6912 case TARGET_NR_readlinkat:
6914 void *p2;
6915 p = lock_user_string(arg2);
6916 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6917 if (!p || !p2) {
6918 ret = -TARGET_EFAULT;
6919 } else if (is_proc_myself((const char *)p, "exe")) {
6920 char real[PATH_MAX], *temp;
6921 temp = realpath(exec_path, real);
6922 ret = temp == NULL ? get_errno(-1) : strlen(real);
6923 snprintf((char *)p2, arg4, "%s", real);
6924 } else {
6925 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6927 unlock_user(p2, arg3, ret);
6928 unlock_user(p, arg2, 0);
6930 break;
6931 #endif
6932 #ifdef TARGET_NR_uselib
6933 case TARGET_NR_uselib:
6934 goto unimplemented;
6935 #endif
6936 #ifdef TARGET_NR_swapon
6937 case TARGET_NR_swapon:
6938 if (!(p = lock_user_string(arg1)))
6939 goto efault;
6940 ret = get_errno(swapon(p, arg2));
6941 unlock_user(p, arg1, 0);
6942 break;
6943 #endif
6944 case TARGET_NR_reboot:
6945 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6946 /* arg4 is only valid for LINUX_REBOOT_CMD_RESTART2 and must be ignored in all other cases */
6947 p = lock_user_string(arg4);
6948 if (!p) {
6949 goto efault;
6951 ret = get_errno(reboot(arg1, arg2, arg3, p));
6952 unlock_user(p, arg4, 0);
6953 } else {
6954 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6956 break;
6957 #ifdef TARGET_NR_readdir
6958 case TARGET_NR_readdir:
6959 goto unimplemented;
6960 #endif
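/* On 32-bit i386 and ARM ABIs, m68k, CRIS, MicroBlaze and s390x the old mmap
 * syscall takes a single guest pointer to a block of six abi_ulongs, roughly
 * { addr, length, prot, flags, fd, offset }; all other targets pass the six
 * values directly as syscall arguments.
 */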
6961 #ifdef TARGET_NR_mmap
6962 case TARGET_NR_mmap:
6963 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6964 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6965 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6966 || defined(TARGET_S390X)
6968 abi_ulong *v;
6969 abi_ulong v1, v2, v3, v4, v5, v6;
6970 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6971 goto efault;
6972 v1 = tswapal(v[0]);
6973 v2 = tswapal(v[1]);
6974 v3 = tswapal(v[2]);
6975 v4 = tswapal(v[3]);
6976 v5 = tswapal(v[4]);
6977 v6 = tswapal(v[5]);
6978 unlock_user(v, arg1, 0);
6979 ret = get_errno(target_mmap(v1, v2, v3,
6980 target_to_host_bitmask(v4, mmap_flags_tbl),
6981 v5, v6));
6983 #else
6984 ret = get_errno(target_mmap(arg1, arg2, arg3,
6985 target_to_host_bitmask(arg4, mmap_flags_tbl),
6986 arg5,
6987 arg6));
6988 #endif
6989 break;
6990 #endif
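/* mmap2 takes the file offset in units of (1 << MMAP_SHIFT) bytes, normally
 * 4 KiB pages: e.g. arg6 == 3 means a byte offset of 3 * 4096 = 12288.
 */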
6991 #ifdef TARGET_NR_mmap2
6992 case TARGET_NR_mmap2:
6993 #ifndef MMAP_SHIFT
6994 #define MMAP_SHIFT 12
6995 #endif
6996 ret = get_errno(target_mmap(arg1, arg2, arg3,
6997 target_to_host_bitmask(arg4, mmap_flags_tbl),
6998 arg5,
6999 arg6 << MMAP_SHIFT));
7000 break;
7001 #endif
7002 case TARGET_NR_munmap:
7003 ret = get_errno(target_munmap(arg1, arg2));
7004 break;
7005 case TARGET_NR_mprotect:
7007 TaskState *ts = cpu->opaque;
7008 /* Special hack to detect libc making the stack executable. */
7009 if ((arg3 & PROT_GROWSDOWN)
7010 && arg1 >= ts->info->stack_limit
7011 && arg1 <= ts->info->start_stack) {
7012 arg3 &= ~PROT_GROWSDOWN;
7013 arg2 = arg2 + arg1 - ts->info->stack_limit;
7014 arg1 = ts->info->stack_limit;
7017 ret = get_errno(target_mprotect(arg1, arg2, arg3));
7018 break;
7019 #ifdef TARGET_NR_mremap
7020 case TARGET_NR_mremap:
7021 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
7022 break;
7023 #endif
7024 /* ??? msync/mlock/munlock are broken for softmmu. */
7025 #ifdef TARGET_NR_msync
7026 case TARGET_NR_msync:
7027 ret = get_errno(msync(g2h(arg1), arg2, arg3));
7028 break;
7029 #endif
7030 #ifdef TARGET_NR_mlock
7031 case TARGET_NR_mlock:
7032 ret = get_errno(mlock(g2h(arg1), arg2));
7033 break;
7034 #endif
7035 #ifdef TARGET_NR_munlock
7036 case TARGET_NR_munlock:
7037 ret = get_errno(munlock(g2h(arg1), arg2));
7038 break;
7039 #endif
7040 #ifdef TARGET_NR_mlockall
7041 case TARGET_NR_mlockall:
7042 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
7043 break;
7044 #endif
7045 #ifdef TARGET_NR_munlockall
7046 case TARGET_NR_munlockall:
7047 ret = get_errno(munlockall());
7048 break;
7049 #endif
7050 case TARGET_NR_truncate:
7051 if (!(p = lock_user_string(arg1)))
7052 goto efault;
7053 ret = get_errno(truncate(p, arg2));
7054 unlock_user(p, arg1, 0);
7055 break;
7056 case TARGET_NR_ftruncate:
7057 ret = get_errno(ftruncate(arg1, arg2));
7058 break;
7059 case TARGET_NR_fchmod:
7060 ret = get_errno(fchmod(arg1, arg2));
7061 break;
7062 #if defined(TARGET_NR_fchmodat)
7063 case TARGET_NR_fchmodat:
7064 if (!(p = lock_user_string(arg2)))
7065 goto efault;
7066 ret = get_errno(fchmodat(arg1, p, arg3, 0));
7067 unlock_user(p, arg2, 0);
7068 break;
7069 #endif
7070 case TARGET_NR_getpriority:
7071 /* Note that negative values are valid for getpriority, so we must
7072 differentiate based on errno settings. */
7073 errno = 0;
7074 ret = getpriority(arg1, arg2);
7075 if (ret == -1 && errno != 0) {
7076 ret = -host_to_target_errno(errno);
7077 break;
7079 #ifdef TARGET_ALPHA
7080 /* Return value is the unbiased priority. Signal no error. */
7081 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
7082 #else
7083 /* Return value is a biased priority to avoid negative numbers. */
7084 ret = 20 - ret;
7085 #endif
7086 break;
7087 case TARGET_NR_setpriority:
7088 ret = get_errno(setpriority(arg1, arg2, arg3));
7089 break;
7090 #ifdef TARGET_NR_profil
7091 case TARGET_NR_profil:
7092 goto unimplemented;
7093 #endif
7094 case TARGET_NR_statfs:
7095 if (!(p = lock_user_string(arg1)))
7096 goto efault;
7097 ret = get_errno(statfs(path(p), &stfs));
7098 unlock_user(p, arg1, 0);
7099 convert_statfs:
7100 if (!is_error(ret)) {
7101 struct target_statfs *target_stfs;
7103 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
7104 goto efault;
7105 __put_user(stfs.f_type, &target_stfs->f_type);
7106 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7107 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7108 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7109 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7110 __put_user(stfs.f_files, &target_stfs->f_files);
7111 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7112 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7113 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7114 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7115 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7116 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7117 unlock_user_struct(target_stfs, arg2, 1);
7119 break;
7120 case TARGET_NR_fstatfs:
7121 ret = get_errno(fstatfs(arg1, &stfs));
7122 goto convert_statfs;
7123 #ifdef TARGET_NR_statfs64
7124 case TARGET_NR_statfs64:
7125 if (!(p = lock_user_string(arg1)))
7126 goto efault;
7127 ret = get_errno(statfs(path(p), &stfs));
7128 unlock_user(p, arg1, 0);
7129 convert_statfs64:
7130 if (!is_error(ret)) {
7131 struct target_statfs64 *target_stfs;
7133 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7134 goto efault;
7135 __put_user(stfs.f_type, &target_stfs->f_type);
7136 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7137 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7138 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7139 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7140 __put_user(stfs.f_files, &target_stfs->f_files);
7141 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7142 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7143 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7144 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7145 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7146 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7147 unlock_user_struct(target_stfs, arg3, 1);
7149 break;
7150 case TARGET_NR_fstatfs64:
7151 ret = get_errno(fstatfs(arg1, &stfs));
7152 goto convert_statfs64;
7153 #endif
7154 #ifdef TARGET_NR_ioperm
7155 case TARGET_NR_ioperm:
7156 goto unimplemented;
7157 #endif
7158 #ifdef TARGET_NR_socketcall
7159 case TARGET_NR_socketcall:
7160 ret = do_socketcall(arg1, arg2);
7161 break;
7162 #endif
7163 #ifdef TARGET_NR_accept
7164 case TARGET_NR_accept:
7165 ret = do_accept4(arg1, arg2, arg3, 0);
7166 break;
7167 #endif
7168 #ifdef TARGET_NR_accept4
7169 case TARGET_NR_accept4:
7170 #ifdef CONFIG_ACCEPT4
7171 ret = do_accept4(arg1, arg2, arg3, arg4);
7172 #else
7173 goto unimplemented;
7174 #endif
7175 break;
7176 #endif
7177 #ifdef TARGET_NR_bind
7178 case TARGET_NR_bind:
7179 ret = do_bind(arg1, arg2, arg3);
7180 break;
7181 #endif
7182 #ifdef TARGET_NR_connect
7183 case TARGET_NR_connect:
7184 ret = do_connect(arg1, arg2, arg3);
7185 break;
7186 #endif
7187 #ifdef TARGET_NR_getpeername
7188 case TARGET_NR_getpeername:
7189 ret = do_getpeername(arg1, arg2, arg3);
7190 break;
7191 #endif
7192 #ifdef TARGET_NR_getsockname
7193 case TARGET_NR_getsockname:
7194 ret = do_getsockname(arg1, arg2, arg3);
7195 break;
7196 #endif
7197 #ifdef TARGET_NR_getsockopt
7198 case TARGET_NR_getsockopt:
7199 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
7200 break;
7201 #endif
7202 #ifdef TARGET_NR_listen
7203 case TARGET_NR_listen:
7204 ret = get_errno(listen(arg1, arg2));
7205 break;
7206 #endif
7207 #ifdef TARGET_NR_recv
7208 case TARGET_NR_recv:
7209 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
7210 break;
7211 #endif
7212 #ifdef TARGET_NR_recvfrom
7213 case TARGET_NR_recvfrom:
7214 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
7215 break;
7216 #endif
7217 #ifdef TARGET_NR_recvmsg
7218 case TARGET_NR_recvmsg:
7219 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
7220 break;
7221 #endif
7222 #ifdef TARGET_NR_send
7223 case TARGET_NR_send:
7224 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
7225 break;
7226 #endif
7227 #ifdef TARGET_NR_sendmsg
7228 case TARGET_NR_sendmsg:
7229 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
7230 break;
7231 #endif
7232 #ifdef TARGET_NR_sendmmsg
7233 case TARGET_NR_sendmmsg:
7234 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
7235 break;
7236 case TARGET_NR_recvmmsg:
7237 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
7238 break;
7239 #endif
7240 #ifdef TARGET_NR_sendto
7241 case TARGET_NR_sendto:
7242 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
7243 break;
7244 #endif
7245 #ifdef TARGET_NR_shutdown
7246 case TARGET_NR_shutdown:
7247 ret = get_errno(shutdown(arg1, arg2));
7248 break;
7249 #endif
7250 #ifdef TARGET_NR_socket
7251 case TARGET_NR_socket:
7252 ret = do_socket(arg1, arg2, arg3);
7253 break;
7254 #endif
7255 #ifdef TARGET_NR_socketpair
7256 case TARGET_NR_socketpair:
7257 ret = do_socketpair(arg1, arg2, arg3, arg4);
7258 break;
7259 #endif
7260 #ifdef TARGET_NR_setsockopt
7261 case TARGET_NR_setsockopt:
7262 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7263 break;
7264 #endif
7266 case TARGET_NR_syslog:
7267 if (!(p = lock_user_string(arg2)))
7268 goto efault;
7269 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7270 unlock_user(p, arg2, 0);
7271 break;
7273 case TARGET_NR_setitimer:
7275 struct itimerval value, ovalue, *pvalue;
7277 if (arg2) {
7278 pvalue = &value;
7279 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7280 || copy_from_user_timeval(&pvalue->it_value,
7281 arg2 + sizeof(struct target_timeval)))
7282 goto efault;
7283 } else {
7284 pvalue = NULL;
7286 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7287 if (!is_error(ret) && arg3) {
7288 if (copy_to_user_timeval(arg3,
7289 &ovalue.it_interval)
7290 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7291 &ovalue.it_value))
7292 goto efault;
7295 break;
7296 case TARGET_NR_getitimer:
7298 struct itimerval value;
7300 ret = get_errno(getitimer(arg1, &value));
7301 if (!is_error(ret) && arg2) {
7302 if (copy_to_user_timeval(arg2,
7303 &value.it_interval)
7304 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7305 &value.it_value))
7306 goto efault;
7309 break;
7310 #ifdef TARGET_NR_stat
7311 case TARGET_NR_stat:
7312 if (!(p = lock_user_string(arg1)))
7313 goto efault;
7314 ret = get_errno(stat(path(p), &st));
7315 unlock_user(p, arg1, 0);
7316 goto do_stat;
7317 #endif
7318 #ifdef TARGET_NR_lstat
7319 case TARGET_NR_lstat:
7320 if (!(p = lock_user_string(arg1)))
7321 goto efault;
7322 ret = get_errno(lstat(path(p), &st));
7323 unlock_user(p, arg1, 0);
7324 goto do_stat;
7325 #endif
7326 case TARGET_NR_fstat:
7328 ret = get_errno(fstat(arg1, &st));
7329 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7330 do_stat:
7331 #endif
7332 if (!is_error(ret)) {
7333 struct target_stat *target_st;
7335 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7336 goto efault;
7337 memset(target_st, 0, sizeof(*target_st));
7338 __put_user(st.st_dev, &target_st->st_dev);
7339 __put_user(st.st_ino, &target_st->st_ino);
7340 __put_user(st.st_mode, &target_st->st_mode);
7341 __put_user(st.st_uid, &target_st->st_uid);
7342 __put_user(st.st_gid, &target_st->st_gid);
7343 __put_user(st.st_nlink, &target_st->st_nlink);
7344 __put_user(st.st_rdev, &target_st->st_rdev);
7345 __put_user(st.st_size, &target_st->st_size);
7346 __put_user(st.st_blksize, &target_st->st_blksize);
7347 __put_user(st.st_blocks, &target_st->st_blocks);
7348 __put_user(st.st_atime, &target_st->target_st_atime);
7349 __put_user(st.st_mtime, &target_st->target_st_mtime);
7350 __put_user(st.st_ctime, &target_st->target_st_ctime);
7351 unlock_user_struct(target_st, arg2, 1);
7354 break;
7355 #ifdef TARGET_NR_olduname
7356 case TARGET_NR_olduname:
7357 goto unimplemented;
7358 #endif
7359 #ifdef TARGET_NR_iopl
7360 case TARGET_NR_iopl:
7361 goto unimplemented;
7362 #endif
7363 case TARGET_NR_vhangup:
7364 ret = get_errno(vhangup());
7365 break;
7366 #ifdef TARGET_NR_idle
7367 case TARGET_NR_idle:
7368 goto unimplemented;
7369 #endif
7370 #ifdef TARGET_NR_syscall
7371 case TARGET_NR_syscall:
7372 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7373 arg6, arg7, arg8, 0);
7374 break;
7375 #endif
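/* wait4: the host exit status is converted with host_to_target_waitstatus()
 * before being written back, and the optional rusage goes through
 * host_to_target_rusage(); a failure of that copy overrides ret.
 */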
7376 case TARGET_NR_wait4:
7378 int status;
7379 abi_long status_ptr = arg2;
7380 struct rusage rusage, *rusage_ptr;
7381 abi_ulong target_rusage = arg4;
7382 abi_long rusage_err;
7383 if (target_rusage)
7384 rusage_ptr = &rusage;
7385 else
7386 rusage_ptr = NULL;
7387 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7388 if (!is_error(ret)) {
7389 if (status_ptr && ret) {
7390 status = host_to_target_waitstatus(status);
7391 if (put_user_s32(status, status_ptr))
7392 goto efault;
7394 if (target_rusage) {
7395 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7396 if (rusage_err) {
7397 ret = rusage_err;
7402 break;
7403 #ifdef TARGET_NR_swapoff
7404 case TARGET_NR_swapoff:
7405 if (!(p = lock_user_string(arg1)))
7406 goto efault;
7407 ret = get_errno(swapoff(p));
7408 unlock_user(p, arg1, 0);
7409 break;
7410 #endif
7411 case TARGET_NR_sysinfo:
7413 struct target_sysinfo *target_value;
7414 struct sysinfo value;
7415 ret = get_errno(sysinfo(&value));
7416 if (!is_error(ret) && arg1)
7418 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7419 goto efault;
7420 __put_user(value.uptime, &target_value->uptime);
7421 __put_user(value.loads[0], &target_value->loads[0]);
7422 __put_user(value.loads[1], &target_value->loads[1]);
7423 __put_user(value.loads[2], &target_value->loads[2]);
7424 __put_user(value.totalram, &target_value->totalram);
7425 __put_user(value.freeram, &target_value->freeram);
7426 __put_user(value.sharedram, &target_value->sharedram);
7427 __put_user(value.bufferram, &target_value->bufferram);
7428 __put_user(value.totalswap, &target_value->totalswap);
7429 __put_user(value.freeswap, &target_value->freeswap);
7430 __put_user(value.procs, &target_value->procs);
7431 __put_user(value.totalhigh, &target_value->totalhigh);
7432 __put_user(value.freehigh, &target_value->freehigh);
7433 __put_user(value.mem_unit, &target_value->mem_unit);
7434 unlock_user_struct(target_value, arg1, 1);
7437 break;
7438 #ifdef TARGET_NR_ipc
7439 case TARGET_NR_ipc:
7440 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7441 break;
7442 #endif
7443 #ifdef TARGET_NR_semget
7444 case TARGET_NR_semget:
7445 ret = get_errno(semget(arg1, arg2, arg3));
7446 break;
7447 #endif
7448 #ifdef TARGET_NR_semop
7449 case TARGET_NR_semop:
7450 ret = do_semop(arg1, arg2, arg3);
7451 break;
7452 #endif
7453 #ifdef TARGET_NR_semctl
7454 case TARGET_NR_semctl:
7455 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
7456 break;
7457 #endif
7458 #ifdef TARGET_NR_msgctl
7459 case TARGET_NR_msgctl:
7460 ret = do_msgctl(arg1, arg2, arg3);
7461 break;
7462 #endif
7463 #ifdef TARGET_NR_msgget
7464 case TARGET_NR_msgget:
7465 ret = get_errno(msgget(arg1, arg2));
7466 break;
7467 #endif
7468 #ifdef TARGET_NR_msgrcv
7469 case TARGET_NR_msgrcv:
7470 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7471 break;
7472 #endif
7473 #ifdef TARGET_NR_msgsnd
7474 case TARGET_NR_msgsnd:
7475 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7476 break;
7477 #endif
7478 #ifdef TARGET_NR_shmget
7479 case TARGET_NR_shmget:
7480 ret = get_errno(shmget(arg1, arg2, arg3));
7481 break;
7482 #endif
7483 #ifdef TARGET_NR_shmctl
7484 case TARGET_NR_shmctl:
7485 ret = do_shmctl(arg1, arg2, arg3);
7486 break;
7487 #endif
7488 #ifdef TARGET_NR_shmat
7489 case TARGET_NR_shmat:
7490 ret = do_shmat(arg1, arg2, arg3);
7491 break;
7492 #endif
7493 #ifdef TARGET_NR_shmdt
7494 case TARGET_NR_shmdt:
7495 ret = do_shmdt(arg1);
7496 break;
7497 #endif
7498 case TARGET_NR_fsync:
7499 ret = get_errno(fsync(arg1));
7500 break;
7501 case TARGET_NR_clone:
7502 /* Linux manages to have three different orderings for its
7503 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7504 * match the kernel's CONFIG_CLONE_* settings.
7505 * Microblaze is further special in that it uses a sixth
7506 * implicit argument to clone for the TLS pointer. */
7508 #if defined(TARGET_MICROBLAZE)
7509 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7510 #elif defined(TARGET_CLONE_BACKWARDS)
7511 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7512 #elif defined(TARGET_CLONE_BACKWARDS2)
7513 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7514 #else
7515 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7516 #endif
7517 break;
7518 #ifdef __NR_exit_group
7519 /* new thread calls */
7520 case TARGET_NR_exit_group:
7521 #ifdef TARGET_GPROF
7522 _mcleanup();
7523 #endif
7524 gdb_exit(cpu_env, arg1);
7525 ret = get_errno(exit_group(arg1));
7526 break;
7527 #endif
7528 case TARGET_NR_setdomainname:
7529 if (!(p = lock_user_string(arg1)))
7530 goto efault;
7531 ret = get_errno(setdomainname(p, arg2));
7532 unlock_user(p, arg1, 0);
7533 break;
7534 case TARGET_NR_uname:
7535 /* no need to transcode because we use the linux syscall */
7537 struct new_utsname * buf;
7539 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7540 goto efault;
7541 ret = get_errno(sys_uname(buf));
7542 if (!is_error(ret)) {
7543 /* Overwrite the native machine name with whatever is being
7544 emulated. */
7545 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7546 /* Allow the user to override the reported release. */
7547 if (qemu_uname_release && *qemu_uname_release)
7548 strcpy (buf->release, qemu_uname_release);
7550 unlock_user_struct(buf, arg1, 1);
7552 break;
7553 #ifdef TARGET_I386
7554 case TARGET_NR_modify_ldt:
7555 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7556 break;
7557 #if !defined(TARGET_X86_64)
7558 case TARGET_NR_vm86old:
7559 goto unimplemented;
7560 case TARGET_NR_vm86:
7561 ret = do_vm86(cpu_env, arg1, arg2);
7562 break;
7563 #endif
7564 #endif
7565 case TARGET_NR_adjtimex:
7566 goto unimplemented;
7567 #ifdef TARGET_NR_create_module
7568 case TARGET_NR_create_module:
7569 #endif
7570 case TARGET_NR_init_module:
7571 case TARGET_NR_delete_module:
7572 #ifdef TARGET_NR_get_kernel_syms
7573 case TARGET_NR_get_kernel_syms:
7574 #endif
7575 goto unimplemented;
7576 case TARGET_NR_quotactl:
7577 goto unimplemented;
7578 case TARGET_NR_getpgid:
7579 ret = get_errno(getpgid(arg1));
7580 break;
7581 case TARGET_NR_fchdir:
7582 ret = get_errno(fchdir(arg1));
7583 break;
7584 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7585 case TARGET_NR_bdflush:
7586 goto unimplemented;
7587 #endif
7588 #ifdef TARGET_NR_sysfs
7589 case TARGET_NR_sysfs:
7590 goto unimplemented;
7591 #endif
7592 case TARGET_NR_personality:
7593 ret = get_errno(personality(arg1));
7594 break;
7595 #ifdef TARGET_NR_afs_syscall
7596 case TARGET_NR_afs_syscall:
7597 goto unimplemented;
7598 #endif
7599 #ifdef TARGET_NR__llseek /* Not on alpha */
7600 case TARGET_NR__llseek:
7602 int64_t res;
7603 #if !defined(__NR_llseek)
7604 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7605 if (res == -1) {
7606 ret = get_errno(res);
7607 } else {
7608 ret = 0;
7610 #else
7611 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7612 #endif
7613 if ((ret == 0) && put_user_s64(res, arg4)) {
7614 goto efault;
7617 break;
7618 #endif
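/* getdents is handled three ways: when a 32-bit target runs on a 64-bit
 * host each record is converted through a bounce buffer; when the layouts
 * match the records are merely byteswapped in place; and when the host has
 * no __NR_getdents the call is emulated on top of getdents64 with an
 * in-place conversion to the target dirent layout.
 */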
7619 #ifdef TARGET_NR_getdents
7620 case TARGET_NR_getdents:
7621 #ifdef __NR_getdents
7622 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7624 struct target_dirent *target_dirp;
7625 struct linux_dirent *dirp;
7626 abi_long count = arg3;
7628 dirp = malloc(count);
7629 if (!dirp) {
7630 ret = -TARGET_ENOMEM;
7631 goto fail;
7634 ret = get_errno(sys_getdents(arg1, dirp, count));
7635 if (!is_error(ret)) {
7636 struct linux_dirent *de;
7637 struct target_dirent *tde;
7638 int len = ret;
7639 int reclen, treclen;
7640 int count1, tnamelen;
7642 count1 = 0;
7643 de = dirp;
7644 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7645 goto efault;
7646 tde = target_dirp;
7647 while (len > 0) {
7648 reclen = de->d_reclen;
7649 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7650 assert(tnamelen >= 0);
7651 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7652 assert(count1 + treclen <= count);
7653 tde->d_reclen = tswap16(treclen);
7654 tde->d_ino = tswapal(de->d_ino);
7655 tde->d_off = tswapal(de->d_off);
7656 memcpy(tde->d_name, de->d_name, tnamelen);
7657 de = (struct linux_dirent *)((char *)de + reclen);
7658 len -= reclen;
7659 tde = (struct target_dirent *)((char *)tde + treclen);
7660 count1 += treclen;
7662 ret = count1;
7663 unlock_user(target_dirp, arg2, ret);
7665 free(dirp);
7667 #else
7669 struct linux_dirent *dirp;
7670 abi_long count = arg3;
7672 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7673 goto efault;
7674 ret = get_errno(sys_getdents(arg1, dirp, count));
7675 if (!is_error(ret)) {
7676 struct linux_dirent *de;
7677 int len = ret;
7678 int reclen;
7679 de = dirp;
7680 while (len > 0) {
7681 reclen = de->d_reclen;
7682 if (reclen > len)
7683 break;
7684 de->d_reclen = tswap16(reclen);
7685 tswapls(&de->d_ino);
7686 tswapls(&de->d_off);
7687 de = (struct linux_dirent *)((char *)de + reclen);
7688 len -= reclen;
7691 unlock_user(dirp, arg2, ret);
7693 #endif
7694 #else
7695 /* Implement getdents in terms of getdents64 */
7697 struct linux_dirent64 *dirp;
7698 abi_long count = arg3;
7700 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7701 if (!dirp) {
7702 goto efault;
7704 ret = get_errno(sys_getdents64(arg1, dirp, count));
7705 if (!is_error(ret)) {
7706 /* Convert the dirent64 structs to target dirent. We do this
7707 * in-place, since we can guarantee that a target_dirent is no
7708 * larger than a dirent64; however this means we have to be
7709 * careful to read everything before writing in the new format. */
7711 struct linux_dirent64 *de;
7712 struct target_dirent *tde;
7713 int len = ret;
7714 int tlen = 0;
7716 de = dirp;
7717 tde = (struct target_dirent *)dirp;
7718 while (len > 0) {
7719 int namelen, treclen;
7720 int reclen = de->d_reclen;
7721 uint64_t ino = de->d_ino;
7722 int64_t off = de->d_off;
7723 uint8_t type = de->d_type;
7725 namelen = strlen(de->d_name);
7726 treclen = offsetof(struct target_dirent, d_name)
7727 + namelen + 2;
7728 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7730 memmove(tde->d_name, de->d_name, namelen + 1);
7731 tde->d_ino = tswapal(ino);
7732 tde->d_off = tswapal(off);
7733 tde->d_reclen = tswap16(treclen);
7734 /* The target_dirent type is in what was formerly a padding
7735 * byte at the end of the structure: */
7737 *(((char *)tde) + treclen - 1) = type;
7739 de = (struct linux_dirent64 *)((char *)de + reclen);
7740 tde = (struct target_dirent *)((char *)tde + treclen);
7741 len -= reclen;
7742 tlen += treclen;
7744 ret = tlen;
7746 unlock_user(dirp, arg2, ret);
7748 #endif
7749 break;
7750 #endif /* TARGET_NR_getdents */
7751 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7752 case TARGET_NR_getdents64:
7754 struct linux_dirent64 *dirp;
7755 abi_long count = arg3;
7756 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7757 goto efault;
7758 ret = get_errno(sys_getdents64(arg1, dirp, count));
7759 if (!is_error(ret)) {
7760 struct linux_dirent64 *de;
7761 int len = ret;
7762 int reclen;
7763 de = dirp;
7764 while (len > 0) {
7765 reclen = de->d_reclen;
7766 if (reclen > len)
7767 break;
7768 de->d_reclen = tswap16(reclen);
7769 tswap64s((uint64_t *)&de->d_ino);
7770 tswap64s((uint64_t *)&de->d_off);
7771 de = (struct linux_dirent64 *)((char *)de + reclen);
7772 len -= reclen;
7775 unlock_user(dirp, arg2, ret);
7777 break;
7778 #endif /* TARGET_NR_getdents64 */
7779 #if defined(TARGET_NR__newselect)
7780 case TARGET_NR__newselect:
7781 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7782 break;
7783 #endif
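/* poll and ppoll share the pollfd conversion below; ppoll additionally
 * converts an optional timespec (arg3) and sigset (arg4) and writes the
 * remaining timeout back to the guest on return.
 */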
7784 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7785 # ifdef TARGET_NR_poll
7786 case TARGET_NR_poll:
7787 # endif
7788 # ifdef TARGET_NR_ppoll
7789 case TARGET_NR_ppoll:
7790 # endif
7792 struct target_pollfd *target_pfd;
7793 unsigned int nfds = arg2;
7794 int timeout = arg3;
7795 struct pollfd *pfd;
7796 unsigned int i;
7798 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7799 if (!target_pfd)
7800 goto efault;
7802 pfd = alloca(sizeof(struct pollfd) * nfds);
7803 for(i = 0; i < nfds; i++) {
7804 pfd[i].fd = tswap32(target_pfd[i].fd);
7805 pfd[i].events = tswap16(target_pfd[i].events);
7808 # ifdef TARGET_NR_ppoll
7809 if (num == TARGET_NR_ppoll) {
7810 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7811 target_sigset_t *target_set;
7812 sigset_t _set, *set = &_set;
7814 if (arg3) {
7815 if (target_to_host_timespec(timeout_ts, arg3)) {
7816 unlock_user(target_pfd, arg1, 0);
7817 goto efault;
7819 } else {
7820 timeout_ts = NULL;
7823 if (arg4) {
7824 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7825 if (!target_set) {
7826 unlock_user(target_pfd, arg1, 0);
7827 goto efault;
7829 target_to_host_sigset(set, target_set);
7830 } else {
7831 set = NULL;
7834 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7836 if (!is_error(ret) && arg3) {
7837 host_to_target_timespec(arg3, timeout_ts);
7839 if (arg4) {
7840 unlock_user(target_set, arg4, 0);
7842 } else
7843 # endif
7844 ret = get_errno(poll(pfd, nfds, timeout));
7846 if (!is_error(ret)) {
7847 for(i = 0; i < nfds; i++) {
7848 target_pfd[i].revents = tswap16(pfd[i].revents);
7851 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7853 break;
7854 #endif
7855 case TARGET_NR_flock:
7856 /* NOTE: the flock constant seems to be the same for every
7857 Linux platform */
7858 ret = get_errno(flock(arg1, arg2));
7859 break;
7860 case TARGET_NR_readv:
7862 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7863 if (vec != NULL) {
7864 ret = get_errno(readv(arg1, vec, arg3));
7865 unlock_iovec(vec, arg2, arg3, 1);
7866 } else {
7867 ret = -host_to_target_errno(errno);
7870 break;
7871 case TARGET_NR_writev:
7873 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7874 if (vec != NULL) {
7875 ret = get_errno(writev(arg1, vec, arg3));
7876 unlock_iovec(vec, arg2, arg3, 0);
7877 } else {
7878 ret = -host_to_target_errno(errno);
7881 break;
7882 case TARGET_NR_getsid:
7883 ret = get_errno(getsid(arg1));
7884 break;
7885 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7886 case TARGET_NR_fdatasync:
7887 ret = get_errno(fdatasync(arg1));
7888 break;
7889 #endif
7890 #ifdef TARGET_NR__sysctl
7891 case TARGET_NR__sysctl:
7892 /* We don't implement this, but ENOTDIR is always a safe
7893 return value. */
7894 ret = -TARGET_ENOTDIR;
7895 break;
7896 #endif
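/* The affinity mask length from the guest is rounded up to whole host longs,
 * e.g. a 4-byte mask from a 32-bit guest becomes an 8-byte host mask on a
 * 64-bit host; lengths that are not a multiple of sizeof(abi_ulong) are
 * rejected with EINVAL.
 */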
7897 case TARGET_NR_sched_getaffinity:
7899 unsigned int mask_size;
7900 unsigned long *mask;
7903 /* sched_getaffinity needs multiples of ulong, so we need to take
7904 * care of mismatches between target ulong and host ulong sizes. */
7906 if (arg2 & (sizeof(abi_ulong) - 1)) {
7907 ret = -TARGET_EINVAL;
7908 break;
7910 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7912 mask = alloca(mask_size);
7913 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7915 if (!is_error(ret)) {
7916 if (ret > arg2) {
7917 /* More data returned than the caller's buffer will fit.
7918 * This only happens if sizeof(abi_long) < sizeof(long)
7919 * and the caller passed us a buffer holding an odd number
7920 * of abi_longs. If the host kernel is actually using the
7921 * extra 4 bytes then fail EINVAL; otherwise we can just
7922 * ignore them and only copy the interesting part. */
7924 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
7925 if (numcpus > arg2 * 8) {
7926 ret = -TARGET_EINVAL;
7927 break;
7929 ret = arg2;
7932 if (copy_to_user(arg3, mask, ret)) {
7933 goto efault;
7937 break;
7938 case TARGET_NR_sched_setaffinity:
7940 unsigned int mask_size;
7941 unsigned long *mask;
7944 /* sched_setaffinity needs multiples of ulong, so we need to take
7945 * care of mismatches between target ulong and host ulong sizes. */
7947 if (arg2 & (sizeof(abi_ulong) - 1)) {
7948 ret = -TARGET_EINVAL;
7949 break;
7951 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7953 mask = alloca(mask_size);
7954 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7955 goto efault;
7957 memcpy(mask, p, arg2);
7958 unlock_user_struct(p, arg3, 0);
7960 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7962 break;
7963 case TARGET_NR_sched_setparam:
7965 struct sched_param *target_schp;
7966 struct sched_param schp;
7968 if (arg2 == 0) {
7969 return -TARGET_EINVAL;
7971 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7972 goto efault;
7973 schp.sched_priority = tswap32(target_schp->sched_priority);
7974 unlock_user_struct(target_schp, arg2, 0);
7975 ret = get_errno(sched_setparam(arg1, &schp));
7977 break;
7978 case TARGET_NR_sched_getparam:
7980 struct sched_param *target_schp;
7981 struct sched_param schp;
7983 if (arg2 == 0) {
7984 return -TARGET_EINVAL;
7986 ret = get_errno(sched_getparam(arg1, &schp));
7987 if (!is_error(ret)) {
7988 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7989 goto efault;
7990 target_schp->sched_priority = tswap32(schp.sched_priority);
7991 unlock_user_struct(target_schp, arg2, 1);
7994 break;
7995 case TARGET_NR_sched_setscheduler:
7997 struct sched_param *target_schp;
7998 struct sched_param schp;
7999 if (arg3 == 0) {
8000 return -TARGET_EINVAL;
8002 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
8003 goto efault;
8004 schp.sched_priority = tswap32(target_schp->sched_priority);
8005 unlock_user_struct(target_schp, arg3, 0);
8006 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
8008 break;
8009 case TARGET_NR_sched_getscheduler:
8010 ret = get_errno(sched_getscheduler(arg1));
8011 break;
8012 case TARGET_NR_sched_yield:
8013 ret = get_errno(sched_yield());
8014 break;
8015 case TARGET_NR_sched_get_priority_max:
8016 ret = get_errno(sched_get_priority_max(arg1));
8017 break;
8018 case TARGET_NR_sched_get_priority_min:
8019 ret = get_errno(sched_get_priority_min(arg1));
8020 break;
8021 case TARGET_NR_sched_rr_get_interval:
8023 struct timespec ts;
8024 ret = get_errno(sched_rr_get_interval(arg1, &ts));
8025 if (!is_error(ret)) {
8026 ret = host_to_target_timespec(arg2, &ts);
8029 break;
8030 case TARGET_NR_nanosleep:
8032 struct timespec req, rem;
8033 target_to_host_timespec(&req, arg1);
8034 ret = get_errno(nanosleep(&req, &rem));
8035 if (is_error(ret) && arg2) {
8036 host_to_target_timespec(arg2, &rem);
8039 break;
8040 #ifdef TARGET_NR_query_module
8041 case TARGET_NR_query_module:
8042 goto unimplemented;
8043 #endif
8044 #ifdef TARGET_NR_nfsservctl
8045 case TARGET_NR_nfsservctl:
8046 goto unimplemented;
8047 #endif
8048 case TARGET_NR_prctl:
8049 switch (arg1) {
8050 case PR_GET_PDEATHSIG:
8052 int deathsig;
8053 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
8054 if (!is_error(ret) && arg2
8055 && put_user_ual(deathsig, arg2)) {
8056 goto efault;
8058 break;
8060 #ifdef PR_GET_NAME
8061 case PR_GET_NAME:
8063 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
8064 if (!name) {
8065 goto efault;
8067 ret = get_errno(prctl(arg1, (unsigned long)name,
8068 arg3, arg4, arg5));
8069 unlock_user(name, arg2, 16);
8070 break;
8072 case PR_SET_NAME:
8074 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
8075 if (!name) {
8076 goto efault;
8078 ret = get_errno(prctl(arg1, (unsigned long)name,
8079 arg3, arg4, arg5));
8080 unlock_user(name, arg2, 0);
8081 break;
8083 #endif
8084 default:
8085 /* Most prctl options have no pointer arguments */
8086 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
8087 break;
8089 break;
8090 #ifdef TARGET_NR_arch_prctl
8091 case TARGET_NR_arch_prctl:
8092 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8093 ret = do_arch_prctl(cpu_env, arg1, arg2);
8094 break;
8095 #else
8096 goto unimplemented;
8097 #endif
8098 #endif
8099 #ifdef TARGET_NR_pread64
8100 case TARGET_NR_pread64:
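/* Some 32-bit ABIs pass 64-bit values in aligned register pairs, which
 * inserts a padding slot before the offset; regpairs_aligned() detects
 * this, so the arguments are shifted down to compensate. */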
8101 if (regpairs_aligned(cpu_env)) {
8102 arg4 = arg5;
8103 arg5 = arg6;
8105 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8106 goto efault;
8107 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
8108 unlock_user(p, arg2, ret);
8109 break;
8110 case TARGET_NR_pwrite64:
8111 if (regpairs_aligned(cpu_env)) {
8112 arg4 = arg5;
8113 arg5 = arg6;
8115 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8116 goto efault;
8117 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
8118 unlock_user(p, arg2, 0);
8119 break;
8120 #endif
8121 case TARGET_NR_getcwd:
8122 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
8123 goto efault;
8124 ret = get_errno(sys_getcwd1(p, arg2));
8125 unlock_user(p, arg1, ret);
8126 break;
8127 case TARGET_NR_capget:
8128 case TARGET_NR_capset:
8130 struct target_user_cap_header *target_header;
8131 struct target_user_cap_data *target_data = NULL;
8132 struct __user_cap_header_struct header;
8133 struct __user_cap_data_struct data[2];
8134 struct __user_cap_data_struct *dataptr = NULL;
8135 int i, target_datalen;
8136 int data_items = 1;
8138 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
8139 goto efault;
8141 header.version = tswap32(target_header->version);
8142 header.pid = tswap32(target_header->pid);
8144 if (header.version != _LINUX_CAPABILITY_VERSION) {
8145 /* Version 2 and up takes pointer to two user_data structs */
8146 data_items = 2;
8149 target_datalen = sizeof(*target_data) * data_items;
8151 if (arg2) {
8152 if (num == TARGET_NR_capget) {
8153 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8154 } else {
8155 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8157 if (!target_data) {
8158 unlock_user_struct(target_header, arg1, 0);
8159 goto efault;
8162 if (num == TARGET_NR_capset) {
8163 for (i = 0; i < data_items; i++) {
8164 data[i].effective = tswap32(target_data[i].effective);
8165 data[i].permitted = tswap32(target_data[i].permitted);
8166 data[i].inheritable = tswap32(target_data[i].inheritable);
8170 dataptr = data;
8173 if (num == TARGET_NR_capget) {
8174 ret = get_errno(capget(&header, dataptr));
8175 } else {
8176 ret = get_errno(capset(&header, dataptr));
8179 /* The kernel always updates version for both capget and capset */
8180 target_header->version = tswap32(header.version);
8181 unlock_user_struct(target_header, arg1, 1);
8183 if (arg2) {
8184 if (num == TARGET_NR_capget) {
8185 for (i = 0; i < data_items; i++) {
8186 target_data[i].effective = tswap32(data[i].effective);
8187 target_data[i].permitted = tswap32(data[i].permitted);
8188 target_data[i].inheritable = tswap32(data[i].inheritable);
8190 unlock_user(target_data, arg2, target_datalen);
8191 } else {
8192 unlock_user(target_data, arg2, 0);
8195 break;
8197 case TARGET_NR_sigaltstack:
8198 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
8199 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
8200 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
8201 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8202 break;
8203 #else
8204 goto unimplemented;
8205 #endif
8207 #ifdef CONFIG_SENDFILE
8208 case TARGET_NR_sendfile:
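/* sendfile takes an optional in/out offset pointer: read it from guest
 * memory, pass it to the host call, and write the updated offset back
 * to the guest on success. */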
8210 off_t *offp = NULL;
8211 off_t off;
8212 if (arg3) {
8213 ret = get_user_sal(off, arg3);
8214 if (is_error(ret)) {
8215 break;
8217 offp = &off;
8219 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8220 if (!is_error(ret) && arg3) {
8221 abi_long ret2 = put_user_sal(off, arg3);
8222 if (is_error(ret2)) {
8223 ret = ret2;
8226 break;
8228 #ifdef TARGET_NR_sendfile64
8229 case TARGET_NR_sendfile64:
8231 off_t *offp = NULL;
8232 off_t off;
8233 if (arg3) {
8234 ret = get_user_s64(off, arg3);
8235 if (is_error(ret)) {
8236 break;
8238 offp = &off;
8240 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8241 if (!is_error(ret) && arg3) {
8242 abi_long ret2 = put_user_s64(off, arg3);
8243 if (is_error(ret2)) {
8244 ret = ret2;
8247 break;
8249 #endif
8250 #else
8251 case TARGET_NR_sendfile:
8252 #ifdef TARGET_NR_sendfile64
8253 case TARGET_NR_sendfile64:
8254 #endif
8255 goto unimplemented;
8256 #endif
8258 #ifdef TARGET_NR_getpmsg
8259 case TARGET_NR_getpmsg:
8260 goto unimplemented;
8261 #endif
8262 #ifdef TARGET_NR_putpmsg
8263 case TARGET_NR_putpmsg:
8264 goto unimplemented;
8265 #endif
8266 #ifdef TARGET_NR_vfork
8267 case TARGET_NR_vfork:
8268 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8269 0, 0, 0, 0));
8270 break;
8271 #endif
8272 #ifdef TARGET_NR_ugetrlimit
8273 case TARGET_NR_ugetrlimit:
8275 struct rlimit rlim;
8276 int resource = target_to_host_resource(arg1);
8277 ret = get_errno(getrlimit(resource, &rlim));
8278 if (!is_error(ret)) {
8279 struct target_rlimit *target_rlim;
8280 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8281 goto efault;
8282 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8283 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8284 unlock_user_struct(target_rlim, arg2, 1);
8286 break;
8288 #endif
8289 #ifdef TARGET_NR_truncate64
8290 case TARGET_NR_truncate64:
8291 if (!(p = lock_user_string(arg1)))
8292 goto efault;
8293 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8294 unlock_user(p, arg1, 0);
8295 break;
8296 #endif
8297 #ifdef TARGET_NR_ftruncate64
8298 case TARGET_NR_ftruncate64:
8299 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8300 break;
8301 #endif
8302 #ifdef TARGET_NR_stat64
8303 case TARGET_NR_stat64:
8304 if (!(p = lock_user_string(arg1)))
8305 goto efault;
8306 ret = get_errno(stat(path(p), &st));
8307 unlock_user(p, arg1, 0);
8308 if (!is_error(ret))
8309 ret = host_to_target_stat64(cpu_env, arg2, &st);
8310 break;
8311 #endif
8312 #ifdef TARGET_NR_lstat64
8313 case TARGET_NR_lstat64:
8314 if (!(p = lock_user_string(arg1)))
8315 goto efault;
8316 ret = get_errno(lstat(path(p), &st));
8317 unlock_user(p, arg1, 0);
8318 if (!is_error(ret))
8319 ret = host_to_target_stat64(cpu_env, arg2, &st);
8320 break;
8321 #endif
8322 #ifdef TARGET_NR_fstat64
8323 case TARGET_NR_fstat64:
8324 ret = get_errno(fstat(arg1, &st));
8325 if (!is_error(ret))
8326 ret = host_to_target_stat64(cpu_env, arg2, &st);
8327 break;
8328 #endif
8329 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8330 #ifdef TARGET_NR_fstatat64
8331 case TARGET_NR_fstatat64:
8332 #endif
8333 #ifdef TARGET_NR_newfstatat
8334 case TARGET_NR_newfstatat:
8335 #endif
8336 if (!(p = lock_user_string(arg2)))
8337 goto efault;
8338 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8339 if (!is_error(ret))
8340 ret = host_to_target_stat64(cpu_env, arg3, &st);
8341 break;
8342 #endif
8343 #ifdef TARGET_NR_lchown
8344 case TARGET_NR_lchown:
8345 if (!(p = lock_user_string(arg1)))
8346 goto efault;
8347 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8348 unlock_user(p, arg1, 0);
8349 break;
8350 #endif
8351 #ifdef TARGET_NR_getuid
8352 case TARGET_NR_getuid:
8353 ret = get_errno(high2lowuid(getuid()));
8354 break;
8355 #endif
8356 #ifdef TARGET_NR_getgid
8357 case TARGET_NR_getgid:
8358 ret = get_errno(high2lowgid(getgid()));
8359 break;
8360 #endif
8361 #ifdef TARGET_NR_geteuid
8362 case TARGET_NR_geteuid:
8363 ret = get_errno(high2lowuid(geteuid()));
8364 break;
8365 #endif
8366 #ifdef TARGET_NR_getegid
8367 case TARGET_NR_getegid:
8368 ret = get_errno(high2lowgid(getegid()));
8369 break;
8370 #endif
8371 case TARGET_NR_setreuid:
8372 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8373 break;
8374 case TARGET_NR_setregid:
8375 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8376 break;
8377 case TARGET_NR_getgroups:
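/* target_id is only 16 bits wide on targets whose legacy getgroups
 * syscall uses 16-bit gids; tswapid() and high2lowgid() handle both the
 * byte swap and the wide-to-narrow conversion. */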
8379 int gidsetsize = arg1;
8380 target_id *target_grouplist;
8381 gid_t *grouplist;
8382 int i;
8384 grouplist = alloca(gidsetsize * sizeof(gid_t));
8385 ret = get_errno(getgroups(gidsetsize, grouplist));
8386 if (gidsetsize == 0)
8387 break;
8388 if (!is_error(ret)) {
8389 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8390 if (!target_grouplist)
8391 goto efault;
8392 for(i = 0;i < ret; i++)
8393 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8394 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8397 break;
8398 case TARGET_NR_setgroups:
8400 int gidsetsize = arg1;
8401 target_id *target_grouplist;
8402 gid_t *grouplist = NULL;
8403 int i;
8404 if (gidsetsize) {
8405 grouplist = alloca(gidsetsize * sizeof(gid_t));
8406 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8407 if (!target_grouplist) {
8408 ret = -TARGET_EFAULT;
8409 goto fail;
8411 for (i = 0; i < gidsetsize; i++) {
8412 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8414 unlock_user(target_grouplist, arg2, 0);
8416 ret = get_errno(setgroups(gidsetsize, grouplist));
8418 break;
8419 case TARGET_NR_fchown:
8420 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8421 break;
8422 #if defined(TARGET_NR_fchownat)
8423 case TARGET_NR_fchownat:
8424 if (!(p = lock_user_string(arg2)))
8425 goto efault;
8426 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8427 low2highgid(arg4), arg5));
8428 unlock_user(p, arg2, 0);
8429 break;
8430 #endif
8431 #ifdef TARGET_NR_setresuid
8432 case TARGET_NR_setresuid:
8433 ret = get_errno(setresuid(low2highuid(arg1),
8434 low2highuid(arg2),
8435 low2highuid(arg3)));
8436 break;
8437 #endif
8438 #ifdef TARGET_NR_getresuid
8439 case TARGET_NR_getresuid:
8441 uid_t ruid, euid, suid;
8442 ret = get_errno(getresuid(&ruid, &euid, &suid));
8443 if (!is_error(ret)) {
8444 if (put_user_id(high2lowuid(ruid), arg1)
8445 || put_user_id(high2lowuid(euid), arg2)
8446 || put_user_id(high2lowuid(suid), arg3))
8447 goto efault;
8450 break;
8451 #endif
8452 #ifdef TARGET_NR_getresgid
8453 case TARGET_NR_setresgid:
8454 ret = get_errno(setresgid(low2highgid(arg1),
8455 low2highgid(arg2),
8456 low2highgid(arg3)));
8457 break;
8458 #endif
8459 #ifdef TARGET_NR_getresgid
8460 case TARGET_NR_getresgid:
8462 gid_t rgid, egid, sgid;
8463 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8464 if (!is_error(ret)) {
8465 if (put_user_id(high2lowgid(rgid), arg1)
8466 || put_user_id(high2lowgid(egid), arg2)
8467 || put_user_id(high2lowgid(sgid), arg3))
8468 goto efault;
8471 break;
8472 #endif
8473 #ifdef TARGET_NR_chown
8474 case TARGET_NR_chown:
8475 if (!(p = lock_user_string(arg1)))
8476 goto efault;
8477 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8478 unlock_user(p, arg1, 0);
8479 break;
8480 #endif
8481 case TARGET_NR_setuid:
8482 ret = get_errno(setuid(low2highuid(arg1)));
8483 break;
8484 case TARGET_NR_setgid:
8485 ret = get_errno(setgid(low2highgid(arg1)));
8486 break;
8487 case TARGET_NR_setfsuid:
8488 ret = get_errno(setfsuid(arg1));
8489 break;
8490 case TARGET_NR_setfsgid:
8491 ret = get_errno(setfsgid(arg1));
8492 break;
8494 #ifdef TARGET_NR_lchown32
8495 case TARGET_NR_lchown32:
8496 if (!(p = lock_user_string(arg1)))
8497 goto efault;
8498 ret = get_errno(lchown(p, arg2, arg3));
8499 unlock_user(p, arg1, 0);
8500 break;
8501 #endif
8502 #ifdef TARGET_NR_getuid32
8503 case TARGET_NR_getuid32:
8504 ret = get_errno(getuid());
8505 break;
8506 #endif
8508 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8509 /* Alpha specific */
8510 case TARGET_NR_getxuid:
8512 uid_t euid;
8513 euid = geteuid();
8514 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
8516 ret = get_errno(getuid());
8517 break;
8518 #endif
8519 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8520 /* Alpha specific */
8521 case TARGET_NR_getxgid:
8523 gid_t egid;
8524 egid = getegid();
8525 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
8527 ret = get_errno(getgid());
8528 break;
8529 #endif
8530 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8531 /* Alpha specific */
8532 case TARGET_NR_osf_getsysinfo:
8533 ret = -TARGET_EOPNOTSUPP;
8534 switch (arg1) {
8535 case TARGET_GSI_IEEE_FP_CONTROL:
8537 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8539 /* Copied from linux ieee_fpcr_to_swcr. */
8540 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8541 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8542 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8543 | SWCR_TRAP_ENABLE_DZE
8544 | SWCR_TRAP_ENABLE_OVF);
8545 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8546 | SWCR_TRAP_ENABLE_INE);
8547 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8548 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8550 if (put_user_u64 (swcr, arg2))
8551 goto efault;
8552 ret = 0;
8554 break;
8556 /* case GSI_IEEE_STATE_AT_SIGNAL:
8557 -- Not implemented in linux kernel.
8558 case GSI_UACPROC:
8559 -- Retrieves current unaligned access state; not much used.
8560 case GSI_PROC_TYPE:
8561 -- Retrieves implver information; surely not used.
8562 case GSI_GET_HWRPB:
8563 -- Grabs a copy of the HWRPB; surely not used.
8564 */
8566 break;
8567 #endif
8568 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8569 /* Alpha specific */
8570 case TARGET_NR_osf_setsysinfo:
8571 ret = -TARGET_EOPNOTSUPP;
8572 switch (arg1) {
8573 case TARGET_SSI_IEEE_FP_CONTROL:
8575 uint64_t swcr, fpcr, orig_fpcr;
8577 if (get_user_u64 (swcr, arg2)) {
8578 goto efault;
8580 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8581 fpcr = orig_fpcr & FPCR_DYN_MASK;
8583 /* Copied from linux ieee_swcr_to_fpcr. */
8584 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8585 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8586 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8587 | SWCR_TRAP_ENABLE_DZE
8588 | SWCR_TRAP_ENABLE_OVF)) << 48;
8589 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8590 | SWCR_TRAP_ENABLE_INE)) << 57;
8591 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8592 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8594 cpu_alpha_store_fpcr(cpu_env, fpcr);
8595 ret = 0;
8597 break;
8599 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8601 uint64_t exc, fpcr, orig_fpcr;
8602 int si_code;
8604 if (get_user_u64(exc, arg2)) {
8605 goto efault;
8608 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8610 /* We only add to the exception status here. */
8611 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8613 cpu_alpha_store_fpcr(cpu_env, fpcr);
8614 ret = 0;
8616 /* Old exceptions are not signaled. */
8617 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8619 /* If any exceptions set by this call,
8620 and are unmasked, send a signal. */
8621 si_code = 0;
8622 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8623 si_code = TARGET_FPE_FLTRES;
8625 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8626 si_code = TARGET_FPE_FLTUND;
8628 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8629 si_code = TARGET_FPE_FLTOVF;
8631 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8632 si_code = TARGET_FPE_FLTDIV;
8634 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8635 si_code = TARGET_FPE_FLTINV;
8637 if (si_code != 0) {
8638 target_siginfo_t info;
8639 info.si_signo = SIGFPE;
8640 info.si_errno = 0;
8641 info.si_code = si_code;
8642 info._sifields._sigfault._addr
8643 = ((CPUArchState *)cpu_env)->pc;
8644 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8647 break;
8649 /* case SSI_NVPAIRS:
8650 -- Used with SSIN_UACPROC to enable unaligned accesses.
8651 case SSI_IEEE_STATE_AT_SIGNAL:
8652 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8653 -- Not implemented in linux kernel
8654 */
8656 break;
8657 #endif
8658 #ifdef TARGET_NR_osf_sigprocmask
8659 /* Alpha specific. */
8660 case TARGET_NR_osf_sigprocmask:
8662 abi_ulong mask;
8663 int how;
8664 sigset_t set, oldset;
8666 switch(arg1) {
8667 case TARGET_SIG_BLOCK:
8668 how = SIG_BLOCK;
8669 break;
8670 case TARGET_SIG_UNBLOCK:
8671 how = SIG_UNBLOCK;
8672 break;
8673 case TARGET_SIG_SETMASK:
8674 how = SIG_SETMASK;
8675 break;
8676 default:
8677 ret = -TARGET_EINVAL;
8678 goto fail;
8680 mask = arg2;
8681 target_to_host_old_sigset(&set, &mask);
8682 do_sigprocmask(how, &set, &oldset);
8683 host_to_target_old_sigset(&mask, &oldset);
8684 ret = mask;
8686 break;
8687 #endif
8689 #ifdef TARGET_NR_getgid32
8690 case TARGET_NR_getgid32:
8691 ret = get_errno(getgid());
8692 break;
8693 #endif
8694 #ifdef TARGET_NR_geteuid32
8695 case TARGET_NR_geteuid32:
8696 ret = get_errno(geteuid());
8697 break;
8698 #endif
8699 #ifdef TARGET_NR_getegid32
8700 case TARGET_NR_getegid32:
8701 ret = get_errno(getegid());
8702 break;
8703 #endif
8704 #ifdef TARGET_NR_setreuid32
8705 case TARGET_NR_setreuid32:
8706 ret = get_errno(setreuid(arg1, arg2));
8707 break;
8708 #endif
8709 #ifdef TARGET_NR_setregid32
8710 case TARGET_NR_setregid32:
8711 ret = get_errno(setregid(arg1, arg2));
8712 break;
8713 #endif
8714 #ifdef TARGET_NR_getgroups32
8715 case TARGET_NR_getgroups32:
8717 int gidsetsize = arg1;
8718 uint32_t *target_grouplist;
8719 gid_t *grouplist;
8720 int i;
8722 grouplist = alloca(gidsetsize * sizeof(gid_t));
8723 ret = get_errno(getgroups(gidsetsize, grouplist));
8724 if (gidsetsize == 0)
8725 break;
8726 if (!is_error(ret)) {
8727 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8728 if (!target_grouplist) {
8729 ret = -TARGET_EFAULT;
8730 goto fail;
8732 for(i = 0;i < ret; i++)
8733 target_grouplist[i] = tswap32(grouplist[i]);
8734 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8737 break;
8738 #endif
8739 #ifdef TARGET_NR_setgroups32
8740 case TARGET_NR_setgroups32:
8742 int gidsetsize = arg1;
8743 uint32_t *target_grouplist;
8744 gid_t *grouplist;
8745 int i;
8747 grouplist = alloca(gidsetsize * sizeof(gid_t));
8748 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8749 if (!target_grouplist) {
8750 ret = -TARGET_EFAULT;
8751 goto fail;
8753 for(i = 0;i < gidsetsize; i++)
8754 grouplist[i] = tswap32(target_grouplist[i]);
8755 unlock_user(target_grouplist, arg2, 0);
8756 ret = get_errno(setgroups(gidsetsize, grouplist));
8758 break;
8759 #endif
8760 #ifdef TARGET_NR_fchown32
8761 case TARGET_NR_fchown32:
8762 ret = get_errno(fchown(arg1, arg2, arg3));
8763 break;
8764 #endif
8765 #ifdef TARGET_NR_setresuid32
8766 case TARGET_NR_setresuid32:
8767 ret = get_errno(setresuid(arg1, arg2, arg3));
8768 break;
8769 #endif
8770 #ifdef TARGET_NR_getresuid32
8771 case TARGET_NR_getresuid32:
8773 uid_t ruid, euid, suid;
8774 ret = get_errno(getresuid(&ruid, &euid, &suid));
8775 if (!is_error(ret)) {
8776 if (put_user_u32(ruid, arg1)
8777 || put_user_u32(euid, arg2)
8778 || put_user_u32(suid, arg3))
8779 goto efault;
8782 break;
8783 #endif
8784 #ifdef TARGET_NR_setresgid32
8785 case TARGET_NR_setresgid32:
8786 ret = get_errno(setresgid(arg1, arg2, arg3));
8787 break;
8788 #endif
8789 #ifdef TARGET_NR_getresgid32
8790 case TARGET_NR_getresgid32:
8792 gid_t rgid, egid, sgid;
8793 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8794 if (!is_error(ret)) {
8795 if (put_user_u32(rgid, arg1)
8796 || put_user_u32(egid, arg2)
8797 || put_user_u32(sgid, arg3))
8798 goto efault;
8801 break;
8802 #endif
8803 #ifdef TARGET_NR_chown32
8804 case TARGET_NR_chown32:
8805 if (!(p = lock_user_string(arg1)))
8806 goto efault;
8807 ret = get_errno(chown(p, arg2, arg3));
8808 unlock_user(p, arg1, 0);
8809 break;
8810 #endif
8811 #ifdef TARGET_NR_setuid32
8812 case TARGET_NR_setuid32:
8813 ret = get_errno(setuid(arg1));
8814 break;
8815 #endif
8816 #ifdef TARGET_NR_setgid32
8817 case TARGET_NR_setgid32:
8818 ret = get_errno(setgid(arg1));
8819 break;
8820 #endif
8821 #ifdef TARGET_NR_setfsuid32
8822 case TARGET_NR_setfsuid32:
8823 ret = get_errno(setfsuid(arg1));
8824 break;
8825 #endif
8826 #ifdef TARGET_NR_setfsgid32
8827 case TARGET_NR_setfsgid32:
8828 ret = get_errno(setfsgid(arg1));
8829 break;
8830 #endif
8832 case TARGET_NR_pivot_root:
8833 goto unimplemented;
8834 #ifdef TARGET_NR_mincore
8835 case TARGET_NR_mincore:
8837 void *a;
8838 ret = -TARGET_EFAULT;
8839 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8840 goto efault;
8841 if (!(p = lock_user_string(arg3)))
8842 goto mincore_fail;
8843 ret = get_errno(mincore(a, arg2, p));
8844 unlock_user(p, arg3, ret);
8845 mincore_fail:
8846 unlock_user(a, arg1, 0);
8848 break;
8849 #endif
8850 #ifdef TARGET_NR_arm_fadvise64_64
8851 case TARGET_NR_arm_fadvise64_64:
8853 /*
8854 * arm_fadvise64_64 looks like fadvise64_64 but
8855 * with different argument order
8856 */
8857 abi_long temp;
8858 temp = arg3;
8859 arg3 = arg4;
8860 arg4 = temp;
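/* No break here: deliberately falls through into the generic
 * fadvise64_64/fadvise64 handling below with the arguments reordered. */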
8862 #endif
8863 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8864 #ifdef TARGET_NR_fadvise64_64
8865 case TARGET_NR_fadvise64_64:
8866 #endif
8867 #ifdef TARGET_NR_fadvise64
8868 case TARGET_NR_fadvise64:
8869 #endif
8870 #ifdef TARGET_S390X
8871 switch (arg4) {
8872 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8873 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8874 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8875 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8876 default: break;
8878 #endif
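/* posix_fadvise() returns the error number directly instead of setting
 * errno, so negate the result rather than going through get_errno(). */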
8879 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8880 break;
8881 #endif
8882 #ifdef TARGET_NR_madvise
8883 case TARGET_NR_madvise:
8884 /* A straight passthrough may not be safe because qemu sometimes
8885 turns private file-backed mappings into anonymous mappings.
8886 This will break MADV_DONTNEED.
8887 This is a hint, so ignoring and returning success is ok. */
8888 ret = get_errno(0);
8889 break;
8890 #endif
8891 #if TARGET_ABI_BITS == 32
8892 case TARGET_NR_fcntl64:
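/* fcntl64 only exists on 32-bit ABIs; the 64-bit lock commands need the
 * target struct flock64 converted to the host layout and back, with ARM
 * EABI using its own variant of the structure. */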
8894 int cmd;
8895 struct flock64 fl;
8896 struct target_flock64 *target_fl;
8897 #ifdef TARGET_ARM
8898 struct target_eabi_flock64 *target_efl;
8899 #endif
8901 cmd = target_to_host_fcntl_cmd(arg2);
8902 if (cmd == -TARGET_EINVAL) {
8903 ret = cmd;
8904 break;
8907 switch(arg2) {
8908 case TARGET_F_GETLK64:
8909 #ifdef TARGET_ARM
8910 if (((CPUARMState *)cpu_env)->eabi) {
8911 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8912 goto efault;
8913 fl.l_type = tswap16(target_efl->l_type);
8914 fl.l_whence = tswap16(target_efl->l_whence);
8915 fl.l_start = tswap64(target_efl->l_start);
8916 fl.l_len = tswap64(target_efl->l_len);
8917 fl.l_pid = tswap32(target_efl->l_pid);
8918 unlock_user_struct(target_efl, arg3, 0);
8919 } else
8920 #endif
8922 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8923 goto efault;
8924 fl.l_type = tswap16(target_fl->l_type);
8925 fl.l_whence = tswap16(target_fl->l_whence);
8926 fl.l_start = tswap64(target_fl->l_start);
8927 fl.l_len = tswap64(target_fl->l_len);
8928 fl.l_pid = tswap32(target_fl->l_pid);
8929 unlock_user_struct(target_fl, arg3, 0);
8931 ret = get_errno(fcntl(arg1, cmd, &fl));
8932 if (ret == 0) {
8933 #ifdef TARGET_ARM
8934 if (((CPUARMState *)cpu_env)->eabi) {
8935 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8936 goto efault;
8937 target_efl->l_type = tswap16(fl.l_type);
8938 target_efl->l_whence = tswap16(fl.l_whence);
8939 target_efl->l_start = tswap64(fl.l_start);
8940 target_efl->l_len = tswap64(fl.l_len);
8941 target_efl->l_pid = tswap32(fl.l_pid);
8942 unlock_user_struct(target_efl, arg3, 1);
8943 } else
8944 #endif
8946 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8947 goto efault;
8948 target_fl->l_type = tswap16(fl.l_type);
8949 target_fl->l_whence = tswap16(fl.l_whence);
8950 target_fl->l_start = tswap64(fl.l_start);
8951 target_fl->l_len = tswap64(fl.l_len);
8952 target_fl->l_pid = tswap32(fl.l_pid);
8953 unlock_user_struct(target_fl, arg3, 1);
8956 break;
8958 case TARGET_F_SETLK64:
8959 case TARGET_F_SETLKW64:
8960 #ifdef TARGET_ARM
8961 if (((CPUARMState *)cpu_env)->eabi) {
8962 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8963 goto efault;
8964 fl.l_type = tswap16(target_efl->l_type);
8965 fl.l_whence = tswap16(target_efl->l_whence);
8966 fl.l_start = tswap64(target_efl->l_start);
8967 fl.l_len = tswap64(target_efl->l_len);
8968 fl.l_pid = tswap32(target_efl->l_pid);
8969 unlock_user_struct(target_efl, arg3, 0);
8970 } else
8971 #endif
8973 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8974 goto efault;
8975 fl.l_type = tswap16(target_fl->l_type);
8976 fl.l_whence = tswap16(target_fl->l_whence);
8977 fl.l_start = tswap64(target_fl->l_start);
8978 fl.l_len = tswap64(target_fl->l_len);
8979 fl.l_pid = tswap32(target_fl->l_pid);
8980 unlock_user_struct(target_fl, arg3, 0);
8982 ret = get_errno(fcntl(arg1, cmd, &fl));
8983 break;
8984 default:
8985 ret = do_fcntl(arg1, arg2, arg3);
8986 break;
8988 break;
8990 #endif
8991 #ifdef TARGET_NR_cacheflush
8992 case TARGET_NR_cacheflush:
8993 /* self-modifying code is handled automatically, so nothing needed */
8994 ret = 0;
8995 break;
8996 #endif
8997 #ifdef TARGET_NR_security
8998 case TARGET_NR_security:
8999 goto unimplemented;
9000 #endif
9001 #ifdef TARGET_NR_getpagesize
9002 case TARGET_NR_getpagesize:
9003 ret = TARGET_PAGE_SIZE;
9004 break;
9005 #endif
9006 case TARGET_NR_gettid:
9007 ret = get_errno(gettid());
9008 break;
9009 #ifdef TARGET_NR_readahead
9010 case TARGET_NR_readahead:
9011 #if TARGET_ABI_BITS == 32
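/* On 32-bit ABIs the 64-bit offset arrives split across two registers
 * (low word in arg2, high word in arg3 here), possibly preceded by a
 * padding slot when register pairs must be aligned. */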
9012 if (regpairs_aligned(cpu_env)) {
9013 arg2 = arg3;
9014 arg3 = arg4;
9015 arg4 = arg5;
9017 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
9018 #else
9019 ret = get_errno(readahead(arg1, arg2, arg3));
9020 #endif
9021 break;
9022 #endif
9023 #ifdef CONFIG_ATTR
9024 #ifdef TARGET_NR_setxattr
9025 case TARGET_NR_listxattr:
9026 case TARGET_NR_llistxattr:
9028 void *p, *b = 0;
9029 if (arg2) {
9030 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9031 if (!b) {
9032 ret = -TARGET_EFAULT;
9033 break;
9036 p = lock_user_string(arg1);
9037 if (p) {
9038 if (num == TARGET_NR_listxattr) {
9039 ret = get_errno(listxattr(p, b, arg3));
9040 } else {
9041 ret = get_errno(llistxattr(p, b, arg3));
9043 } else {
9044 ret = -TARGET_EFAULT;
9046 unlock_user(p, arg1, 0);
9047 unlock_user(b, arg2, arg3);
9048 break;
9050 case TARGET_NR_flistxattr:
9052 void *b = 0;
9053 if (arg2) {
9054 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9055 if (!b) {
9056 ret = -TARGET_EFAULT;
9057 break;
9060 ret = get_errno(flistxattr(arg1, b, arg3));
9061 unlock_user(b, arg2, arg3);
9062 break;
9064 case TARGET_NR_setxattr:
9065 case TARGET_NR_lsetxattr:
9067 void *p, *n, *v = 0;
9068 if (arg3) {
9069 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9070 if (!v) {
9071 ret = -TARGET_EFAULT;
9072 break;
9075 p = lock_user_string(arg1);
9076 n = lock_user_string(arg2);
9077 if (p && n) {
9078 if (num == TARGET_NR_setxattr) {
9079 ret = get_errno(setxattr(p, n, v, arg4, arg5));
9080 } else {
9081 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
9083 } else {
9084 ret = -TARGET_EFAULT;
9086 unlock_user(p, arg1, 0);
9087 unlock_user(n, arg2, 0);
9088 unlock_user(v, arg3, 0);
9090 break;
9091 case TARGET_NR_fsetxattr:
9093 void *n, *v = 0;
9094 if (arg3) {
9095 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9096 if (!v) {
9097 ret = -TARGET_EFAULT;
9098 break;
9101 n = lock_user_string(arg2);
9102 if (n) {
9103 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
9104 } else {
9105 ret = -TARGET_EFAULT;
9107 unlock_user(n, arg2, 0);
9108 unlock_user(v, arg3, 0);
9110 break;
9111 case TARGET_NR_getxattr:
9112 case TARGET_NR_lgetxattr:
9114 void *p, *n, *v = 0;
9115 if (arg3) {
9116 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9117 if (!v) {
9118 ret = -TARGET_EFAULT;
9119 break;
9122 p = lock_user_string(arg1);
9123 n = lock_user_string(arg2);
9124 if (p && n) {
9125 if (num == TARGET_NR_getxattr) {
9126 ret = get_errno(getxattr(p, n, v, arg4));
9127 } else {
9128 ret = get_errno(lgetxattr(p, n, v, arg4));
9130 } else {
9131 ret = -TARGET_EFAULT;
9133 unlock_user(p, arg1, 0);
9134 unlock_user(n, arg2, 0);
9135 unlock_user(v, arg3, arg4);
9137 break;
9138 case TARGET_NR_fgetxattr:
9140 void *n, *v = 0;
9141 if (arg3) {
9142 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9143 if (!v) {
9144 ret = -TARGET_EFAULT;
9145 break;
9148 n = lock_user_string(arg2);
9149 if (n) {
9150 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9151 } else {
9152 ret = -TARGET_EFAULT;
9154 unlock_user(n, arg2, 0);
9155 unlock_user(v, arg3, arg4);
9157 break;
9158 case TARGET_NR_removexattr:
9159 case TARGET_NR_lremovexattr:
9161 void *p, *n;
9162 p = lock_user_string(arg1);
9163 n = lock_user_string(arg2);
9164 if (p && n) {
9165 if (num == TARGET_NR_removexattr) {
9166 ret = get_errno(removexattr(p, n));
9167 } else {
9168 ret = get_errno(lremovexattr(p, n));
9170 } else {
9171 ret = -TARGET_EFAULT;
9173 unlock_user(p, arg1, 0);
9174 unlock_user(n, arg2, 0);
9176 break;
9177 case TARGET_NR_fremovexattr:
9179 void *n;
9180 n = lock_user_string(arg2);
9181 if (n) {
9182 ret = get_errno(fremovexattr(arg1, n));
9183 } else {
9184 ret = -TARGET_EFAULT;
9186 unlock_user(n, arg2, 0);
9188 break;
9189 #endif
9190 #endif /* CONFIG_ATTR */
9191 #ifdef TARGET_NR_set_thread_area
9192 case TARGET_NR_set_thread_area:
9193 #if defined(TARGET_MIPS)
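/* MIPS exposes the TLS pointer through the UserLocal register, which
 * guest code reads back with the RDHWR instruction, so storing the
 * value there is all that is needed. */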
9194 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9195 ret = 0;
9196 break;
9197 #elif defined(TARGET_CRIS)
9198 if (arg1 & 0xff)
9199 ret = -TARGET_EINVAL;
9200 else {
9201 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9202 ret = 0;
9204 break;
9205 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9206 ret = do_set_thread_area(cpu_env, arg1);
9207 break;
9208 #elif defined(TARGET_M68K)
9210 TaskState *ts = cpu->opaque;
9211 ts->tp_value = arg1;
9212 ret = 0;
9213 break;
9215 #else
9216 goto unimplemented_nowarn;
9217 #endif
9218 #endif
9219 #ifdef TARGET_NR_get_thread_area
9220 case TARGET_NR_get_thread_area:
9221 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9222 ret = do_get_thread_area(cpu_env, arg1);
9223 break;
9224 #elif defined(TARGET_M68K)
9226 TaskState *ts = cpu->opaque;
9227 ret = ts->tp_value;
9228 break;
9230 #else
9231 goto unimplemented_nowarn;
9232 #endif
9233 #endif
9234 #ifdef TARGET_NR_getdomainname
9235 case TARGET_NR_getdomainname:
9236 goto unimplemented_nowarn;
9237 #endif
9239 #ifdef TARGET_NR_clock_gettime
9240 case TARGET_NR_clock_gettime:
9242 struct timespec ts;
9243 ret = get_errno(clock_gettime(arg1, &ts));
9244 if (!is_error(ret)) {
9245 host_to_target_timespec(arg2, &ts);
9247 break;
9249 #endif
9250 #ifdef TARGET_NR_clock_getres
9251 case TARGET_NR_clock_getres:
9253 struct timespec ts;
9254 ret = get_errno(clock_getres(arg1, &ts));
9255 if (!is_error(ret)) {
9256 host_to_target_timespec(arg2, &ts);
9258 break;
9260 #endif
9261 #ifdef TARGET_NR_clock_nanosleep
9262 case TARGET_NR_clock_nanosleep:
9264 struct timespec ts;
9265 target_to_host_timespec(&ts, arg3);
9266 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
9267 if (arg4)
9268 host_to_target_timespec(arg4, &ts);
9270 #if defined(TARGET_PPC)
9271 /* clock_nanosleep is odd in that it returns positive errno values.
9272 * On PPC, CR0 bit 3 should be set in such a situation. */
9273 if (ret) {
9274 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
9276 #endif
9277 break;
9279 #endif
9281 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9282 case TARGET_NR_set_tid_address:
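/* Pass the host kernel a host pointer (via g2h translation) so it can
 * write the thread ID there and clear it when the thread exits. */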
9283 ret = get_errno(set_tid_address((int *)g2h(arg1)));
9284 break;
9285 #endif
9287 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9288 case TARGET_NR_tkill:
9289 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9290 break;
9291 #endif
9293 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9294 case TARGET_NR_tgkill:
9295 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9296 target_to_host_signal(arg3)));
9297 break;
9298 #endif
9300 #ifdef TARGET_NR_set_robust_list
9301 case TARGET_NR_set_robust_list:
9302 case TARGET_NR_get_robust_list:
9303 /* The ABI for supporting robust futexes has userspace pass
9304 * the kernel a pointer to a linked list which is updated by
9305 * userspace after the syscall; the list is walked by the kernel
9306 * when the thread exits. Since the linked list in QEMU guest
9307 * memory isn't a valid linked list for the host and we have
9308 * no way to reliably intercept the thread-death event, we can't
9309 * support these. Silently return ENOSYS so that guest userspace
9310 * falls back to a non-robust futex implementation (which should
9311 * be OK except in the corner case of the guest crashing while
9312 * holding a mutex that is shared with another process via
9313 * shared memory).
9314 */
9315 goto unimplemented_nowarn;
9316 #endif
9318 #if defined(TARGET_NR_utimensat)
9319 case TARGET_NR_utimensat:
9321 struct timespec *tsp, ts[2];
9322 if (!arg3) {
9323 tsp = NULL;
9324 } else {
9325 target_to_host_timespec(ts, arg3);
9326 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9327 tsp = ts;
9329 if (!arg2)
9330 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9331 else {
9332 if (!(p = lock_user_string(arg2))) {
9333 ret = -TARGET_EFAULT;
9334 goto fail;
9336 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9337 unlock_user(p, arg2, 0);
9340 break;
9341 #endif
9342 case TARGET_NR_futex:
9343 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9344 break;
9345 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9346 case TARGET_NR_inotify_init:
9347 ret = get_errno(sys_inotify_init());
9348 break;
9349 #endif
9350 #ifdef CONFIG_INOTIFY1
9351 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9352 case TARGET_NR_inotify_init1:
9353 ret = get_errno(sys_inotify_init1(arg1));
9354 break;
9355 #endif
9356 #endif
9357 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9358 case TARGET_NR_inotify_add_watch:
9359 p = lock_user_string(arg2);
9360 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9361 unlock_user(p, arg2, 0);
9362 break;
9363 #endif
9364 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9365 case TARGET_NR_inotify_rm_watch:
9366 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9367 break;
9368 #endif
9370 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9371 case TARGET_NR_mq_open:
9373 struct mq_attr posix_mq_attr, *attrp;
9375 p = lock_user_string(arg1 - 1);
9376 if (arg4 != 0) {
9377 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9378 attrp = &posix_mq_attr;
9379 } else {
9380 attrp = 0;
9382 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9383 unlock_user (p, arg1, 0);
9385 break;
9387 case TARGET_NR_mq_unlink:
9388 p = lock_user_string(arg1 - 1);
9389 ret = get_errno(mq_unlink(p));
9390 unlock_user (p, arg1, 0);
9391 break;
9393 case TARGET_NR_mq_timedsend:
9395 struct timespec ts;
9397 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9398 if (arg5 != 0) {
9399 target_to_host_timespec(&ts, arg5);
9400 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9401 host_to_target_timespec(arg5, &ts);
9403 else
9404 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9405 unlock_user (p, arg2, arg3);
9407 break;
9409 case TARGET_NR_mq_timedreceive:
9411 struct timespec ts;
9412 unsigned int prio;
9414 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9415 if (arg5 != 0) {
9416 target_to_host_timespec(&ts, arg5);
9417 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9418 host_to_target_timespec(arg5, &ts);
9420 else
9421 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9422 unlock_user (p, arg2, arg3);
9423 if (arg4 != 0)
9424 put_user_u32(prio, arg4);
9426 break;
9428 /* Not implemented for now... */
9429 /* case TARGET_NR_mq_notify: */
9430 /* break; */
9432 case TARGET_NR_mq_getsetattr:
9434 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9435 ret = 0;
9436 if (arg3 != 0) {
9437 ret = mq_getattr(arg1, &posix_mq_attr_out);
9438 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9440 if (arg2 != 0) {
9441 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9442 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9446 break;
9447 #endif
9449 #ifdef CONFIG_SPLICE
9450 #ifdef TARGET_NR_tee
9451 case TARGET_NR_tee:
9453 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9455 break;
9456 #endif
9457 #ifdef TARGET_NR_splice
9458 case TARGET_NR_splice:
9460 loff_t loff_in, loff_out;
9461 loff_t *ploff_in = NULL, *ploff_out = NULL;
9462 if (arg2) {
9463 if (get_user_u64(loff_in, arg2)) {
9464 goto efault;
9466 ploff_in = &loff_in;
9468 if (arg4) {
9469 if (get_user_u64(loff_out, arg4)) {
9470 goto efault;
9472 ploff_out = &loff_out;
9474 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9475 if (arg2) {
9476 if (put_user_u64(loff_in, arg2)) {
9477 goto efault;
9480 if (arg4) {
9481 if (put_user_u64(loff_out, arg4)) {
9482 goto efault;
9486 break;
9487 #endif
9488 #ifdef TARGET_NR_vmsplice
9489 case TARGET_NR_vmsplice:
9491 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9492 if (vec != NULL) {
9493 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9494 unlock_iovec(vec, arg2, arg3, 0);
9495 } else {
9496 ret = -host_to_target_errno(errno);
9499 break;
9500 #endif
9501 #endif /* CONFIG_SPLICE */
9502 #ifdef CONFIG_EVENTFD
9503 #if defined(TARGET_NR_eventfd)
9504 case TARGET_NR_eventfd:
9505 ret = get_errno(eventfd(arg1, 0));
9506 break;
9507 #endif
9508 #if defined(TARGET_NR_eventfd2)
9509 case TARGET_NR_eventfd2:
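/* EFD_NONBLOCK/EFD_CLOEXEC mirror O_NONBLOCK/O_CLOEXEC, whose numeric
 * values can differ between target and host on some targets, so the two
 * bits are translated explicitly instead of passing arg2 straight through. */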
9511 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9512 if (arg2 & TARGET_O_NONBLOCK) {
9513 host_flags |= O_NONBLOCK;
9515 if (arg2 & TARGET_O_CLOEXEC) {
9516 host_flags |= O_CLOEXEC;
9518 ret = get_errno(eventfd(arg1, host_flags));
9519 break;
9521 #endif
9522 #endif /* CONFIG_EVENTFD */
9523 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9524 case TARGET_NR_fallocate:
9525 #if TARGET_ABI_BITS == 32
9526 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9527 target_offset64(arg5, arg6)));
9528 #else
9529 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9530 #endif
9531 break;
9532 #endif
9533 #if defined(CONFIG_SYNC_FILE_RANGE)
9534 #if defined(TARGET_NR_sync_file_range)
9535 case TARGET_NR_sync_file_range:
9536 #if TARGET_ABI_BITS == 32
9537 #if defined(TARGET_MIPS)
9538 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9539 target_offset64(arg5, arg6), arg7));
9540 #else
9541 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9542 target_offset64(arg4, arg5), arg6));
9543 #endif /* !TARGET_MIPS */
9544 #else
9545 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9546 #endif
9547 break;
9548 #endif
9549 #if defined(TARGET_NR_sync_file_range2)
9550 case TARGET_NR_sync_file_range2:
9551 /* This is like sync_file_range but the arguments are reordered */
9552 #if TARGET_ABI_BITS == 32
9553 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9554 target_offset64(arg5, arg6), arg2));
9555 #else
9556 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9557 #endif
9558 break;
9559 #endif
9560 #endif
9561 #if defined(CONFIG_EPOLL)
9562 #if defined(TARGET_NR_epoll_create)
9563 case TARGET_NR_epoll_create:
9564 ret = get_errno(epoll_create(arg1));
9565 break;
9566 #endif
9567 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9568 case TARGET_NR_epoll_create1:
9569 ret = get_errno(epoll_create1(arg1));
9570 break;
9571 #endif
9572 #if defined(TARGET_NR_epoll_ctl)
9573 case TARGET_NR_epoll_ctl:
9575 struct epoll_event ep;
9576 struct epoll_event *epp = 0;
9577 if (arg4) {
9578 struct target_epoll_event *target_ep;
9579 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9580 goto efault;
9582 ep.events = tswap32(target_ep->events);
9583 /* The epoll_data_t union is just opaque data to the kernel,
9584 * so we transfer all 64 bits across and need not worry what
9585 * actual data type it is.
9586 */
9587 ep.data.u64 = tswap64(target_ep->data.u64);
9588 unlock_user_struct(target_ep, arg4, 0);
9589 epp = &ep;
9591 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9592 break;
9594 #endif
9596 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9597 #define IMPLEMENT_EPOLL_PWAIT
9598 #endif
9599 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9600 #if defined(TARGET_NR_epoll_wait)
9601 case TARGET_NR_epoll_wait:
9602 #endif
9603 #if defined(IMPLEMENT_EPOLL_PWAIT)
9604 case TARGET_NR_epoll_pwait:
9605 #endif
9607 struct target_epoll_event *target_ep;
9608 struct epoll_event *ep;
9609 int epfd = arg1;
9610 int maxevents = arg3;
9611 int timeout = arg4;
9613 target_ep = lock_user(VERIFY_WRITE, arg2,
9614 maxevents * sizeof(struct target_epoll_event), 1);
9615 if (!target_ep) {
9616 goto efault;
9619 ep = alloca(maxevents * sizeof(struct epoll_event));
9621 switch (num) {
9622 #if defined(IMPLEMENT_EPOLL_PWAIT)
9623 case TARGET_NR_epoll_pwait:
9625 target_sigset_t *target_set;
9626 sigset_t _set, *set = &_set;
9628 if (arg5) {
9629 target_set = lock_user(VERIFY_READ, arg5,
9630 sizeof(target_sigset_t), 1);
9631 if (!target_set) {
9632 unlock_user(target_ep, arg2, 0);
9633 goto efault;
9635 target_to_host_sigset(set, target_set);
9636 unlock_user(target_set, arg5, 0);
9637 } else {
9638 set = NULL;
9641 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9642 break;
9644 #endif
9645 #if defined(TARGET_NR_epoll_wait)
9646 case TARGET_NR_epoll_wait:
9647 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9648 break;
9649 #endif
9650 default:
9651 ret = -TARGET_ENOSYS;
9653 if (!is_error(ret)) {
9654 int i;
9655 for (i = 0; i < ret; i++) {
9656 target_ep[i].events = tswap32(ep[i].events);
9657 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9660 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9661 break;
9663 #endif
9664 #endif
9665 #ifdef TARGET_NR_prlimit64
9666 case TARGET_NR_prlimit64:
9668 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9669 struct target_rlimit64 *target_rnew, *target_rold;
9670 struct host_rlimit64 rnew, rold, *rnewp = 0;
9671 int resource = target_to_host_resource(arg2);
9672 if (arg3) {
9673 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9674 goto efault;
9676 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9677 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9678 unlock_user_struct(target_rnew, arg3, 0);
9679 rnewp = &rnew;
9682 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
9683 if (!is_error(ret) && arg4) {
9684 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9685 goto efault;
9687 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9688 target_rold->rlim_max = tswap64(rold.rlim_max);
9689 unlock_user_struct(target_rold, arg4, 1);
9691 break;
9693 #endif
9694 #ifdef TARGET_NR_gethostname
9695 case TARGET_NR_gethostname:
9697 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9698 if (name) {
9699 ret = get_errno(gethostname(name, arg2));
9700 unlock_user(name, arg1, arg2);
9701 } else {
9702 ret = -TARGET_EFAULT;
9704 break;
9706 #endif
9707 #ifdef TARGET_NR_atomic_cmpxchg_32
9708 case TARGET_NR_atomic_cmpxchg_32:
9710 /* should use start_exclusive from main.c */
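/* i.e. the load/compare/store sequence below is not atomic with respect
 * to other guest threads, so a racing thread could observe a torn update. */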
9711 abi_ulong mem_value;
9712 if (get_user_u32(mem_value, arg6)) {
9713 target_siginfo_t info;
9714 info.si_signo = SIGSEGV;
9715 info.si_errno = 0;
9716 info.si_code = TARGET_SEGV_MAPERR;
9717 info._sifields._sigfault._addr = arg6;
9718 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9719 ret = 0xdeadbeef;
9722 if (mem_value == arg2)
9723 put_user_u32(arg1, arg6);
9724 ret = mem_value;
9725 break;
9727 #endif
9728 #ifdef TARGET_NR_atomic_barrier
9729 case TARGET_NR_atomic_barrier:
9731 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9732 ret = 0;
9733 break;
9735 #endif
9737 #ifdef TARGET_NR_timer_create
9738 case TARGET_NR_timer_create:
9740 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9742 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
9744 int clkid = arg1;
9745 int timer_index = next_free_host_timer();
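/* Host timer_t handles live in g_posix_timers[]; the id handed back to
 * the guest is encoded as TIMER_MAGIC | index so that get_timer_id()
 * can validate it in the other timer_* cases below. */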
9747 if (timer_index < 0) {
9748 ret = -TARGET_EAGAIN;
9749 } else {
9750 timer_t *phtimer = g_posix_timers + timer_index;
9752 if (arg2) {
9753 phost_sevp = &host_sevp;
9754 ret = target_to_host_sigevent(phost_sevp, arg2);
9755 if (ret != 0) {
9756 break;
9760 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
9761 if (ret) {
9762 phtimer = NULL;
9763 } else {
9764 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
9765 goto efault;
9769 break;
9771 #endif
9773 #ifdef TARGET_NR_timer_settime
9774 case TARGET_NR_timer_settime:
9776 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9777 * struct itimerspec * old_value */
9778 target_timer_t timerid = get_timer_id(arg1);
9780 if (timerid < 0) {
9781 ret = timerid;
9782 } else if (arg3 == 0) {
9783 ret = -TARGET_EINVAL;
9784 } else {
9785 timer_t htimer = g_posix_timers[timerid];
9786 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
9788 target_to_host_itimerspec(&hspec_new, arg3);
9789 ret = get_errno(
9790 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
9791 if (arg4) host_to_target_itimerspec(arg4, &hspec_old);
9793 break;
9795 #endif
9797 #ifdef TARGET_NR_timer_gettime
9798 case TARGET_NR_timer_gettime:
9800 /* args: timer_t timerid, struct itimerspec *curr_value */
9801 target_timer_t timerid = get_timer_id(arg1);
9803 if (timerid < 0) {
9804 ret = timerid;
9805 } else if (!arg2) {
9806 ret = -TARGET_EFAULT;
9807 } else {
9808 timer_t htimer = g_posix_timers[timerid];
9809 struct itimerspec hspec;
9810 ret = get_errno(timer_gettime(htimer, &hspec));
9812 if (host_to_target_itimerspec(arg2, &hspec)) {
9813 ret = -TARGET_EFAULT;
9816 break;
9818 #endif
9820 #ifdef TARGET_NR_timer_getoverrun
9821 case TARGET_NR_timer_getoverrun:
9823 /* args: timer_t timerid */
9824 target_timer_t timerid = get_timer_id(arg1);
9826 if (timerid < 0) {
9827 ret = timerid;
9828 } else {
9829 timer_t htimer = g_posix_timers[timerid];
9830 ret = get_errno(timer_getoverrun(htimer));
9832 break;
9834 #endif
9836 #ifdef TARGET_NR_timer_delete
9837 case TARGET_NR_timer_delete:
9839 /* args: timer_t timerid */
9840 target_timer_t timerid = get_timer_id(arg1);
9842 if (timerid < 0) {
9843 ret = timerid;
9844 } else {
9845 timer_t htimer = g_posix_timers[timerid];
9846 ret = get_errno(timer_delete(htimer));
9847 g_posix_timers[timerid] = 0;
9849 break;
9851 #endif
9853 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
9854 case TARGET_NR_timerfd_create:
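/* TFD_NONBLOCK/TFD_CLOEXEC are defined in terms of the O_* flags, so the
 * generic fcntl_flags_tbl bitmask translation can be reused here. */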
9855 ret = get_errno(timerfd_create(arg1,
9856 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
9857 break;
9858 #endif
9860 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
9861 case TARGET_NR_timerfd_gettime:
9863 struct itimerspec its_curr;
9865 ret = get_errno(timerfd_gettime(arg1, &its_curr));
9867 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
9868 goto efault;
9871 break;
9872 #endif
9874 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
9875 case TARGET_NR_timerfd_settime:
9877 struct itimerspec its_new, its_old, *p_new;
9879 if (arg3) {
9880 if (target_to_host_itimerspec(&its_new, arg3)) {
9881 goto efault;
9883 p_new = &its_new;
9884 } else {
9885 p_new = NULL;
9888 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
9890 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
9891 goto efault;
9894 break;
9895 #endif
9897 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
9898 case TARGET_NR_ioprio_get:
9899 ret = get_errno(ioprio_get(arg1, arg2));
9900 break;
9901 #endif
9903 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
9904 case TARGET_NR_ioprio_set:
9905 ret = get_errno(ioprio_set(arg1, arg2, arg3));
9906 break;
9907 #endif
9909 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
9910 case TARGET_NR_setns:
9911 ret = get_errno(setns(arg1, arg2));
9912 break;
9913 #endif
9914 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
9915 case TARGET_NR_unshare:
9916 ret = get_errno(unshare(arg1));
9917 break;
9918 #endif
9920 default:
9921 unimplemented:
9922 gemu_log("qemu: Unsupported syscall: %d\n", num);
9923 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9924 unimplemented_nowarn:
9925 #endif
9926 ret = -TARGET_ENOSYS;
9927 break;
9929 fail:
9930 #ifdef DEBUG
9931 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9932 #endif
9933 if(do_strace)
9934 print_syscall_ret(num, ret);
9935 return ret;
9936 efault:
9937 ret = -TARGET_EFAULT;
9938 goto fail;