[qemu.git] / linux-user / syscall.c
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/utsname.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
103 #endif
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include "linux_loop.h"
109 #include "cpu-uname.h"
111 #include "qemu.h"
113 #if defined(CONFIG_USE_NPTL)
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
116 #else
117 /* XXX: Hardcode the above values. */
118 #define CLONE_NPTL_FLAGS2 0
119 #endif
121 //#define DEBUG
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
128 #undef _syscall0
129 #undef _syscall1
130 #undef _syscall2
131 #undef _syscall3
132 #undef _syscall4
133 #undef _syscall5
134 #undef _syscall6
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
167 type5,arg5) \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
177 type6 arg6) \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
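/*
 * Editor's note (illustration, not part of the original file): each
 * _syscallN macro above expands into a small static wrapper around the
 * raw syscall(2) interface.  For example, the declaration used further
 * below,
 *
 *     _syscall2(int, sys_tkill, int, tid, int, sig)
 *
 * expands to roughly:
 *
 *     static int sys_tkill(int tid, int sig)
 *     {
 *         return syscall(__NR_sys_tkill, tid, sig);
 *     }
 *
 * where __NR_sys_tkill is #defined below to the host's __NR_tkill.  These
 * wrappers return raw host results and leave failures in the host errno,
 * which get_errno() and host_to_target_errno() translate later in this file.
 */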
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_tgkill __NR_tgkill
191 #define __NR_sys_tkill __NR_tkill
192 #define __NR_sys_futex __NR_futex
193 #define __NR_sys_inotify_init __NR_inotify_init
194 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
195 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
197 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
198 defined(__s390x__)
199 #define __NR__llseek __NR_lseek
200 #endif
202 #ifdef __NR_gettid
203 _syscall0(int, gettid)
204 #else
205 /* This is a replacement for the host gettid() and must return a host
206 errno. */
207 static int gettid(void) {
208 return -ENOSYS;
210 #endif
211 #ifdef __NR_getdents
212 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
213 #endif
214 #if !defined(__NR_getdents) || \
215 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
216 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
217 #endif
218 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
219 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
220 loff_t *, res, uint, wh);
221 #endif
222 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
223 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
224 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
225 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
226 #endif
227 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
228 _syscall2(int,sys_tkill,int,tid,int,sig)
229 #endif
230 #ifdef __NR_exit_group
231 _syscall1(int,exit_group,int,error_code)
232 #endif
233 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
234 _syscall1(int,set_tid_address,int *,tidptr)
235 #endif
236 #if defined(CONFIG_USE_NPTL)
237 #if defined(TARGET_NR_futex) && defined(__NR_futex)
238 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
239 const struct timespec *,timeout,int *,uaddr2,int,val3)
240 #endif
241 #endif
242 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
243 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
244 unsigned long *, user_mask_ptr);
245 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
246 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
247 unsigned long *, user_mask_ptr);
248 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
249 void *, arg);
251 static bitmask_transtbl fcntl_flags_tbl[] = {
252 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
253 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
254 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
255 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
256 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
257 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
258 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
259 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
260 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
261 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
262 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
263 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
264 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
265 #if defined(O_DIRECT)
266 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
267 #endif
268 #if defined(O_NOATIME)
269 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
270 #endif
271 #if defined(O_CLOEXEC)
272 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
273 #endif
274 #if defined(O_PATH)
275 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
276 #endif
277 /* Don't terminate the list prematurely on 64-bit host+guest. */
278 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
279 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
280 #endif
281 { 0, 0, 0, 0 }
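/*
 * Editor's note (sketch, based on an assumption about QEMU helpers defined
 * outside this file): each bitmask_transtbl entry above holds
 * (target mask, target bits, host mask, host bits), and the table is meant
 * to be walked by the generic target_to_host_bitmask() and
 * host_to_target_bitmask() translators.  Conceptually the target-to-host
 * direction is:
 *
 *     unsigned int host_flags = 0;
 *     for (const bitmask_transtbl *e = fcntl_flags_tbl; e->x86_mask; e++) {
 *         if ((target_flags & e->x86_mask) == e->x86_bits) {
 *             host_flags |= e->alpha_bits;
 *         }
 *     }
 *
 * (The historical field names x86_mask, x86_bits, alpha_mask and alpha_bits
 * simply mean "target" and "host" here.)
 */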
284 #define COPY_UTSNAME_FIELD(dest, src) \
285 do { \
286 /* __NEW_UTS_LEN doesn't include terminating null */ \
287 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
288 (dest)[__NEW_UTS_LEN] = '\0'; \
289 } while (0)
291 static int sys_uname(struct new_utsname *buf)
293 struct utsname uts_buf;
295 if (uname(&uts_buf) < 0)
296 return (-1);
299 * Just in case these have some differences, we
300 * translate utsname to new_utsname (which is the
301 * struct the Linux kernel uses).
304 memset(buf, 0, sizeof(*buf));
305 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
306 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
307 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
308 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
309 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
310 #ifdef _GNU_SOURCE
311 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
312 #endif
313 return (0);
315 #undef COPY_UTSNAME_FIELD
318 static int sys_getcwd1(char *buf, size_t size)
320 if (getcwd(buf, size) == NULL) {
321 /* getcwd() sets errno */
322 return (-1);
324 return strlen(buf)+1;
327 #ifdef TARGET_NR_openat
328 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
331 * open(2) takes an extra 'mode' parameter when it is
332 * called with the O_CREAT flag.
334 if ((flags & O_CREAT) != 0) {
335 return (openat(dirfd, pathname, flags, mode));
337 return (openat(dirfd, pathname, flags));
339 #endif
341 #ifdef CONFIG_UTIMENSAT
342 static int sys_utimensat(int dirfd, const char *pathname,
343 const struct timespec times[2], int flags)
345 if (pathname == NULL)
346 return futimens(dirfd, times);
347 else
348 return utimensat(dirfd, pathname, times, flags);
350 #else
351 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
352 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
353 const struct timespec *,tsp,int,flags)
354 #endif
355 #endif /* CONFIG_UTIMENSAT */
357 #ifdef CONFIG_INOTIFY
358 #include <sys/inotify.h>
360 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
361 static int sys_inotify_init(void)
363 return (inotify_init());
365 #endif
366 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
367 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
369 return (inotify_add_watch(fd, pathname, mask));
371 #endif
372 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
373 static int sys_inotify_rm_watch(int fd, int32_t wd)
375 return (inotify_rm_watch(fd, wd));
377 #endif
378 #ifdef CONFIG_INOTIFY1
379 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
380 static int sys_inotify_init1(int flags)
382 return (inotify_init1(flags));
384 #endif
385 #endif
386 #else
387 /* Userspace can usually survive runtime without inotify */
388 #undef TARGET_NR_inotify_init
389 #undef TARGET_NR_inotify_init1
390 #undef TARGET_NR_inotify_add_watch
391 #undef TARGET_NR_inotify_rm_watch
392 #endif /* CONFIG_INOTIFY */
394 #if defined(TARGET_NR_ppoll)
395 #ifndef __NR_ppoll
396 # define __NR_ppoll -1
397 #endif
398 #define __NR_sys_ppoll __NR_ppoll
399 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
400 struct timespec *, timeout, const __sigset_t *, sigmask,
401 size_t, sigsetsize)
402 #endif
404 #if defined(TARGET_NR_pselect6)
405 #ifndef __NR_pselect6
406 # define __NR_pselect6 -1
407 #endif
408 #define __NR_sys_pselect6 __NR_pselect6
409 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
410 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
411 #endif
413 #if defined(TARGET_NR_prlimit64)
414 #ifndef __NR_prlimit64
415 # define __NR_prlimit64 -1
416 #endif
417 #define __NR_sys_prlimit64 __NR_prlimit64
418 /* The glibc rlimit structure may not be the one used by the underlying syscall */
419 struct host_rlimit64 {
420 uint64_t rlim_cur;
421 uint64_t rlim_max;
423 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
424 const struct host_rlimit64 *, new_limit,
425 struct host_rlimit64 *, old_limit)
426 #endif
428 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
429 #ifdef TARGET_ARM
430 static inline int regpairs_aligned(void *cpu_env) {
431 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
433 #elif defined(TARGET_MIPS)
434 static inline int regpairs_aligned(void *cpu_env) { return 1; }
435 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
436 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
437 * of registers which translates to the same as ARM/MIPS, because we start with
438 * r3 as arg1 */
439 static inline int regpairs_aligned(void *cpu_env) { return 1; }
440 #else
441 static inline int regpairs_aligned(void *cpu_env) { return 0; }
442 #endif
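/*
 * Editor's note (minimal sketch, assumption about code outside this
 * excerpt): when regpairs_aligned() returns 1, a 64-bit syscall argument
 * arrives in an aligned even/odd register pair, so the 64-bit offset
 * handlers further down in syscall.c first shift the argument slots and
 * then reassemble the value, conceptually:
 *
 *     if (regpairs_aligned(cpu_env)) {
 *         arg2 = arg3;    // the pair starts one slot later
 *         arg3 = arg4;
 *     }
 *     uint64_t off = ((uint64_t)arg3 << 32) | (uint32_t)arg2;
 *
 * with the word order swapped on big-endian targets.
 */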
444 #define ERRNO_TABLE_SIZE 1200
446 /* target_to_host_errno_table[] is initialized from
447 * host_to_target_errno_table[] in syscall_init(). */
448 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
452 * This list is the union of errno values overridden in asm-<arch>/errno.h
453 * minus the errnos that are not actually generic to all archs.
455 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
456 [EIDRM] = TARGET_EIDRM,
457 [ECHRNG] = TARGET_ECHRNG,
458 [EL2NSYNC] = TARGET_EL2NSYNC,
459 [EL3HLT] = TARGET_EL3HLT,
460 [EL3RST] = TARGET_EL3RST,
461 [ELNRNG] = TARGET_ELNRNG,
462 [EUNATCH] = TARGET_EUNATCH,
463 [ENOCSI] = TARGET_ENOCSI,
464 [EL2HLT] = TARGET_EL2HLT,
465 [EDEADLK] = TARGET_EDEADLK,
466 [ENOLCK] = TARGET_ENOLCK,
467 [EBADE] = TARGET_EBADE,
468 [EBADR] = TARGET_EBADR,
469 [EXFULL] = TARGET_EXFULL,
470 [ENOANO] = TARGET_ENOANO,
471 [EBADRQC] = TARGET_EBADRQC,
472 [EBADSLT] = TARGET_EBADSLT,
473 [EBFONT] = TARGET_EBFONT,
474 [ENOSTR] = TARGET_ENOSTR,
475 [ENODATA] = TARGET_ENODATA,
476 [ETIME] = TARGET_ETIME,
477 [ENOSR] = TARGET_ENOSR,
478 [ENONET] = TARGET_ENONET,
479 [ENOPKG] = TARGET_ENOPKG,
480 [EREMOTE] = TARGET_EREMOTE,
481 [ENOLINK] = TARGET_ENOLINK,
482 [EADV] = TARGET_EADV,
483 [ESRMNT] = TARGET_ESRMNT,
484 [ECOMM] = TARGET_ECOMM,
485 [EPROTO] = TARGET_EPROTO,
486 [EDOTDOT] = TARGET_EDOTDOT,
487 [EMULTIHOP] = TARGET_EMULTIHOP,
488 [EBADMSG] = TARGET_EBADMSG,
489 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
490 [EOVERFLOW] = TARGET_EOVERFLOW,
491 [ENOTUNIQ] = TARGET_ENOTUNIQ,
492 [EBADFD] = TARGET_EBADFD,
493 [EREMCHG] = TARGET_EREMCHG,
494 [ELIBACC] = TARGET_ELIBACC,
495 [ELIBBAD] = TARGET_ELIBBAD,
496 [ELIBSCN] = TARGET_ELIBSCN,
497 [ELIBMAX] = TARGET_ELIBMAX,
498 [ELIBEXEC] = TARGET_ELIBEXEC,
499 [EILSEQ] = TARGET_EILSEQ,
500 [ENOSYS] = TARGET_ENOSYS,
501 [ELOOP] = TARGET_ELOOP,
502 [ERESTART] = TARGET_ERESTART,
503 [ESTRPIPE] = TARGET_ESTRPIPE,
504 [ENOTEMPTY] = TARGET_ENOTEMPTY,
505 [EUSERS] = TARGET_EUSERS,
506 [ENOTSOCK] = TARGET_ENOTSOCK,
507 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
508 [EMSGSIZE] = TARGET_EMSGSIZE,
509 [EPROTOTYPE] = TARGET_EPROTOTYPE,
510 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
511 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
512 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
513 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
514 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
515 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
516 [EADDRINUSE] = TARGET_EADDRINUSE,
517 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
518 [ENETDOWN] = TARGET_ENETDOWN,
519 [ENETUNREACH] = TARGET_ENETUNREACH,
520 [ENETRESET] = TARGET_ENETRESET,
521 [ECONNABORTED] = TARGET_ECONNABORTED,
522 [ECONNRESET] = TARGET_ECONNRESET,
523 [ENOBUFS] = TARGET_ENOBUFS,
524 [EISCONN] = TARGET_EISCONN,
525 [ENOTCONN] = TARGET_ENOTCONN,
526 [EUCLEAN] = TARGET_EUCLEAN,
527 [ENOTNAM] = TARGET_ENOTNAM,
528 [ENAVAIL] = TARGET_ENAVAIL,
529 [EISNAM] = TARGET_EISNAM,
530 [EREMOTEIO] = TARGET_EREMOTEIO,
531 [ESHUTDOWN] = TARGET_ESHUTDOWN,
532 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
533 [ETIMEDOUT] = TARGET_ETIMEDOUT,
534 [ECONNREFUSED] = TARGET_ECONNREFUSED,
535 [EHOSTDOWN] = TARGET_EHOSTDOWN,
536 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
537 [EALREADY] = TARGET_EALREADY,
538 [EINPROGRESS] = TARGET_EINPROGRESS,
539 [ESTALE] = TARGET_ESTALE,
540 [ECANCELED] = TARGET_ECANCELED,
541 [ENOMEDIUM] = TARGET_ENOMEDIUM,
542 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
543 #ifdef ENOKEY
544 [ENOKEY] = TARGET_ENOKEY,
545 #endif
546 #ifdef EKEYEXPIRED
547 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
548 #endif
549 #ifdef EKEYREVOKED
550 [EKEYREVOKED] = TARGET_EKEYREVOKED,
551 #endif
552 #ifdef EKEYREJECTED
553 [EKEYREJECTED] = TARGET_EKEYREJECTED,
554 #endif
555 #ifdef EOWNERDEAD
556 [EOWNERDEAD] = TARGET_EOWNERDEAD,
557 #endif
558 #ifdef ENOTRECOVERABLE
559 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
560 #endif
563 static inline int host_to_target_errno(int err)
565 if(host_to_target_errno_table[err])
566 return host_to_target_errno_table[err];
567 return err;
570 static inline int target_to_host_errno(int err)
572 if (target_to_host_errno_table[err])
573 return target_to_host_errno_table[err];
574 return err;
577 static inline abi_long get_errno(abi_long ret)
579 if (ret == -1)
580 return -host_to_target_errno(errno);
581 else
582 return ret;
585 static inline int is_error(abi_long ret)
587 return (abi_ulong)ret >= (abi_ulong)(-4096);
590 char *target_strerror(int err)
592 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
593 return NULL;
595 return strerror(target_to_host_errno(err));
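/*
 * Editor's note (worked example, not part of the original file): the two
 * tables above make errno translation symmetric.  If a host call returns -1
 * with errno == ELOOP, get_errno() above yields -host_to_target_errno(ELOOP),
 * i.e. -TARGET_ELOOP, which is the negative-errno form the guest expects
 * from the kernel ABI.  Conversely, target_to_host_errno(TARGET_ELOOP)
 * recovers the host value so that target_strerror() can reuse the host
 * strerror() text.
 */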
598 static abi_ulong target_brk;
599 static abi_ulong target_original_brk;
600 static abi_ulong brk_page;
602 void target_set_brk(abi_ulong new_brk)
604 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
605 brk_page = HOST_PAGE_ALIGN(target_brk);
608 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
609 #define DEBUGF_BRK(message, args...)
611 /* do_brk() must return target values and target errnos. */
612 abi_long do_brk(abi_ulong new_brk)
614 abi_long mapped_addr;
615 int new_alloc_size;
617 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
619 if (!new_brk) {
620 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
621 return target_brk;
623 if (new_brk < target_original_brk) {
624 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
625 target_brk);
626 return target_brk;
629 /* If the new brk is less than the highest page reserved to the
630 * target heap allocation, set it and we're almost done... */
631 if (new_brk <= brk_page) {
632 /* Heap contents are initialized to zero, as for anonymous
633 * mapped pages. */
634 if (new_brk > target_brk) {
635 memset(g2h(target_brk), 0, new_brk - target_brk);
637 target_brk = new_brk;
638 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
639 return target_brk;
642 /* We need to allocate more memory after the brk... Note that
643 * we don't use MAP_FIXED because that will map over the top of
644 * any existing mapping (like the one with the host libc or qemu
645 * itself); instead we treat "mapped but at wrong address" as
646 * a failure and unmap again.
648 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
649 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
650 PROT_READ|PROT_WRITE,
651 MAP_ANON|MAP_PRIVATE, 0, 0));
653 if (mapped_addr == brk_page) {
654 /* Heap contents are initialized to zero, as for anonymous
655 * mapped pages. Technically the new pages are already
656 * initialized to zero since they *are* anonymous mapped
657 * pages, however we have to take care with the contents that
658 * come from the remaining part of the previous page: it may
659 * contain garbage data due to previous heap usage (grown
660 * then shrunk). */
661 memset(g2h(target_brk), 0, brk_page - target_brk);
663 target_brk = new_brk;
664 brk_page = HOST_PAGE_ALIGN(target_brk);
665 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
666 target_brk);
667 return target_brk;
668 } else if (mapped_addr != -1) {
669 /* Mapped but at wrong address, meaning there wasn't actually
670 * enough space for this brk.
672 target_munmap(mapped_addr, new_alloc_size);
673 mapped_addr = -1;
674 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
676 else {
677 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
680 #if defined(TARGET_ALPHA)
681 /* We (partially) emulate OSF/1 on Alpha, which requires we
682 return a proper errno, not an unchanged brk value. */
683 return -TARGET_ENOMEM;
684 #endif
685 /* For everything else, return the previous break. */
686 return target_brk;
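/*
 * Editor's note (worked example, not part of the original file): assume a
 * 4 KiB host page size and target_set_brk(0x20000), so target_original_brk,
 * target_brk and brk_page all start at 0x20000.  A guest brk(0x20010)
 * exceeds brk_page, so one page is mapped at 0x20000 via target_mmap();
 * on success target_brk becomes 0x20010 and brk_page becomes 0x21000.
 * A later brk(0x20800) falls within brk_page, so the bytes from 0x20010 up
 * to 0x20800 are simply zeroed and target_brk is updated with no new
 * mapping.
 */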
689 static inline abi_long copy_from_user_fdset(fd_set *fds,
690 abi_ulong target_fds_addr,
691 int n)
693 int i, nw, j, k;
694 abi_ulong b, *target_fds;
696 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
697 if (!(target_fds = lock_user(VERIFY_READ,
698 target_fds_addr,
699 sizeof(abi_ulong) * nw,
700 1)))
701 return -TARGET_EFAULT;
703 FD_ZERO(fds);
704 k = 0;
705 for (i = 0; i < nw; i++) {
706 /* grab the abi_ulong */
707 __get_user(b, &target_fds[i]);
708 for (j = 0; j < TARGET_ABI_BITS; j++) {
709 /* check the bit inside the abi_ulong */
710 if ((b >> j) & 1)
711 FD_SET(k, fds);
712 k++;
716 unlock_user(target_fds, target_fds_addr, 0);
718 return 0;
721 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
722 abi_ulong target_fds_addr,
723 int n)
725 if (target_fds_addr) {
726 if (copy_from_user_fdset(fds, target_fds_addr, n))
727 return -TARGET_EFAULT;
728 *fds_ptr = fds;
729 } else {
730 *fds_ptr = NULL;
732 return 0;
735 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
736 const fd_set *fds,
737 int n)
739 int i, nw, j, k;
740 abi_long v;
741 abi_ulong *target_fds;
743 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
744 if (!(target_fds = lock_user(VERIFY_WRITE,
745 target_fds_addr,
746 sizeof(abi_ulong) * nw,
747 0)))
748 return -TARGET_EFAULT;
750 k = 0;
751 for (i = 0; i < nw; i++) {
752 v = 0;
753 for (j = 0; j < TARGET_ABI_BITS; j++) {
754 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
755 k++;
757 __put_user(v, &target_fds[i]);
760 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
762 return 0;
765 #if defined(__alpha__)
766 #define HOST_HZ 1024
767 #else
768 #define HOST_HZ 100
769 #endif
771 static inline abi_long host_to_target_clock_t(long ticks)
773 #if HOST_HZ == TARGET_HZ
774 return ticks;
775 #else
776 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
777 #endif
780 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
781 const struct rusage *rusage)
783 struct target_rusage *target_rusage;
785 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
786 return -TARGET_EFAULT;
787 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
788 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
789 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
790 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
791 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
792 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
793 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
794 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
795 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
796 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
797 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
798 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
799 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
800 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
801 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
802 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
803 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
804 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
805 unlock_user_struct(target_rusage, target_addr, 1);
807 return 0;
810 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
812 abi_ulong target_rlim_swap;
813 rlim_t result;
815 target_rlim_swap = tswapal(target_rlim);
816 if (target_rlim_swap == TARGET_RLIM_INFINITY)
817 return RLIM_INFINITY;
819 result = target_rlim_swap;
820 if (target_rlim_swap != (rlim_t)result)
821 return RLIM_INFINITY;
823 return result;
826 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
828 abi_ulong target_rlim_swap;
829 abi_ulong result;
831 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
832 target_rlim_swap = TARGET_RLIM_INFINITY;
833 else
834 target_rlim_swap = rlim;
835 result = tswapal(target_rlim_swap);
837 return result;
840 static inline int target_to_host_resource(int code)
842 switch (code) {
843 case TARGET_RLIMIT_AS:
844 return RLIMIT_AS;
845 case TARGET_RLIMIT_CORE:
846 return RLIMIT_CORE;
847 case TARGET_RLIMIT_CPU:
848 return RLIMIT_CPU;
849 case TARGET_RLIMIT_DATA:
850 return RLIMIT_DATA;
851 case TARGET_RLIMIT_FSIZE:
852 return RLIMIT_FSIZE;
853 case TARGET_RLIMIT_LOCKS:
854 return RLIMIT_LOCKS;
855 case TARGET_RLIMIT_MEMLOCK:
856 return RLIMIT_MEMLOCK;
857 case TARGET_RLIMIT_MSGQUEUE:
858 return RLIMIT_MSGQUEUE;
859 case TARGET_RLIMIT_NICE:
860 return RLIMIT_NICE;
861 case TARGET_RLIMIT_NOFILE:
862 return RLIMIT_NOFILE;
863 case TARGET_RLIMIT_NPROC:
864 return RLIMIT_NPROC;
865 case TARGET_RLIMIT_RSS:
866 return RLIMIT_RSS;
867 case TARGET_RLIMIT_RTPRIO:
868 return RLIMIT_RTPRIO;
869 case TARGET_RLIMIT_SIGPENDING:
870 return RLIMIT_SIGPENDING;
871 case TARGET_RLIMIT_STACK:
872 return RLIMIT_STACK;
873 default:
874 return code;
878 static inline abi_long copy_from_user_timeval(struct timeval *tv,
879 abi_ulong target_tv_addr)
881 struct target_timeval *target_tv;
883 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
884 return -TARGET_EFAULT;
886 __get_user(tv->tv_sec, &target_tv->tv_sec);
887 __get_user(tv->tv_usec, &target_tv->tv_usec);
889 unlock_user_struct(target_tv, target_tv_addr, 0);
891 return 0;
894 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
895 const struct timeval *tv)
897 struct target_timeval *target_tv;
899 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
900 return -TARGET_EFAULT;
902 __put_user(tv->tv_sec, &target_tv->tv_sec);
903 __put_user(tv->tv_usec, &target_tv->tv_usec);
905 unlock_user_struct(target_tv, target_tv_addr, 1);
907 return 0;
910 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
911 #include <mqueue.h>
913 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
914 abi_ulong target_mq_attr_addr)
916 struct target_mq_attr *target_mq_attr;
918 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
919 target_mq_attr_addr, 1))
920 return -TARGET_EFAULT;
922 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
923 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
924 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
925 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
927 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
929 return 0;
932 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
933 const struct mq_attr *attr)
935 struct target_mq_attr *target_mq_attr;
937 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
938 target_mq_attr_addr, 0))
939 return -TARGET_EFAULT;
941 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
942 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
943 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
944 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
946 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
948 return 0;
950 #endif
952 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
953 /* do_select() must return target values and target errnos. */
954 static abi_long do_select(int n,
955 abi_ulong rfd_addr, abi_ulong wfd_addr,
956 abi_ulong efd_addr, abi_ulong target_tv_addr)
958 fd_set rfds, wfds, efds;
959 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
960 struct timeval tv, *tv_ptr;
961 abi_long ret;
963 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
964 if (ret) {
965 return ret;
967 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
968 if (ret) {
969 return ret;
971 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
972 if (ret) {
973 return ret;
976 if (target_tv_addr) {
977 if (copy_from_user_timeval(&tv, target_tv_addr))
978 return -TARGET_EFAULT;
979 tv_ptr = &tv;
980 } else {
981 tv_ptr = NULL;
984 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
986 if (!is_error(ret)) {
987 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
988 return -TARGET_EFAULT;
989 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
990 return -TARGET_EFAULT;
991 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
992 return -TARGET_EFAULT;
994 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
995 return -TARGET_EFAULT;
998 return ret;
1000 #endif
1002 static abi_long do_pipe2(int host_pipe[], int flags)
1004 #ifdef CONFIG_PIPE2
1005 return pipe2(host_pipe, flags);
1006 #else
1007 return -ENOSYS;
1008 #endif
1011 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1012 int flags, int is_pipe2)
1014 int host_pipe[2];
1015 abi_long ret;
1016 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1018 if (is_error(ret))
1019 return get_errno(ret);
1021 /* Several targets have special calling conventions for the original
1022 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1023 if (!is_pipe2) {
1024 #if defined(TARGET_ALPHA)
1025 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1026 return host_pipe[0];
1027 #elif defined(TARGET_MIPS)
1028 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1029 return host_pipe[0];
1030 #elif defined(TARGET_SH4)
1031 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1032 return host_pipe[0];
1033 #endif
1036 if (put_user_s32(host_pipe[0], pipedes)
1037 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1038 return -TARGET_EFAULT;
1039 return get_errno(ret);
1042 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1043 abi_ulong target_addr,
1044 socklen_t len)
1046 struct target_ip_mreqn *target_smreqn;
1048 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1049 if (!target_smreqn)
1050 return -TARGET_EFAULT;
1051 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1052 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1053 if (len == sizeof(struct target_ip_mreqn))
1054 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1055 unlock_user(target_smreqn, target_addr, 0);
1057 return 0;
1060 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1061 abi_ulong target_addr,
1062 socklen_t len)
1064 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1065 sa_family_t sa_family;
1066 struct target_sockaddr *target_saddr;
1068 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1069 if (!target_saddr)
1070 return -TARGET_EFAULT;
1072 sa_family = tswap16(target_saddr->sa_family);
1074 /* Oops. The caller might send an incomplete sun_path; sun_path
1075 * must be terminated by \0 (see the manual page), but
1076 * unfortunately it is quite common to specify sockaddr_un
1077 * length as "strlen(x->sun_path)" while it should be
1078 * "strlen(...) + 1". We'll fix that here if needed.
1079 * The Linux kernel has a similar fix-up.
1082 if (sa_family == AF_UNIX) {
1083 if (len < unix_maxlen && len > 0) {
1084 char *cp = (char*)target_saddr;
1086 if ( cp[len-1] && !cp[len] )
1087 len++;
1089 if (len > unix_maxlen)
1090 len = unix_maxlen;
1093 memcpy(addr, target_saddr, len);
1094 addr->sa_family = sa_family;
1095 unlock_user(target_saddr, target_addr, 0);
1097 return 0;
1100 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1101 struct sockaddr *addr,
1102 socklen_t len)
1104 struct target_sockaddr *target_saddr;
1106 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1107 if (!target_saddr)
1108 return -TARGET_EFAULT;
1109 memcpy(target_saddr, addr, len);
1110 target_saddr->sa_family = tswap16(addr->sa_family);
1111 unlock_user(target_saddr, target_addr, len);
1113 return 0;
1116 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1117 struct target_msghdr *target_msgh)
1119 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1120 abi_long msg_controllen;
1121 abi_ulong target_cmsg_addr;
1122 struct target_cmsghdr *target_cmsg;
1123 socklen_t space = 0;
1125 msg_controllen = tswapal(target_msgh->msg_controllen);
1126 if (msg_controllen < sizeof (struct target_cmsghdr))
1127 goto the_end;
1128 target_cmsg_addr = tswapal(target_msgh->msg_control);
1129 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1130 if (!target_cmsg)
1131 return -TARGET_EFAULT;
1133 while (cmsg && target_cmsg) {
1134 void *data = CMSG_DATA(cmsg);
1135 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1137 int len = tswapal(target_cmsg->cmsg_len)
1138 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1140 space += CMSG_SPACE(len);
1141 if (space > msgh->msg_controllen) {
1142 space -= CMSG_SPACE(len);
1143 gemu_log("Host cmsg overflow\n");
1144 break;
1147 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1148 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1149 cmsg->cmsg_len = CMSG_LEN(len);
1151 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1152 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1153 memcpy(data, target_data, len);
1154 } else {
1155 int *fd = (int *)data;
1156 int *target_fd = (int *)target_data;
1157 int i, numfds = len / sizeof(int);
1159 for (i = 0; i < numfds; i++)
1160 fd[i] = tswap32(target_fd[i]);
1163 cmsg = CMSG_NXTHDR(msgh, cmsg);
1164 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1166 unlock_user(target_cmsg, target_cmsg_addr, 0);
1167 the_end:
1168 msgh->msg_controllen = space;
1169 return 0;
1172 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1173 struct msghdr *msgh)
1175 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1176 abi_long msg_controllen;
1177 abi_ulong target_cmsg_addr;
1178 struct target_cmsghdr *target_cmsg;
1179 socklen_t space = 0;
1181 msg_controllen = tswapal(target_msgh->msg_controllen);
1182 if (msg_controllen < sizeof (struct target_cmsghdr))
1183 goto the_end;
1184 target_cmsg_addr = tswapal(target_msgh->msg_control);
1185 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1186 if (!target_cmsg)
1187 return -TARGET_EFAULT;
1189 while (cmsg && target_cmsg) {
1190 void *data = CMSG_DATA(cmsg);
1191 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1193 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1195 space += TARGET_CMSG_SPACE(len);
1196 if (space > msg_controllen) {
1197 space -= TARGET_CMSG_SPACE(len);
1198 gemu_log("Target cmsg overflow\n");
1199 break;
1202 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1203 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1204 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1206 if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1207 (cmsg->cmsg_type == SCM_RIGHTS)) {
1208 int *fd = (int *)data;
1209 int *target_fd = (int *)target_data;
1210 int i, numfds = len / sizeof(int);
1212 for (i = 0; i < numfds; i++)
1213 target_fd[i] = tswap32(fd[i]);
1214 } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1215 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1216 (len == sizeof(struct timeval))) {
1217 /* copy struct timeval to target */
1218 struct timeval *tv = (struct timeval *)data;
1219 struct target_timeval *target_tv =
1220 (struct target_timeval *)target_data;
1222 target_tv->tv_sec = tswapal(tv->tv_sec);
1223 target_tv->tv_usec = tswapal(tv->tv_usec);
1224 } else {
1225 gemu_log("Unsupported ancillary data: %d/%d\n",
1226 cmsg->cmsg_level, cmsg->cmsg_type);
1227 memcpy(target_data, data, len);
1230 cmsg = CMSG_NXTHDR(msgh, cmsg);
1231 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1233 unlock_user(target_cmsg, target_cmsg_addr, space);
1234 the_end:
1235 target_msgh->msg_controllen = tswapal(space);
1236 return 0;
1239 /* do_setsockopt() Must return target values and target errnos. */
1240 static abi_long do_setsockopt(int sockfd, int level, int optname,
1241 abi_ulong optval_addr, socklen_t optlen)
1243 abi_long ret;
1244 int val;
1245 struct ip_mreqn *ip_mreq;
1246 struct ip_mreq_source *ip_mreq_source;
1248 switch(level) {
1249 case SOL_TCP:
1250 /* TCP options all take an 'int' value. */
1251 if (optlen < sizeof(uint32_t))
1252 return -TARGET_EINVAL;
1254 if (get_user_u32(val, optval_addr))
1255 return -TARGET_EFAULT;
1256 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1257 break;
1258 case SOL_IP:
1259 switch(optname) {
1260 case IP_TOS:
1261 case IP_TTL:
1262 case IP_HDRINCL:
1263 case IP_ROUTER_ALERT:
1264 case IP_RECVOPTS:
1265 case IP_RETOPTS:
1266 case IP_PKTINFO:
1267 case IP_MTU_DISCOVER:
1268 case IP_RECVERR:
1269 case IP_RECVTOS:
1270 #ifdef IP_FREEBIND
1271 case IP_FREEBIND:
1272 #endif
1273 case IP_MULTICAST_TTL:
1274 case IP_MULTICAST_LOOP:
1275 val = 0;
1276 if (optlen >= sizeof(uint32_t)) {
1277 if (get_user_u32(val, optval_addr))
1278 return -TARGET_EFAULT;
1279 } else if (optlen >= 1) {
1280 if (get_user_u8(val, optval_addr))
1281 return -TARGET_EFAULT;
1283 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1284 break;
1285 case IP_ADD_MEMBERSHIP:
1286 case IP_DROP_MEMBERSHIP:
1287 if (optlen < sizeof (struct target_ip_mreq) ||
1288 optlen > sizeof (struct target_ip_mreqn))
1289 return -TARGET_EINVAL;
1291 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1292 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1293 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1294 break;
1296 case IP_BLOCK_SOURCE:
1297 case IP_UNBLOCK_SOURCE:
1298 case IP_ADD_SOURCE_MEMBERSHIP:
1299 case IP_DROP_SOURCE_MEMBERSHIP:
1300 if (optlen != sizeof (struct target_ip_mreq_source))
1301 return -TARGET_EINVAL;
1303 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1304 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1305 unlock_user (ip_mreq_source, optval_addr, 0);
1306 break;
1308 default:
1309 goto unimplemented;
1311 break;
1312 case SOL_RAW:
1313 switch (optname) {
1314 case ICMP_FILTER:
1315 /* struct icmp_filter takes a u32 value */
1316 if (optlen < sizeof(uint32_t)) {
1317 return -TARGET_EINVAL;
1320 if (get_user_u32(val, optval_addr)) {
1321 return -TARGET_EFAULT;
1323 ret = get_errno(setsockopt(sockfd, level, optname,
1324 &val, sizeof(val)));
1325 break;
1327 default:
1328 goto unimplemented;
1330 break;
1331 case TARGET_SOL_SOCKET:
1332 switch (optname) {
1333 case TARGET_SO_RCVTIMEO:
1335 struct timeval tv;
1337 optname = SO_RCVTIMEO;
1339 set_timeout:
1340 if (optlen != sizeof(struct target_timeval)) {
1341 return -TARGET_EINVAL;
1344 if (copy_from_user_timeval(&tv, optval_addr)) {
1345 return -TARGET_EFAULT;
1348 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1349 &tv, sizeof(tv)));
1350 return ret;
1352 case TARGET_SO_SNDTIMEO:
1353 optname = SO_SNDTIMEO;
1354 goto set_timeout;
1355 /* Options with 'int' argument. */
1356 case TARGET_SO_DEBUG:
1357 optname = SO_DEBUG;
1358 break;
1359 case TARGET_SO_REUSEADDR:
1360 optname = SO_REUSEADDR;
1361 break;
1362 case TARGET_SO_TYPE:
1363 optname = SO_TYPE;
1364 break;
1365 case TARGET_SO_ERROR:
1366 optname = SO_ERROR;
1367 break;
1368 case TARGET_SO_DONTROUTE:
1369 optname = SO_DONTROUTE;
1370 break;
1371 case TARGET_SO_BROADCAST:
1372 optname = SO_BROADCAST;
1373 break;
1374 case TARGET_SO_SNDBUF:
1375 optname = SO_SNDBUF;
1376 break;
1377 case TARGET_SO_RCVBUF:
1378 optname = SO_RCVBUF;
1379 break;
1380 case TARGET_SO_KEEPALIVE:
1381 optname = SO_KEEPALIVE;
1382 break;
1383 case TARGET_SO_OOBINLINE:
1384 optname = SO_OOBINLINE;
1385 break;
1386 case TARGET_SO_NO_CHECK:
1387 optname = SO_NO_CHECK;
1388 break;
1389 case TARGET_SO_PRIORITY:
1390 optname = SO_PRIORITY;
1391 break;
1392 #ifdef SO_BSDCOMPAT
1393 case TARGET_SO_BSDCOMPAT:
1394 optname = SO_BSDCOMPAT;
1395 break;
1396 #endif
1397 case TARGET_SO_PASSCRED:
1398 optname = SO_PASSCRED;
1399 break;
1400 case TARGET_SO_TIMESTAMP:
1401 optname = SO_TIMESTAMP;
1402 break;
1403 case TARGET_SO_RCVLOWAT:
1404 optname = SO_RCVLOWAT;
1405 break;
1407 default:
1408 goto unimplemented;
1410 if (optlen < sizeof(uint32_t))
1411 return -TARGET_EINVAL;
1413 if (get_user_u32(val, optval_addr))
1414 return -TARGET_EFAULT;
1415 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1416 break;
1417 default:
1418 unimplemented:
1419 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1420 ret = -TARGET_ENOPROTOOPT;
1422 return ret;
1425 /* do_getsockopt() Must return target values and target errnos. */
1426 static abi_long do_getsockopt(int sockfd, int level, int optname,
1427 abi_ulong optval_addr, abi_ulong optlen)
1429 abi_long ret;
1430 int len, val;
1431 socklen_t lv;
1433 switch(level) {
1434 case TARGET_SOL_SOCKET:
1435 level = SOL_SOCKET;
1436 switch (optname) {
1437 /* These don't just return a single integer */
1438 case TARGET_SO_LINGER:
1439 case TARGET_SO_RCVTIMEO:
1440 case TARGET_SO_SNDTIMEO:
1441 case TARGET_SO_PEERNAME:
1442 goto unimplemented;
1443 case TARGET_SO_PEERCRED: {
1444 struct ucred cr;
1445 socklen_t crlen;
1446 struct target_ucred *tcr;
1448 if (get_user_u32(len, optlen)) {
1449 return -TARGET_EFAULT;
1451 if (len < 0) {
1452 return -TARGET_EINVAL;
1455 crlen = sizeof(cr);
1456 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1457 &cr, &crlen));
1458 if (ret < 0) {
1459 return ret;
1461 if (len > crlen) {
1462 len = crlen;
1464 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1465 return -TARGET_EFAULT;
1467 __put_user(cr.pid, &tcr->pid);
1468 __put_user(cr.uid, &tcr->uid);
1469 __put_user(cr.gid, &tcr->gid);
1470 unlock_user_struct(tcr, optval_addr, 1);
1471 if (put_user_u32(len, optlen)) {
1472 return -TARGET_EFAULT;
1474 break;
1476 /* Options with 'int' argument. */
1477 case TARGET_SO_DEBUG:
1478 optname = SO_DEBUG;
1479 goto int_case;
1480 case TARGET_SO_REUSEADDR:
1481 optname = SO_REUSEADDR;
1482 goto int_case;
1483 case TARGET_SO_TYPE:
1484 optname = SO_TYPE;
1485 goto int_case;
1486 case TARGET_SO_ERROR:
1487 optname = SO_ERROR;
1488 goto int_case;
1489 case TARGET_SO_DONTROUTE:
1490 optname = SO_DONTROUTE;
1491 goto int_case;
1492 case TARGET_SO_BROADCAST:
1493 optname = SO_BROADCAST;
1494 goto int_case;
1495 case TARGET_SO_SNDBUF:
1496 optname = SO_SNDBUF;
1497 goto int_case;
1498 case TARGET_SO_RCVBUF:
1499 optname = SO_RCVBUF;
1500 goto int_case;
1501 case TARGET_SO_KEEPALIVE:
1502 optname = SO_KEEPALIVE;
1503 goto int_case;
1504 case TARGET_SO_OOBINLINE:
1505 optname = SO_OOBINLINE;
1506 goto int_case;
1507 case TARGET_SO_NO_CHECK:
1508 optname = SO_NO_CHECK;
1509 goto int_case;
1510 case TARGET_SO_PRIORITY:
1511 optname = SO_PRIORITY;
1512 goto int_case;
1513 #ifdef SO_BSDCOMPAT
1514 case TARGET_SO_BSDCOMPAT:
1515 optname = SO_BSDCOMPAT;
1516 goto int_case;
1517 #endif
1518 case TARGET_SO_PASSCRED:
1519 optname = SO_PASSCRED;
1520 goto int_case;
1521 case TARGET_SO_TIMESTAMP:
1522 optname = SO_TIMESTAMP;
1523 goto int_case;
1524 case TARGET_SO_RCVLOWAT:
1525 optname = SO_RCVLOWAT;
1526 goto int_case;
1527 default:
1528 goto int_case;
1530 break;
1531 case SOL_TCP:
1532 /* TCP options all take an 'int' value. */
1533 int_case:
1534 if (get_user_u32(len, optlen))
1535 return -TARGET_EFAULT;
1536 if (len < 0)
1537 return -TARGET_EINVAL;
1538 lv = sizeof(lv);
1539 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1540 if (ret < 0)
1541 return ret;
1542 if (len > lv)
1543 len = lv;
1544 if (len == 4) {
1545 if (put_user_u32(val, optval_addr))
1546 return -TARGET_EFAULT;
1547 } else {
1548 if (put_user_u8(val, optval_addr))
1549 return -TARGET_EFAULT;
1551 if (put_user_u32(len, optlen))
1552 return -TARGET_EFAULT;
1553 break;
1554 case SOL_IP:
1555 switch(optname) {
1556 case IP_TOS:
1557 case IP_TTL:
1558 case IP_HDRINCL:
1559 case IP_ROUTER_ALERT:
1560 case IP_RECVOPTS:
1561 case IP_RETOPTS:
1562 case IP_PKTINFO:
1563 case IP_MTU_DISCOVER:
1564 case IP_RECVERR:
1565 case IP_RECVTOS:
1566 #ifdef IP_FREEBIND
1567 case IP_FREEBIND:
1568 #endif
1569 case IP_MULTICAST_TTL:
1570 case IP_MULTICAST_LOOP:
1571 if (get_user_u32(len, optlen))
1572 return -TARGET_EFAULT;
1573 if (len < 0)
1574 return -TARGET_EINVAL;
1575 lv = sizeof(lv);
1576 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1577 if (ret < 0)
1578 return ret;
1579 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1580 len = 1;
1581 if (put_user_u32(len, optlen)
1582 || put_user_u8(val, optval_addr))
1583 return -TARGET_EFAULT;
1584 } else {
1585 if (len > sizeof(int))
1586 len = sizeof(int);
1587 if (put_user_u32(len, optlen)
1588 || put_user_u32(val, optval_addr))
1589 return -TARGET_EFAULT;
1591 break;
1592 default:
1593 ret = -TARGET_ENOPROTOOPT;
1594 break;
1596 break;
1597 default:
1598 unimplemented:
1599 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1600 level, optname);
1601 ret = -TARGET_EOPNOTSUPP;
1602 break;
1604 return ret;
1607 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1608 int count, int copy)
1610 struct target_iovec *target_vec;
1611 struct iovec *vec;
1612 abi_ulong total_len, max_len;
1613 int i;
1615 if (count == 0) {
1616 errno = 0;
1617 return NULL;
1619 if (count < 0 || count > IOV_MAX) {
1620 errno = EINVAL;
1621 return NULL;
1624 vec = calloc(count, sizeof(struct iovec));
1625 if (vec == NULL) {
1626 errno = ENOMEM;
1627 return NULL;
1630 target_vec = lock_user(VERIFY_READ, target_addr,
1631 count * sizeof(struct target_iovec), 1);
1632 if (target_vec == NULL) {
1633 errno = EFAULT;
1634 goto fail2;
1637 /* ??? If host page size > target page size, this will result in a
1638 value larger than what we can actually support. */
1639 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1640 total_len = 0;
1642 for (i = 0; i < count; i++) {
1643 abi_ulong base = tswapal(target_vec[i].iov_base);
1644 abi_long len = tswapal(target_vec[i].iov_len);
1646 if (len < 0) {
1647 errno = EINVAL;
1648 goto fail;
1649 } else if (len == 0) {
1650 /* Zero length pointer is ignored. */
1651 vec[i].iov_base = 0;
1652 } else {
1653 vec[i].iov_base = lock_user(type, base, len, copy);
1654 if (!vec[i].iov_base) {
1655 errno = EFAULT;
1656 goto fail;
1658 if (len > max_len - total_len) {
1659 len = max_len - total_len;
1662 vec[i].iov_len = len;
1663 total_len += len;
1666 unlock_user(target_vec, target_addr, 0);
1667 return vec;
1669 fail:
1670 free(vec);
1671 fail2:
1672 unlock_user(target_vec, target_addr, 0);
1673 return NULL;
1676 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1677 int count, int copy)
1679 struct target_iovec *target_vec;
1680 int i;
1682 target_vec = lock_user(VERIFY_READ, target_addr,
1683 count * sizeof(struct target_iovec), 1);
1684 if (target_vec) {
1685 for (i = 0; i < count; i++) {
1686 abi_ulong base = tswapal(target_vec[i].iov_base);
1687 abi_long len = tswapal(target_vec[i].iov_len);
1688 if (len < 0) {
1689 break;
1691 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1693 unlock_user(target_vec, target_addr, 0);
1696 free(vec);
1699 /* do_socket() Must return target values and target errnos. */
1700 static abi_long do_socket(int domain, int type, int protocol)
1702 #if defined(TARGET_MIPS)
1703 switch(type) {
1704 case TARGET_SOCK_DGRAM:
1705 type = SOCK_DGRAM;
1706 break;
1707 case TARGET_SOCK_STREAM:
1708 type = SOCK_STREAM;
1709 break;
1710 case TARGET_SOCK_RAW:
1711 type = SOCK_RAW;
1712 break;
1713 case TARGET_SOCK_RDM:
1714 type = SOCK_RDM;
1715 break;
1716 case TARGET_SOCK_SEQPACKET:
1717 type = SOCK_SEQPACKET;
1718 break;
1719 case TARGET_SOCK_PACKET:
1720 type = SOCK_PACKET;
1721 break;
1723 #endif
1724 if (domain == PF_NETLINK)
1725 return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
1726 return get_errno(socket(domain, type, protocol));
1729 /* do_bind() Must return target values and target errnos. */
1730 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1731 socklen_t addrlen)
1733 void *addr;
1734 abi_long ret;
1736 if ((int)addrlen < 0) {
1737 return -TARGET_EINVAL;
1740 addr = alloca(addrlen+1);
1742 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1743 if (ret)
1744 return ret;
1746 return get_errno(bind(sockfd, addr, addrlen));
1749 /* do_connect() Must return target values and target errnos. */
1750 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1751 socklen_t addrlen)
1753 void *addr;
1754 abi_long ret;
1756 if ((int)addrlen < 0) {
1757 return -TARGET_EINVAL;
1760 addr = alloca(addrlen);
1762 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1763 if (ret)
1764 return ret;
1766 return get_errno(connect(sockfd, addr, addrlen));
1769 /* do_sendrecvmsg() Must return target values and target errnos. */
1770 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1771 int flags, int send)
1773 abi_long ret, len;
1774 struct target_msghdr *msgp;
1775 struct msghdr msg;
1776 int count;
1777 struct iovec *vec;
1778 abi_ulong target_vec;
1780 /* FIXME */
1781 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1782 msgp,
1783 target_msg,
1784 send ? 1 : 0))
1785 return -TARGET_EFAULT;
1786 if (msgp->msg_name) {
1787 msg.msg_namelen = tswap32(msgp->msg_namelen);
1788 msg.msg_name = alloca(msg.msg_namelen);
1789 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1790 msg.msg_namelen);
1791 if (ret) {
1792 goto out2;
1794 } else {
1795 msg.msg_name = NULL;
1796 msg.msg_namelen = 0;
1798 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1799 msg.msg_control = alloca(msg.msg_controllen);
1800 msg.msg_flags = tswap32(msgp->msg_flags);
1802 count = tswapal(msgp->msg_iovlen);
1803 target_vec = tswapal(msgp->msg_iov);
1804 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1805 target_vec, count, send);
1806 if (vec == NULL) {
1807 ret = -host_to_target_errno(errno);
1808 goto out2;
1810 msg.msg_iovlen = count;
1811 msg.msg_iov = vec;
1813 if (send) {
1814 ret = target_to_host_cmsg(&msg, msgp);
1815 if (ret == 0)
1816 ret = get_errno(sendmsg(fd, &msg, flags));
1817 } else {
1818 ret = get_errno(recvmsg(fd, &msg, flags));
1819 if (!is_error(ret)) {
1820 len = ret;
1821 ret = host_to_target_cmsg(msgp, &msg);
1822 if (!is_error(ret)) {
1823 msgp->msg_namelen = tswap32(msg.msg_namelen);
1824 if (msg.msg_name != NULL) {
1825 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1826 msg.msg_name, msg.msg_namelen);
1827 if (ret) {
1828 goto out;
1832 ret = len;
1837 out:
1838 unlock_iovec(vec, target_vec, count, !send);
1839 out2:
1840 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1841 return ret;
1844 /* If we don't have a system accept4() then just call accept.
1845 * The callsites to do_accept4() will ensure that they don't
1846 * pass a non-zero flags argument in this config.
1848 #ifndef CONFIG_ACCEPT4
1849 static inline int accept4(int sockfd, struct sockaddr *addr,
1850 socklen_t *addrlen, int flags)
1852 assert(flags == 0);
1853 return accept(sockfd, addr, addrlen);
1855 #endif
1857 /* do_accept4() Must return target values and target errnos. */
1858 static abi_long do_accept4(int fd, abi_ulong target_addr,
1859 abi_ulong target_addrlen_addr, int flags)
1861 socklen_t addrlen;
1862 void *addr;
1863 abi_long ret;
1865 if (target_addr == 0) {
1866 return get_errno(accept4(fd, NULL, NULL, flags));
1869 /* Linux returns EINVAL if the addrlen pointer is invalid */
1870 if (get_user_u32(addrlen, target_addrlen_addr))
1871 return -TARGET_EINVAL;
1873 if ((int)addrlen < 0) {
1874 return -TARGET_EINVAL;
1877 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1878 return -TARGET_EINVAL;
1880 addr = alloca(addrlen);
1882 ret = get_errno(accept4(fd, addr, &addrlen, flags));
1883 if (!is_error(ret)) {
1884 host_to_target_sockaddr(target_addr, addr, addrlen);
1885 if (put_user_u32(addrlen, target_addrlen_addr))
1886 ret = -TARGET_EFAULT;
1888 return ret;
1891 /* do_getpeername() Must return target values and target errnos. */
1892 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1893 abi_ulong target_addrlen_addr)
1895 socklen_t addrlen;
1896 void *addr;
1897 abi_long ret;
1899 if (get_user_u32(addrlen, target_addrlen_addr))
1900 return -TARGET_EFAULT;
1902 if ((int)addrlen < 0) {
1903 return -TARGET_EINVAL;
1906 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1907 return -TARGET_EFAULT;
1909 addr = alloca(addrlen);
1911 ret = get_errno(getpeername(fd, addr, &addrlen));
1912 if (!is_error(ret)) {
1913 host_to_target_sockaddr(target_addr, addr, addrlen);
1914 if (put_user_u32(addrlen, target_addrlen_addr))
1915 ret = -TARGET_EFAULT;
1917 return ret;
1920 /* do_getsockname() Must return target values and target errnos. */
1921 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1922 abi_ulong target_addrlen_addr)
1924 socklen_t addrlen;
1925 void *addr;
1926 abi_long ret;
1928 if (get_user_u32(addrlen, target_addrlen_addr))
1929 return -TARGET_EFAULT;
1931 if ((int)addrlen < 0) {
1932 return -TARGET_EINVAL;
1935 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1936 return -TARGET_EFAULT;
1938 addr = alloca(addrlen);
1940 ret = get_errno(getsockname(fd, addr, &addrlen));
1941 if (!is_error(ret)) {
1942 host_to_target_sockaddr(target_addr, addr, addrlen);
1943 if (put_user_u32(addrlen, target_addrlen_addr))
1944 ret = -TARGET_EFAULT;
1946 return ret;
1949 /* do_socketpair() Must return target values and target errnos. */
1950 static abi_long do_socketpair(int domain, int type, int protocol,
1951 abi_ulong target_tab_addr)
1953 int tab[2];
1954 abi_long ret;
1956 ret = get_errno(socketpair(domain, type, protocol, tab));
1957 if (!is_error(ret)) {
1958 if (put_user_s32(tab[0], target_tab_addr)
1959 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1960 ret = -TARGET_EFAULT;
1962 return ret;
1965 /* do_sendto() Must return target values and target errnos. */
1966 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1967 abi_ulong target_addr, socklen_t addrlen)
1969 void *addr;
1970 void *host_msg;
1971 abi_long ret;
1973 if ((int)addrlen < 0) {
1974 return -TARGET_EINVAL;
1977 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1978 if (!host_msg)
1979 return -TARGET_EFAULT;
1980 if (target_addr) {
1981 addr = alloca(addrlen);
1982 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1983 if (ret) {
1984 unlock_user(host_msg, msg, 0);
1985 return ret;
1987 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1988 } else {
1989 ret = get_errno(send(fd, host_msg, len, flags));
1991 unlock_user(host_msg, msg, 0);
1992 return ret;
1995 /* do_recvfrom() Must return target values and target errnos. */
1996 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1997 abi_ulong target_addr,
1998 abi_ulong target_addrlen)
2000 socklen_t addrlen;
2001 void *addr;
2002 void *host_msg;
2003 abi_long ret;
2005 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2006 if (!host_msg)
2007 return -TARGET_EFAULT;
2008 if (target_addr) {
2009 if (get_user_u32(addrlen, target_addrlen)) {
2010 ret = -TARGET_EFAULT;
2011 goto fail;
2013 if ((int)addrlen < 0) {
2014 ret = -TARGET_EINVAL;
2015 goto fail;
2017 addr = alloca(addrlen);
2018 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2019 } else {
2020 addr = NULL; /* To keep compiler quiet. */
2021 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2023 if (!is_error(ret)) {
2024 if (target_addr) {
2025 host_to_target_sockaddr(target_addr, addr, addrlen);
2026 if (put_user_u32(addrlen, target_addrlen)) {
2027 ret = -TARGET_EFAULT;
2028 goto fail;
2031 unlock_user(host_msg, msg, len);
2032 } else {
2033 fail:
2034 unlock_user(host_msg, msg, 0);
2036 return ret;
2039 #ifdef TARGET_NR_socketcall
2040 /* do_socketcall() Must return target values and target errnos. */
2041 static abi_long do_socketcall(int num, abi_ulong vptr)
2043 abi_long ret;
2044 const int n = sizeof(abi_ulong);
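    /* Arguments for every socketcall operation are packed as an array of
     * abi_ulong words starting at vptr; each case below loads its
     * parameters at word offsets from that base.
     */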
2046 switch(num) {
2047 case SOCKOP_socket:
2049 abi_ulong domain, type, protocol;
2051 if (get_user_ual(domain, vptr)
2052 || get_user_ual(type, vptr + n)
2053 || get_user_ual(protocol, vptr + 2 * n))
2054 return -TARGET_EFAULT;
2056 ret = do_socket(domain, type, protocol);
2058 break;
2059 case SOCKOP_bind:
2061 abi_ulong sockfd;
2062 abi_ulong target_addr;
2063 socklen_t addrlen;
2065 if (get_user_ual(sockfd, vptr)
2066 || get_user_ual(target_addr, vptr + n)
2067 || get_user_ual(addrlen, vptr + 2 * n))
2068 return -TARGET_EFAULT;
2070 ret = do_bind(sockfd, target_addr, addrlen);
2072 break;
2073 case SOCKOP_connect:
2075 abi_ulong sockfd;
2076 abi_ulong target_addr;
2077 socklen_t addrlen;
2079 if (get_user_ual(sockfd, vptr)
2080 || get_user_ual(target_addr, vptr + n)
2081 || get_user_ual(addrlen, vptr + 2 * n))
2082 return -TARGET_EFAULT;
2084 ret = do_connect(sockfd, target_addr, addrlen);
2086 break;
2087 case SOCKOP_listen:
2089 abi_ulong sockfd, backlog;
2091 if (get_user_ual(sockfd, vptr)
2092 || get_user_ual(backlog, vptr + n))
2093 return -TARGET_EFAULT;
2095 ret = get_errno(listen(sockfd, backlog));
2097 break;
2098 case SOCKOP_accept:
2100 abi_ulong sockfd;
2101 abi_ulong target_addr, target_addrlen;
2103 if (get_user_ual(sockfd, vptr)
2104 || get_user_ual(target_addr, vptr + n)
2105 || get_user_ual(target_addrlen, vptr + 2 * n))
2106 return -TARGET_EFAULT;
2108 ret = do_accept4(sockfd, target_addr, target_addrlen, 0);
2110 break;
2111 case SOCKOP_getsockname:
2113 abi_ulong sockfd;
2114 abi_ulong target_addr, target_addrlen;
2116 if (get_user_ual(sockfd, vptr)
2117 || get_user_ual(target_addr, vptr + n)
2118 || get_user_ual(target_addrlen, vptr + 2 * n))
2119 return -TARGET_EFAULT;
2121 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2123 break;
2124 case SOCKOP_getpeername:
2126 abi_ulong sockfd;
2127 abi_ulong target_addr, target_addrlen;
2129 if (get_user_ual(sockfd, vptr)
2130 || get_user_ual(target_addr, vptr + n)
2131 || get_user_ual(target_addrlen, vptr + 2 * n))
2132 return -TARGET_EFAULT;
2134 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2136 break;
2137 case SOCKOP_socketpair:
2139 abi_ulong domain, type, protocol;
2140 abi_ulong tab;
2142 if (get_user_ual(domain, vptr)
2143 || get_user_ual(type, vptr + n)
2144 || get_user_ual(protocol, vptr + 2 * n)
2145 || get_user_ual(tab, vptr + 3 * n))
2146 return -TARGET_EFAULT;
2148 ret = do_socketpair(domain, type, protocol, tab);
2150 break;
2151 case SOCKOP_send:
2153 abi_ulong sockfd;
2154 abi_ulong msg;
2155 size_t len;
2156 abi_ulong flags;
2158 if (get_user_ual(sockfd, vptr)
2159 || get_user_ual(msg, vptr + n)
2160 || get_user_ual(len, vptr + 2 * n)
2161 || get_user_ual(flags, vptr + 3 * n))
2162 return -TARGET_EFAULT;
2164 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2166 break;
2167 case SOCKOP_recv:
2169 abi_ulong sockfd;
2170 abi_ulong msg;
2171 size_t len;
2172 abi_ulong flags;
2174 if (get_user_ual(sockfd, vptr)
2175 || get_user_ual(msg, vptr + n)
2176 || get_user_ual(len, vptr + 2 * n)
2177 || get_user_ual(flags, vptr + 3 * n))
2178 return -TARGET_EFAULT;
2180 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2182 break;
2183 case SOCKOP_sendto:
2185 abi_ulong sockfd;
2186 abi_ulong msg;
2187 size_t len;
2188 abi_ulong flags;
2189 abi_ulong addr;
2190 socklen_t addrlen;
2192 if (get_user_ual(sockfd, vptr)
2193 || get_user_ual(msg, vptr + n)
2194 || get_user_ual(len, vptr + 2 * n)
2195 || get_user_ual(flags, vptr + 3 * n)
2196 || get_user_ual(addr, vptr + 4 * n)
2197 || get_user_ual(addrlen, vptr + 5 * n))
2198 return -TARGET_EFAULT;
2200 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2202 break;
2203 case SOCKOP_recvfrom:
2205 abi_ulong sockfd;
2206 abi_ulong msg;
2207 size_t len;
2208 abi_ulong flags;
2209 abi_ulong addr;
2210 socklen_t addrlen;
2212 if (get_user_ual(sockfd, vptr)
2213 || get_user_ual(msg, vptr + n)
2214 || get_user_ual(len, vptr + 2 * n)
2215 || get_user_ual(flags, vptr + 3 * n)
2216 || get_user_ual(addr, vptr + 4 * n)
2217 || get_user_ual(addrlen, vptr + 5 * n))
2218 return -TARGET_EFAULT;
2220 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2222 break;
2223 case SOCKOP_shutdown:
2225 abi_ulong sockfd, how;
2227 if (get_user_ual(sockfd, vptr)
2228 || get_user_ual(how, vptr + n))
2229 return -TARGET_EFAULT;
2231 ret = get_errno(shutdown(sockfd, how));
2233 break;
2234 case SOCKOP_sendmsg:
2235 case SOCKOP_recvmsg:
2237 abi_ulong fd;
2238 abi_ulong target_msg;
2239 abi_ulong flags;
2241 if (get_user_ual(fd, vptr)
2242 || get_user_ual(target_msg, vptr + n)
2243 || get_user_ual(flags, vptr + 2 * n))
2244 return -TARGET_EFAULT;
2246 ret = do_sendrecvmsg(fd, target_msg, flags,
2247 (num == SOCKOP_sendmsg));
2249 break;
2250 case SOCKOP_setsockopt:
2252 abi_ulong sockfd;
2253 abi_ulong level;
2254 abi_ulong optname;
2255 abi_ulong optval;
2256 socklen_t optlen;
2258 if (get_user_ual(sockfd, vptr)
2259 || get_user_ual(level, vptr + n)
2260 || get_user_ual(optname, vptr + 2 * n)
2261 || get_user_ual(optval, vptr + 3 * n)
2262 || get_user_ual(optlen, vptr + 4 * n))
2263 return -TARGET_EFAULT;
2265 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2267 break;
2268 case SOCKOP_getsockopt:
2270 abi_ulong sockfd;
2271 abi_ulong level;
2272 abi_ulong optname;
2273 abi_ulong optval;
2274 socklen_t optlen;
2276 if (get_user_ual(sockfd, vptr)
2277 || get_user_ual(level, vptr + n)
2278 || get_user_ual(optname, vptr + 2 * n)
2279 || get_user_ual(optval, vptr + 3 * n)
2280 || get_user_ual(optlen, vptr + 4 * n))
2281 return -TARGET_EFAULT;
2283 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2285 break;
2286 default:
2287 gemu_log("Unsupported socketcall: %d\n", num);
2288 ret = -TARGET_ENOSYS;
2289 break;
2291 return ret;
2293 #endif
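/* Bookkeeping for guest SysV shared memory attachments: do_shmat() records
 * the guest address and size of each mapping here so that do_shmdt() can
 * later clear the corresponding page flags.
 */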
2295 #define N_SHM_REGIONS 32
2297 static struct shm_region {
2298 abi_ulong start;
2299 abi_ulong size;
2300 } shm_regions[N_SHM_REGIONS];
2302 struct target_ipc_perm
2304 abi_long __key;
2305 abi_ulong uid;
2306 abi_ulong gid;
2307 abi_ulong cuid;
2308 abi_ulong cgid;
2309 unsigned short int mode;
2310 unsigned short int __pad1;
2311 unsigned short int __seq;
2312 unsigned short int __pad2;
2313 abi_ulong __unused1;
2314 abi_ulong __unused2;
2317 struct target_semid_ds
2319 struct target_ipc_perm sem_perm;
2320 abi_ulong sem_otime;
2321 abi_ulong __unused1;
2322 abi_ulong sem_ctime;
2323 abi_ulong __unused2;
2324 abi_ulong sem_nsems;
2325 abi_ulong __unused3;
2326 abi_ulong __unused4;
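/* The ipc_perm converters below operate on the permission structure embedded
 * at the start of a target semid_ds; since msg_perm and shm_perm also sit at
 * offset 0 of their structures, the same helpers are reused by the msgctl and
 * shmctl conversions further down.
 */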
2329 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2330 abi_ulong target_addr)
2332 struct target_ipc_perm *target_ip;
2333 struct target_semid_ds *target_sd;
2335 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2336 return -TARGET_EFAULT;
2337 target_ip = &(target_sd->sem_perm);
2338 host_ip->__key = tswapal(target_ip->__key);
2339 host_ip->uid = tswapal(target_ip->uid);
2340 host_ip->gid = tswapal(target_ip->gid);
2341 host_ip->cuid = tswapal(target_ip->cuid);
2342 host_ip->cgid = tswapal(target_ip->cgid);
2343 host_ip->mode = tswap16(target_ip->mode);
2344 unlock_user_struct(target_sd, target_addr, 0);
2345 return 0;
2348 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2349 struct ipc_perm *host_ip)
2351 struct target_ipc_perm *target_ip;
2352 struct target_semid_ds *target_sd;
2354 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2355 return -TARGET_EFAULT;
2356 target_ip = &(target_sd->sem_perm);
2357 target_ip->__key = tswapal(host_ip->__key);
2358 target_ip->uid = tswapal(host_ip->uid);
2359 target_ip->gid = tswapal(host_ip->gid);
2360 target_ip->cuid = tswapal(host_ip->cuid);
2361 target_ip->cgid = tswapal(host_ip->cgid);
2362 target_ip->mode = tswap16(host_ip->mode);
2363 unlock_user_struct(target_sd, target_addr, 1);
2364 return 0;
2367 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2368 abi_ulong target_addr)
2370 struct target_semid_ds *target_sd;
2372 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2373 return -TARGET_EFAULT;
2374 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2375 return -TARGET_EFAULT;
2376 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2377 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2378 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2379 unlock_user_struct(target_sd, target_addr, 0);
2380 return 0;
2383 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2384 struct semid_ds *host_sd)
2386 struct target_semid_ds *target_sd;
2388 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2389 return -TARGET_EFAULT;
2390 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2391 return -TARGET_EFAULT;
2392 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2393 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2394 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2395 unlock_user_struct(target_sd, target_addr, 1);
2396 return 0;
2399 struct target_seminfo {
2400 int semmap;
2401 int semmni;
2402 int semmns;
2403 int semmnu;
2404 int semmsl;
2405 int semopm;
2406 int semume;
2407 int semusz;
2408 int semvmx;
2409 int semaem;
2412 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2413 struct seminfo *host_seminfo)
2415 struct target_seminfo *target_seminfo;
2416 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2417 return -TARGET_EFAULT;
2418 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2419 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2420 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2421 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2422 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2423 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2424 __put_user(host_seminfo->semume, &target_seminfo->semume);
2425 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2426 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2427 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2428 unlock_user_struct(target_seminfo, target_addr, 1);
2429 return 0;
2432 union semun {
2433 int val;
2434 struct semid_ds *buf;
2435 unsigned short *array;
2436 struct seminfo *__buf;
2439 union target_semun {
2440 int val;
2441 abi_ulong buf;
2442 abi_ulong array;
2443 abi_ulong __buf;
2446 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2447 abi_ulong target_addr)
2449 int nsems;
2450 unsigned short *array;
2451 union semun semun;
2452 struct semid_ds semid_ds;
2453 int i, ret;
2455 semun.buf = &semid_ds;
2457 ret = semctl(semid, 0, IPC_STAT, semun);
2458 if (ret == -1)
2459 return get_errno(ret);
2461 nsems = semid_ds.sem_nsems;
2463 *host_array = malloc(nsems*sizeof(unsigned short));
2464 array = lock_user(VERIFY_READ, target_addr,
2465 nsems*sizeof(unsigned short), 1);
2466 if (!array)
2467 return -TARGET_EFAULT;
2469 for(i=0; i<nsems; i++) {
2470 __get_user((*host_array)[i], &array[i]);
2472 unlock_user(array, target_addr, 0);
2474 return 0;
2477 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2478 unsigned short **host_array)
2480 int nsems;
2481 unsigned short *array;
2482 union semun semun;
2483 struct semid_ds semid_ds;
2484 int i, ret;
2486 semun.buf = &semid_ds;
2488 ret = semctl(semid, 0, IPC_STAT, semun);
2489 if (ret == -1)
2490 return get_errno(ret);
2492 nsems = semid_ds.sem_nsems;
2494 array = lock_user(VERIFY_WRITE, target_addr,
2495 nsems*sizeof(unsigned short), 0);
2496 if (!array)
2497 return -TARGET_EFAULT;
2499 for(i=0; i<nsems; i++) {
2500 __put_user((*host_array)[i], &array[i]);
2502 free(*host_array);
2503 unlock_user(array, target_addr, 1);
2505 return 0;
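/* do_semctl(): convert the semun argument in whichever direction the command
 * requires (plain value, semaphore array, semid_ds or seminfo), issue the
 * host semctl() and convert the result back to the target.
 */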
2508 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2509 union target_semun target_su)
2511 union semun arg;
2512 struct semid_ds dsarg;
2513 unsigned short *array = NULL;
2514 struct seminfo seminfo;
2515 abi_long ret = -TARGET_EINVAL;
2516 abi_long err;
2517 cmd &= 0xff;
2519 switch( cmd ) {
2520 case GETVAL:
2521 case SETVAL:
2522 arg.val = tswap32(target_su.val);
2523 ret = get_errno(semctl(semid, semnum, cmd, arg));
2524 target_su.val = tswap32(arg.val);
2525 break;
2526 case GETALL:
2527 case SETALL:
2528 err = target_to_host_semarray(semid, &array, target_su.array);
2529 if (err)
2530 return err;
2531 arg.array = array;
2532 ret = get_errno(semctl(semid, semnum, cmd, arg));
2533 err = host_to_target_semarray(semid, target_su.array, &array);
2534 if (err)
2535 return err;
2536 break;
2537 case IPC_STAT:
2538 case IPC_SET:
2539 case SEM_STAT:
2540 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2541 if (err)
2542 return err;
2543 arg.buf = &dsarg;
2544 ret = get_errno(semctl(semid, semnum, cmd, arg));
2545 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2546 if (err)
2547 return err;
2548 break;
2549 case IPC_INFO:
2550 case SEM_INFO:
2551 arg.__buf = &seminfo;
2552 ret = get_errno(semctl(semid, semnum, cmd, arg));
2553 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2554 if (err)
2555 return err;
2556 break;
2557 case IPC_RMID:
2558 case GETPID:
2559 case GETNCNT:
2560 case GETZCNT:
2561 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2562 break;
2565 return ret;
2568 struct target_sembuf {
2569 unsigned short sem_num;
2570 short sem_op;
2571 short sem_flg;
2574 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2575 abi_ulong target_addr,
2576 unsigned nsops)
2578 struct target_sembuf *target_sembuf;
2579 int i;
2581 target_sembuf = lock_user(VERIFY_READ, target_addr,
2582 nsops*sizeof(struct target_sembuf), 1);
2583 if (!target_sembuf)
2584 return -TARGET_EFAULT;
2586 for(i=0; i<nsops; i++) {
2587 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2588 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2589 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2592 unlock_user(target_sembuf, target_addr, 0);
2594 return 0;
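/* do_semop(): copy the guest sembuf array into a host array on the stack and
 * perform the host semop() call.
 */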
2597 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2599 struct sembuf sops[nsops];
2601 if (target_to_host_sembuf(sops, ptr, nsops))
2602 return -TARGET_EFAULT;
2604 return get_errno(semop(semid, sops, nsops));
2607 struct target_msqid_ds
2609 struct target_ipc_perm msg_perm;
2610 abi_ulong msg_stime;
2611 #if TARGET_ABI_BITS == 32
2612 abi_ulong __unused1;
2613 #endif
2614 abi_ulong msg_rtime;
2615 #if TARGET_ABI_BITS == 32
2616 abi_ulong __unused2;
2617 #endif
2618 abi_ulong msg_ctime;
2619 #if TARGET_ABI_BITS == 32
2620 abi_ulong __unused3;
2621 #endif
2622 abi_ulong __msg_cbytes;
2623 abi_ulong msg_qnum;
2624 abi_ulong msg_qbytes;
2625 abi_ulong msg_lspid;
2626 abi_ulong msg_lrpid;
2627 abi_ulong __unused4;
2628 abi_ulong __unused5;
2631 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2632 abi_ulong target_addr)
2634 struct target_msqid_ds *target_md;
2636 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2637 return -TARGET_EFAULT;
2638 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2639 return -TARGET_EFAULT;
2640 host_md->msg_stime = tswapal(target_md->msg_stime);
2641 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2642 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2643 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2644 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2645 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2646 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2647 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2648 unlock_user_struct(target_md, target_addr, 0);
2649 return 0;
2652 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2653 struct msqid_ds *host_md)
2655 struct target_msqid_ds *target_md;
2657 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2658 return -TARGET_EFAULT;
2659 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2660 return -TARGET_EFAULT;
2661 target_md->msg_stime = tswapal(host_md->msg_stime);
2662 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2663 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2664 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2665 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2666 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2667 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2668 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2669 unlock_user_struct(target_md, target_addr, 1);
2670 return 0;
2673 struct target_msginfo {
2674 int msgpool;
2675 int msgmap;
2676 int msgmax;
2677 int msgmnb;
2678 int msgmni;
2679 int msgssz;
2680 int msgtql;
2681 unsigned short int msgseg;
2684 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2685 struct msginfo *host_msginfo)
2687 struct target_msginfo *target_msginfo;
2688 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2689 return -TARGET_EFAULT;
2690 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2691 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2692 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2693 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2694 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2695 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2696 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2697 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2698 unlock_user_struct(target_msginfo, target_addr, 1);
2699 return 0;
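/* do_msgctl(): msqid_ds is converted in both directions for
 * IPC_STAT/IPC_SET/MSG_STAT, a msginfo is copied out for IPC_INFO/MSG_INFO,
 * and IPC_RMID needs no argument conversion.
 */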
2702 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2704 struct msqid_ds dsarg;
2705 struct msginfo msginfo;
2706 abi_long ret = -TARGET_EINVAL;
2708 cmd &= 0xff;
2710 switch (cmd) {
2711 case IPC_STAT:
2712 case IPC_SET:
2713 case MSG_STAT:
2714 if (target_to_host_msqid_ds(&dsarg,ptr))
2715 return -TARGET_EFAULT;
2716 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2717 if (host_to_target_msqid_ds(ptr,&dsarg))
2718 return -TARGET_EFAULT;
2719 break;
2720 case IPC_RMID:
2721 ret = get_errno(msgctl(msgid, cmd, NULL));
2722 break;
2723 case IPC_INFO:
2724 case MSG_INFO:
2725 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2726 if (host_to_target_msginfo(ptr, &msginfo))
2727 return -TARGET_EFAULT;
2728 break;
2731 return ret;
2734 struct target_msgbuf {
2735 abi_long mtype;
2736 char mtext[1];
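/* A message starts with an abi_long mtype followed by the text, so
 * do_msgsnd() and do_msgrcv() below allocate a host msgbuf of
 * msgsz + sizeof(long) bytes, swap mtype and copy mtext across.
 */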
2739 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2740 unsigned int msgsz, int msgflg)
2742 struct target_msgbuf *target_mb;
2743 struct msgbuf *host_mb;
2744 abi_long ret = 0;
2746 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2747 return -TARGET_EFAULT;
2748 host_mb = malloc(msgsz+sizeof(long));
2749 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2750 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2751 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2752 free(host_mb);
2753 unlock_user_struct(target_mb, msgp, 0);
2755 return ret;
2758 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2759 unsigned int msgsz, abi_long msgtyp,
2760 int msgflg)
2762 struct target_msgbuf *target_mb;
2763 char *target_mtext;
2764 struct msgbuf *host_mb;
2765 abi_long ret = 0;
2767 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2768 return -TARGET_EFAULT;
2770 host_mb = g_malloc(msgsz+sizeof(long));
2771 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2773 if (ret > 0) {
2774 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2775 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2776 if (!target_mtext) {
2777 ret = -TARGET_EFAULT;
2778 goto end;
2780 memcpy(target_mb->mtext, host_mb->mtext, ret);
2781 unlock_user(target_mtext, target_mtext_addr, ret);
2784 target_mb->mtype = tswapal(host_mb->mtype);
2786 end:
2787 if (target_mb)
2788 unlock_user_struct(target_mb, msgp, 1);
2789 g_free(host_mb);
2790 return ret;
2793 struct target_shmid_ds
2795 struct target_ipc_perm shm_perm;
2796 abi_ulong shm_segsz;
2797 abi_ulong shm_atime;
2798 #if TARGET_ABI_BITS == 32
2799 abi_ulong __unused1;
2800 #endif
2801 abi_ulong shm_dtime;
2802 #if TARGET_ABI_BITS == 32
2803 abi_ulong __unused2;
2804 #endif
2805 abi_ulong shm_ctime;
2806 #if TARGET_ABI_BITS == 32
2807 abi_ulong __unused3;
2808 #endif
2809 int shm_cpid;
2810 int shm_lpid;
2811 abi_ulong shm_nattch;
2812 unsigned long int __unused4;
2813 unsigned long int __unused5;
2816 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2817 abi_ulong target_addr)
2819 struct target_shmid_ds *target_sd;
2821 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2822 return -TARGET_EFAULT;
2823 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2824 return -TARGET_EFAULT;
2825 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2826 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2827 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2828 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2829 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2830 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2831 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2832 unlock_user_struct(target_sd, target_addr, 0);
2833 return 0;
2836 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2837 struct shmid_ds *host_sd)
2839 struct target_shmid_ds *target_sd;
2841 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2842 return -TARGET_EFAULT;
2843 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2844 return -TARGET_EFAULT;
2845 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2846 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2847 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2848 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2849 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2850 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2851 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2852 unlock_user_struct(target_sd, target_addr, 1);
2853 return 0;
2856 struct target_shminfo {
2857 abi_ulong shmmax;
2858 abi_ulong shmmin;
2859 abi_ulong shmmni;
2860 abi_ulong shmseg;
2861 abi_ulong shmall;
2864 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2865 struct shminfo *host_shminfo)
2867 struct target_shminfo *target_shminfo;
2868 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2869 return -TARGET_EFAULT;
2870 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2871 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2872 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2873 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2874 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2875 unlock_user_struct(target_shminfo, target_addr, 1);
2876 return 0;
2879 struct target_shm_info {
2880 int used_ids;
2881 abi_ulong shm_tot;
2882 abi_ulong shm_rss;
2883 abi_ulong shm_swp;
2884 abi_ulong swap_attempts;
2885 abi_ulong swap_successes;
2888 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2889 struct shm_info *host_shm_info)
2891 struct target_shm_info *target_shm_info;
2892 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2893 return -TARGET_EFAULT;
2894 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2895 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2896 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2897 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2898 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2899 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2900 unlock_user_struct(target_shm_info, target_addr, 1);
2901 return 0;
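/* do_shmctl(): shmid_ds is converted for IPC_STAT/IPC_SET/SHM_STAT, shminfo
 * for IPC_INFO and shm_info for SHM_INFO; the lock, unlock and removal
 * commands take no user structure.
 */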
2904 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2906 struct shmid_ds dsarg;
2907 struct shminfo shminfo;
2908 struct shm_info shm_info;
2909 abi_long ret = -TARGET_EINVAL;
2911 cmd &= 0xff;
2913 switch(cmd) {
2914 case IPC_STAT:
2915 case IPC_SET:
2916 case SHM_STAT:
2917 if (target_to_host_shmid_ds(&dsarg, buf))
2918 return -TARGET_EFAULT;
2919 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2920 if (host_to_target_shmid_ds(buf, &dsarg))
2921 return -TARGET_EFAULT;
2922 break;
2923 case IPC_INFO:
2924 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2925 if (host_to_target_shminfo(buf, &shminfo))
2926 return -TARGET_EFAULT;
2927 break;
2928 case SHM_INFO:
2929 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2930 if (host_to_target_shm_info(buf, &shm_info))
2931 return -TARGET_EFAULT;
2932 break;
2933 case IPC_RMID:
2934 case SHM_LOCK:
2935 case SHM_UNLOCK:
2936 ret = get_errno(shmctl(shmid, cmd, NULL));
2937 break;
2940 return ret;
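/* do_shmat(): look up the segment size, attach either at the guest-requested
 * address or at a free range found by mmap_find_vma(), mark the guest pages
 * valid (and writable unless SHM_RDONLY), and record the mapping in
 * shm_regions for do_shmdt().
 */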
2943 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2945 abi_long raddr;
2946 void *host_raddr;
2947 struct shmid_ds shm_info;
2948 int i,ret;
2950 /* find out the length of the shared memory segment */
2951 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2952 if (is_error(ret)) {
2953 /* can't get length, bail out */
2954 return ret;
2957 mmap_lock();
2959 if (shmaddr)
2960 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2961 else {
2962 abi_ulong mmap_start;
2964 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2966 if (mmap_start == -1) {
2967 errno = ENOMEM;
2968 host_raddr = (void *)-1;
2969 } else
2970 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2973 if (host_raddr == (void *)-1) {
2974 mmap_unlock();
2975 return get_errno((long)host_raddr);
2977 raddr=h2g((unsigned long)host_raddr);
2979 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2980 PAGE_VALID | PAGE_READ |
2981 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2983 for (i = 0; i < N_SHM_REGIONS; i++) {
2984 if (shm_regions[i].start == 0) {
2985 shm_regions[i].start = raddr;
2986 shm_regions[i].size = shm_info.shm_segsz;
2987 break;
2991 mmap_unlock();
2992 return raddr;
2996 static inline abi_long do_shmdt(abi_ulong shmaddr)
2998 int i;
3000 for (i = 0; i < N_SHM_REGIONS; ++i) {
3001 if (shm_regions[i].start == shmaddr) {
3002 shm_regions[i].start = 0;
3003 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3004 break;
3008 return get_errno(shmdt(g2h(shmaddr)));
3011 #ifdef TARGET_NR_ipc
3012 /* ??? This only works with linear mappings. */
3013 /* do_ipc() must return target values and target errnos. */
3014 static abi_long do_ipc(unsigned int call, int first,
3015 int second, int third,
3016 abi_long ptr, abi_long fifth)
3018 int version;
3019 abi_long ret = 0;
3021 version = call >> 16;
3022 call &= 0xffff;
3024 switch (call) {
3025 case IPCOP_semop:
3026 ret = do_semop(first, ptr, second);
3027 break;
3029 case IPCOP_semget:
3030 ret = get_errno(semget(first, second, third));
3031 break;
3033 case IPCOP_semctl:
3034 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3035 break;
3037 case IPCOP_msgget:
3038 ret = get_errno(msgget(first, second));
3039 break;
3041 case IPCOP_msgsnd:
3042 ret = do_msgsnd(first, ptr, second, third);
3043 break;
3045 case IPCOP_msgctl:
3046 ret = do_msgctl(first, second, ptr);
3047 break;
3049 case IPCOP_msgrcv:
3050 switch (version) {
3051 case 0:
3053 struct target_ipc_kludge {
3054 abi_long msgp;
3055 abi_long msgtyp;
3056 } *tmp;
3058 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3059 ret = -TARGET_EFAULT;
3060 break;
3063 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3065 unlock_user_struct(tmp, ptr, 0);
3066 break;
3068 default:
3069 ret = do_msgrcv(first, ptr, second, fifth, third);
3071 break;
3073 case IPCOP_shmat:
3074 switch (version) {
3075 default:
3077 abi_ulong raddr;
3078 raddr = do_shmat(first, ptr, second);
3079 if (is_error(raddr))
3080 return get_errno(raddr);
3081 if (put_user_ual(raddr, third))
3082 return -TARGET_EFAULT;
3083 break;
3085 case 1:
3086 ret = -TARGET_EINVAL;
3087 break;
3089 break;
3090 case IPCOP_shmdt:
3091 ret = do_shmdt(ptr);
3092 break;
3094 case IPCOP_shmget:
3095 /* IPC_* flag values are the same on all linux platforms */
3096 ret = get_errno(shmget(first, second, third));
3097 break;
3099 /* IPC_* and SHM_* command values are the same on all linux platforms */
3100 case IPCOP_shmctl:
3101 ret = do_shmctl(first, second, third);
3102 break;
3103 default:
3104 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3105 ret = -TARGET_ENOSYS;
3106 break;
3108 return ret;
3110 #endif
3112 /* kernel structure types definitions */
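/* The STRUCT/STRUCT_SPECIAL macros expand syscall_types.h twice: the first
 * pass builds an enum of structure tags, the second emits an argtype
 * description array (struct_<name>_def) for every structure the ioctl
 * conversion code knows about.
 */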
3114 #define STRUCT(name, ...) STRUCT_ ## name,
3115 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3116 enum {
3117 #include "syscall_types.h"
3119 #undef STRUCT
3120 #undef STRUCT_SPECIAL
3122 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3123 #define STRUCT_SPECIAL(name)
3124 #include "syscall_types.h"
3125 #undef STRUCT
3126 #undef STRUCT_SPECIAL
3128 typedef struct IOCTLEntry IOCTLEntry;
3130 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3131 int fd, abi_long cmd, abi_long arg);
3133 struct IOCTLEntry {
3134 unsigned int target_cmd;
3135 unsigned int host_cmd;
3136 const char *name;
3137 int access;
3138 do_ioctl_fn *do_ioctl;
3139 const argtype arg_type[5];
3142 #define IOC_R 0x0001
3143 #define IOC_W 0x0002
3144 #define IOC_RW (IOC_R | IOC_W)
3146 #define MAX_STRUCT_SIZE 4096
3148 #ifdef CONFIG_FIEMAP
3149 /* So fiemap access checks don't overflow on 32 bit systems.
3150 * This is very slightly smaller than the limit imposed by
3151 * the underlying kernel.
3153 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3154 / sizeof(struct fiemap_extent))
3156 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3157 int fd, abi_long cmd, abi_long arg)
3159 /* The parameter for this ioctl is a struct fiemap followed
3160 * by an array of struct fiemap_extent whose size is set
3161 * in fiemap->fm_extent_count. The array is filled in by the
3162 * ioctl.
3164 int target_size_in, target_size_out;
3165 struct fiemap *fm;
3166 const argtype *arg_type = ie->arg_type;
3167 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3168 void *argptr, *p;
3169 abi_long ret;
3170 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3171 uint32_t outbufsz;
3172 int free_fm = 0;
3174 assert(arg_type[0] == TYPE_PTR);
3175 assert(ie->access == IOC_RW);
3176 arg_type++;
3177 target_size_in = thunk_type_size(arg_type, 0);
3178 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3179 if (!argptr) {
3180 return -TARGET_EFAULT;
3182 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3183 unlock_user(argptr, arg, 0);
3184 fm = (struct fiemap *)buf_temp;
3185 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3186 return -TARGET_EINVAL;
3189 outbufsz = sizeof (*fm) +
3190 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3192 if (outbufsz > MAX_STRUCT_SIZE) {
3193 /* We can't fit all the extents into the fixed size buffer.
3194 * Allocate one that is large enough and use it instead.
3196 fm = malloc(outbufsz);
3197 if (!fm) {
3198 return -TARGET_ENOMEM;
3200 memcpy(fm, buf_temp, sizeof(struct fiemap));
3201 free_fm = 1;
3203 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3204 if (!is_error(ret)) {
3205 target_size_out = target_size_in;
3206 /* An extent_count of 0 means we were only counting the extents
3207 * so there are no structs to copy
3209 if (fm->fm_extent_count != 0) {
3210 target_size_out += fm->fm_mapped_extents * extent_size;
3212 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3213 if (!argptr) {
3214 ret = -TARGET_EFAULT;
3215 } else {
3216 /* Convert the struct fiemap */
3217 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3218 if (fm->fm_extent_count != 0) {
3219 p = argptr + target_size_in;
3220 /* ...and then all the struct fiemap_extents */
3221 for (i = 0; i < fm->fm_mapped_extents; i++) {
3222 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3223 THUNK_TARGET);
3224 p += extent_size;
3227 unlock_user(argptr, arg, target_size_out);
3230 if (free_fm) {
3231 free(fm);
3233 return ret;
3235 #endif
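/* do_ioctl_ifconf(): the SIOCGIFCONF argument is a struct ifconf whose
 * ifc_buf points at a separate guest buffer of ifreq entries. Convert the
 * header, size a host buffer for the ifreq array, run the ioctl, then
 * convert the header and each returned ifreq back to the target layout.
 */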
3237 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3238 int fd, abi_long cmd, abi_long arg)
3240 const argtype *arg_type = ie->arg_type;
3241 int target_size;
3242 void *argptr;
3243 int ret;
3244 struct ifconf *host_ifconf;
3245 uint32_t outbufsz;
3246 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3247 int target_ifreq_size;
3248 int nb_ifreq;
3249 int free_buf = 0;
3250 int i;
3251 int target_ifc_len;
3252 abi_long target_ifc_buf;
3253 int host_ifc_len;
3254 char *host_ifc_buf;
3256 assert(arg_type[0] == TYPE_PTR);
3257 assert(ie->access == IOC_RW);
3259 arg_type++;
3260 target_size = thunk_type_size(arg_type, 0);
3262 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3263 if (!argptr)
3264 return -TARGET_EFAULT;
3265 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3266 unlock_user(argptr, arg, 0);
3268 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3269 target_ifc_len = host_ifconf->ifc_len;
3270 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3272 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3273 nb_ifreq = target_ifc_len / target_ifreq_size;
3274 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3276 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3277 if (outbufsz > MAX_STRUCT_SIZE) {
3278 /* We can't fit all the ifreq entries into the fixed size buffer.
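 * (This mirrors the oversized-buffer path in the fiemap handler above.)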
3279 * Allocate one that is large enough and use it instead.
3281 host_ifconf = malloc(outbufsz);
3282 if (!host_ifconf) {
3283 return -TARGET_ENOMEM;
3285 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3286 free_buf = 1;
3288 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3290 host_ifconf->ifc_len = host_ifc_len;
3291 host_ifconf->ifc_buf = host_ifc_buf;
3293 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3294 if (!is_error(ret)) {
3295 /* convert host ifc_len to target ifc_len */
3297 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3298 target_ifc_len = nb_ifreq * target_ifreq_size;
3299 host_ifconf->ifc_len = target_ifc_len;
3301 /* restore target ifc_buf */
3303 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3305 /* copy struct ifconf to target user */
3307 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3308 if (!argptr)
3309 return -TARGET_EFAULT;
3310 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3311 unlock_user(argptr, arg, target_size);
3313 /* copy ifreq[] to target user */
3315 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3316 for (i = 0; i < nb_ifreq ; i++) {
3317 thunk_convert(argptr + i * target_ifreq_size,
3318 host_ifc_buf + i * sizeof(struct ifreq),
3319 ifreq_arg_type, THUNK_TARGET);
3321 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3324 if (free_buf) {
3325 free(host_ifconf);
3328 return ret;
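/* Device-mapper ioctls: struct dm_ioctl is followed by a variable-sized
 * payload located via data_start/data_size, so the fixed header goes through
 * the generic thunk conversion while the per-command payload is converted by
 * hand in each direction.
 */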
3331 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3332 abi_long cmd, abi_long arg)
3334 void *argptr;
3335 struct dm_ioctl *host_dm;
3336 abi_long guest_data;
3337 uint32_t guest_data_size;
3338 int target_size;
3339 const argtype *arg_type = ie->arg_type;
3340 abi_long ret;
3341 void *big_buf = NULL;
3342 char *host_data;
3344 arg_type++;
3345 target_size = thunk_type_size(arg_type, 0);
3346 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3347 if (!argptr) {
3348 ret = -TARGET_EFAULT;
3349 goto out;
3351 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3352 unlock_user(argptr, arg, 0);
3354 /* buf_temp is too small, so fetch things into a bigger buffer */
3355 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3356 memcpy(big_buf, buf_temp, target_size);
3357 buf_temp = big_buf;
3358 host_dm = big_buf;
3360 guest_data = arg + host_dm->data_start;
3361 if ((guest_data - arg) < 0) {
3362 ret = -EINVAL;
3363 goto out;
3365 guest_data_size = host_dm->data_size - host_dm->data_start;
3366 host_data = (char*)host_dm + host_dm->data_start;
3368 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3369 switch (ie->host_cmd) {
3370 case DM_REMOVE_ALL:
3371 case DM_LIST_DEVICES:
3372 case DM_DEV_CREATE:
3373 case DM_DEV_REMOVE:
3374 case DM_DEV_SUSPEND:
3375 case DM_DEV_STATUS:
3376 case DM_DEV_WAIT:
3377 case DM_TABLE_STATUS:
3378 case DM_TABLE_CLEAR:
3379 case DM_TABLE_DEPS:
3380 case DM_LIST_VERSIONS:
3381 /* no input data */
3382 break;
3383 case DM_DEV_RENAME:
3384 case DM_DEV_SET_GEOMETRY:
3385 /* data contains only strings */
3386 memcpy(host_data, argptr, guest_data_size);
3387 break;
3388 case DM_TARGET_MSG:
3389 memcpy(host_data, argptr, guest_data_size);
3390 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3391 break;
3392 case DM_TABLE_LOAD:
3394 void *gspec = argptr;
3395 void *cur_data = host_data;
3396 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3397 int spec_size = thunk_type_size(arg_type, 0);
3398 int i;
3400 for (i = 0; i < host_dm->target_count; i++) {
3401 struct dm_target_spec *spec = cur_data;
3402 uint32_t next;
3403 int slen;
3405 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3406 slen = strlen((char*)gspec + spec_size) + 1;
3407 next = spec->next;
3408 spec->next = sizeof(*spec) + slen;
3409 strcpy((char*)&spec[1], gspec + spec_size);
3410 gspec += next;
3411 cur_data += spec->next;
3413 break;
3415 default:
3416 ret = -TARGET_EINVAL;
3417 goto out;
3419 unlock_user(argptr, guest_data, 0);
3421 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3422 if (!is_error(ret)) {
3423 guest_data = arg + host_dm->data_start;
3424 guest_data_size = host_dm->data_size - host_dm->data_start;
3425 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3426 switch (ie->host_cmd) {
3427 case DM_REMOVE_ALL:
3428 case DM_DEV_CREATE:
3429 case DM_DEV_REMOVE:
3430 case DM_DEV_RENAME:
3431 case DM_DEV_SUSPEND:
3432 case DM_DEV_STATUS:
3433 case DM_TABLE_LOAD:
3434 case DM_TABLE_CLEAR:
3435 case DM_TARGET_MSG:
3436 case DM_DEV_SET_GEOMETRY:
3437 /* no return data */
3438 break;
3439 case DM_LIST_DEVICES:
3441 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3442 uint32_t remaining_data = guest_data_size;
3443 void *cur_data = argptr;
3444 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3445 int nl_size = 12; /* can't use thunk_size due to alignment */
3447 while (1) {
3448 uint32_t next = nl->next;
3449 if (next) {
3450 nl->next = nl_size + (strlen(nl->name) + 1);
3452 if (remaining_data < nl->next) {
3453 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3454 break;
3456 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3457 strcpy(cur_data + nl_size, nl->name);
3458 cur_data += nl->next;
3459 remaining_data -= nl->next;
3460 if (!next) {
3461 break;
3463 nl = (void*)nl + next;
3465 break;
3467 case DM_DEV_WAIT:
3468 case DM_TABLE_STATUS:
3470 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3471 void *cur_data = argptr;
3472 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3473 int spec_size = thunk_type_size(arg_type, 0);
3474 int i;
3476 for (i = 0; i < host_dm->target_count; i++) {
3477 uint32_t next = spec->next;
3478 int slen = strlen((char*)&spec[1]) + 1;
3479 spec->next = (cur_data - argptr) + spec_size + slen;
3480 if (guest_data_size < spec->next) {
3481 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3482 break;
3484 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3485 strcpy(cur_data + spec_size, (char*)&spec[1]);
3486 cur_data = argptr + spec->next;
3487 spec = (void*)host_dm + host_dm->data_start + next;
3489 break;
3491 case DM_TABLE_DEPS:
3493 void *hdata = (void*)host_dm + host_dm->data_start;
3494 int count = *(uint32_t*)hdata;
3495 uint64_t *hdev = hdata + 8;
3496 uint64_t *gdev = argptr + 8;
3497 int i;
3499 *(uint32_t*)argptr = tswap32(count);
3500 for (i = 0; i < count; i++) {
3501 *gdev = tswap64(*hdev);
3502 gdev++;
3503 hdev++;
3505 break;
3507 case DM_LIST_VERSIONS:
3509 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3510 uint32_t remaining_data = guest_data_size;
3511 void *cur_data = argptr;
3512 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3513 int vers_size = thunk_type_size(arg_type, 0);
3515 while (1) {
3516 uint32_t next = vers->next;
3517 if (next) {
3518 vers->next = vers_size + (strlen(vers->name) + 1);
3520 if (remaining_data < vers->next) {
3521 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3522 break;
3524 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3525 strcpy(cur_data + vers_size, vers->name);
3526 cur_data += vers->next;
3527 remaining_data -= vers->next;
3528 if (!next) {
3529 break;
3531 vers = (void*)vers + next;
3533 break;
3535 default:
3536 ret = -TARGET_EINVAL;
3537 goto out;
3539 unlock_user(argptr, guest_data, guest_data_size);
3541 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3542 if (!argptr) {
3543 ret = -TARGET_EFAULT;
3544 goto out;
3546 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3547 unlock_user(argptr, arg, target_size);
3549 out:
3550 g_free(big_buf);
3551 return ret;
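/* ioctl_entries[] is generated from ioctls.h: each entry pairs a target
 * ioctl number with its host counterpart plus either an argument type
 * description for generic conversion or a special-case handler function.
 */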
3554 static IOCTLEntry ioctl_entries[] = {
3555 #define IOCTL(cmd, access, ...) \
3556 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3557 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3558 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3559 #include "ioctls.h"
3560 { 0, 0, },
3563 /* ??? Implement proper locking for ioctls. */
3564 /* do_ioctl() Must return target values and target errnos. */
3565 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3567 const IOCTLEntry *ie;
3568 const argtype *arg_type;
3569 abi_long ret;
3570 uint8_t buf_temp[MAX_STRUCT_SIZE];
3571 int target_size;
3572 void *argptr;
3574 ie = ioctl_entries;
3575 for(;;) {
3576 if (ie->target_cmd == 0) {
3577 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3578 return -TARGET_ENOSYS;
3580 if (ie->target_cmd == cmd)
3581 break;
3582 ie++;
3584 arg_type = ie->arg_type;
3585 #if defined(DEBUG)
3586 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3587 #endif
3588 if (ie->do_ioctl) {
3589 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3592 switch(arg_type[0]) {
3593 case TYPE_NULL:
3594 /* no argument */
3595 ret = get_errno(ioctl(fd, ie->host_cmd));
3596 break;
3597 case TYPE_PTRVOID:
3598 case TYPE_INT:
3599 /* int argument */
3600 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3601 break;
3602 case TYPE_PTR:
3603 arg_type++;
3604 target_size = thunk_type_size(arg_type, 0);
3605 switch(ie->access) {
3606 case IOC_R:
3607 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3608 if (!is_error(ret)) {
3609 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3610 if (!argptr)
3611 return -TARGET_EFAULT;
3612 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3613 unlock_user(argptr, arg, target_size);
3615 break;
3616 case IOC_W:
3617 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3618 if (!argptr)
3619 return -TARGET_EFAULT;
3620 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3621 unlock_user(argptr, arg, 0);
3622 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3623 break;
3624 default:
3625 case IOC_RW:
3626 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3627 if (!argptr)
3628 return -TARGET_EFAULT;
3629 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3630 unlock_user(argptr, arg, 0);
3631 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3632 if (!is_error(ret)) {
3633 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3634 if (!argptr)
3635 return -TARGET_EFAULT;
3636 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3637 unlock_user(argptr, arg, target_size);
3639 break;
3641 break;
3642 default:
3643 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3644 (long)cmd, arg_type[0]);
3645 ret = -TARGET_ENOSYS;
3646 break;
3648 return ret;
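/* The bitmask translation tables below map termios flag bits between target
 * and host encodings; an all-zero entry terminates each table.
 */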
3651 static const bitmask_transtbl iflag_tbl[] = {
3652 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3653 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3654 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3655 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3656 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3657 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3658 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3659 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3660 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3661 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3662 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3663 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3664 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3665 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3666 { 0, 0, 0, 0 }
3669 static const bitmask_transtbl oflag_tbl[] = {
3670 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3671 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3672 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3673 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3674 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3675 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3676 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3677 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3678 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3679 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3680 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3681 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3682 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3683 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3684 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3685 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3686 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3687 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3688 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3689 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3690 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3691 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3692 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3693 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3694 { 0, 0, 0, 0 }
3697 static const bitmask_transtbl cflag_tbl[] = {
3698 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3699 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3700 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3701 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3702 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3703 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3704 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3705 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3706 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3707 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3708 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3709 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3710 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3711 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3712 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3713 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3714 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3715 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3716 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3717 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3718 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3719 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3720 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3721 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3722 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3723 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3724 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3725 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3726 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3727 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3728 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3729 { 0, 0, 0, 0 }
3732 static const bitmask_transtbl lflag_tbl[] = {
3733 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3734 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3735 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3736 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3737 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3738 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3739 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3740 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3741 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3742 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3743 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3744 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3745 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3746 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3747 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3748 { 0, 0, 0, 0 }
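/* Whole-structure termios converters wired up in struct_termios_def below:
 * the flag words go through the tables above and the c_cc control characters
 * are copied slot by slot.
 */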
3751 static void target_to_host_termios (void *dst, const void *src)
3753 struct host_termios *host = dst;
3754 const struct target_termios *target = src;
3756 host->c_iflag =
3757 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3758 host->c_oflag =
3759 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3760 host->c_cflag =
3761 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3762 host->c_lflag =
3763 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3764 host->c_line = target->c_line;
3766 memset(host->c_cc, 0, sizeof(host->c_cc));
3767 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3768 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3769 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3770 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3771 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3772 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3773 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3774 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3775 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3776 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3777 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3778 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3779 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3780 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3781 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3782 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3783 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3786 static void host_to_target_termios (void *dst, const void *src)
3788 struct target_termios *target = dst;
3789 const struct host_termios *host = src;
3791 target->c_iflag =
3792 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3793 target->c_oflag =
3794 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3795 target->c_cflag =
3796 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3797 target->c_lflag =
3798 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3799 target->c_line = host->c_line;
3801 memset(target->c_cc, 0, sizeof(target->c_cc));
3802 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3803 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3804 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3805 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3806 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3807 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3808 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3809 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3810 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3811 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3812 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3813 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3814 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3815 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3816 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3817 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3818 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3821 static const StructEntry struct_termios_def = {
3822 .convert = { host_to_target_termios, target_to_host_termios },
3823 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3824 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3827 static bitmask_transtbl mmap_flags_tbl[] = {
3828 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3829 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3830 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3831 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3832 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3833 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3834 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3835 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3836 { 0, 0, 0, 0 }
3839 #if defined(TARGET_I386)
3841 /* NOTE: there is really one LDT for all the threads */
3842 static uint8_t *ldt_table;
3844 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3846 int size;
3847 void *p;
3849 if (!ldt_table)
3850 return 0;
3851 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3852 if (size > bytecount)
3853 size = bytecount;
3854 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3855 if (!p)
3856 return -TARGET_EFAULT;
3857 /* ??? Should this be byteswapped? */
3858 memcpy(p, ldt_table, size);
3859 unlock_user(p, ptr, size);
3860 return size;
3863 /* XXX: add locking support */
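/* write_ldt(): decode the target modify_ldt descriptor, build the two 32-bit
 * descriptor words the same way the Linux kernel does, and store them into
 * the guest LDT, which is allocated lazily with target_mmap() on first use.
 */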
3864 static abi_long write_ldt(CPUX86State *env,
3865 abi_ulong ptr, unsigned long bytecount, int oldmode)
3867 struct target_modify_ldt_ldt_s ldt_info;
3868 struct target_modify_ldt_ldt_s *target_ldt_info;
3869 int seg_32bit, contents, read_exec_only, limit_in_pages;
3870 int seg_not_present, useable, lm;
3871 uint32_t *lp, entry_1, entry_2;
3873 if (bytecount != sizeof(ldt_info))
3874 return -TARGET_EINVAL;
3875 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3876 return -TARGET_EFAULT;
3877 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3878 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3879 ldt_info.limit = tswap32(target_ldt_info->limit);
3880 ldt_info.flags = tswap32(target_ldt_info->flags);
3881 unlock_user_struct(target_ldt_info, ptr, 0);
3883 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3884 return -TARGET_EINVAL;
3885 seg_32bit = ldt_info.flags & 1;
3886 contents = (ldt_info.flags >> 1) & 3;
3887 read_exec_only = (ldt_info.flags >> 3) & 1;
3888 limit_in_pages = (ldt_info.flags >> 4) & 1;
3889 seg_not_present = (ldt_info.flags >> 5) & 1;
3890 useable = (ldt_info.flags >> 6) & 1;
3891 #ifdef TARGET_ABI32
3892 lm = 0;
3893 #else
3894 lm = (ldt_info.flags >> 7) & 1;
3895 #endif
3896 if (contents == 3) {
3897 if (oldmode)
3898 return -TARGET_EINVAL;
3899 if (seg_not_present == 0)
3900 return -TARGET_EINVAL;
3902 /* allocate the LDT */
3903 if (!ldt_table) {
3904 env->ldt.base = target_mmap(0,
3905 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3906 PROT_READ|PROT_WRITE,
3907 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3908 if (env->ldt.base == -1)
3909 return -TARGET_ENOMEM;
3910 memset(g2h(env->ldt.base), 0,
3911 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3912 env->ldt.limit = 0xffff;
3913 ldt_table = g2h(env->ldt.base);
3916 /* NOTE: same code as Linux kernel */
3917 /* Allow LDTs to be cleared by the user. */
3918 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3919 if (oldmode ||
3920 (contents == 0 &&
3921 read_exec_only == 1 &&
3922 seg_32bit == 0 &&
3923 limit_in_pages == 0 &&
3924 seg_not_present == 1 &&
3925 useable == 0 )) {
3926 entry_1 = 0;
3927 entry_2 = 0;
3928 goto install;
3932 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3933 (ldt_info.limit & 0x0ffff);
3934 entry_2 = (ldt_info.base_addr & 0xff000000) |
3935 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3936 (ldt_info.limit & 0xf0000) |
3937 ((read_exec_only ^ 1) << 9) |
3938 (contents << 10) |
3939 ((seg_not_present ^ 1) << 15) |
3940 (seg_32bit << 22) |
3941 (limit_in_pages << 23) |
3942 (lm << 21) |
3943 0x7000;
3944 if (!oldmode)
3945 entry_2 |= (useable << 20);
3947 /* Install the new entry ... */
3948 install:
3949 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3950 lp[0] = tswap32(entry_1);
3951 lp[1] = tswap32(entry_2);
3952 return 0;
3955 /* specific and weird i386 syscalls */
3956 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3957 unsigned long bytecount)
3959 abi_long ret;
3961 switch (func) {
3962 case 0:
3963 ret = read_ldt(ptr, bytecount);
3964 break;
3965 case 1:
3966 ret = write_ldt(env, ptr, bytecount, 1);
3967 break;
3968 case 0x11:
3969 ret = write_ldt(env, ptr, bytecount, 0);
3970 break;
3971 default:
3972 ret = -TARGET_ENOSYS;
3973 break;
3975 return ret;
3978 #if defined(TARGET_I386) && defined(TARGET_ABI32)
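/* do_set_thread_area(): install a TLS descriptor into the guest GDT; an
 * entry_number of -1 asks us to pick the first free TLS slot and write the
 * chosen index back to the guest structure.
 */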
3979 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3981 uint64_t *gdt_table = g2h(env->gdt.base);
3982 struct target_modify_ldt_ldt_s ldt_info;
3983 struct target_modify_ldt_ldt_s *target_ldt_info;
3984 int seg_32bit, contents, read_exec_only, limit_in_pages;
3985 int seg_not_present, useable, lm;
3986 uint32_t *lp, entry_1, entry_2;
3987 int i;
3989 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3990 if (!target_ldt_info)
3991 return -TARGET_EFAULT;
3992 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3993 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3994 ldt_info.limit = tswap32(target_ldt_info->limit);
3995 ldt_info.flags = tswap32(target_ldt_info->flags);
3996 if (ldt_info.entry_number == -1) {
3997 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3998 if (gdt_table[i] == 0) {
3999 ldt_info.entry_number = i;
4000 target_ldt_info->entry_number = tswap32(i);
4001 break;
4005 unlock_user_struct(target_ldt_info, ptr, 1);
4007 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4008 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4009 return -TARGET_EINVAL;
4010 seg_32bit = ldt_info.flags & 1;
4011 contents = (ldt_info.flags >> 1) & 3;
4012 read_exec_only = (ldt_info.flags >> 3) & 1;
4013 limit_in_pages = (ldt_info.flags >> 4) & 1;
4014 seg_not_present = (ldt_info.flags >> 5) & 1;
4015 useable = (ldt_info.flags >> 6) & 1;
4016 #ifdef TARGET_ABI32
4017 lm = 0;
4018 #else
4019 lm = (ldt_info.flags >> 7) & 1;
4020 #endif
4022 if (contents == 3) {
4023 if (seg_not_present == 0)
4024 return -TARGET_EINVAL;
4027 /* NOTE: same code as Linux kernel */
4028 /* Allow LDTs to be cleared by the user. */
4029 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4030 if ((contents == 0 &&
4031 read_exec_only == 1 &&
4032 seg_32bit == 0 &&
4033 limit_in_pages == 0 &&
4034 seg_not_present == 1 &&
4035 useable == 0 )) {
4036 entry_1 = 0;
4037 entry_2 = 0;
4038 goto install;
4042 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4043 (ldt_info.limit & 0x0ffff);
4044 entry_2 = (ldt_info.base_addr & 0xff000000) |
4045 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4046 (ldt_info.limit & 0xf0000) |
4047 ((read_exec_only ^ 1) << 9) |
4048 (contents << 10) |
4049 ((seg_not_present ^ 1) << 15) |
4050 (seg_32bit << 22) |
4051 (limit_in_pages << 23) |
4052 (useable << 20) |
4053 (lm << 21) |
4054 0x7000;
4056 /* Install the new entry ... */
4057 install:
4058 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4059 lp[0] = tswap32(entry_1);
4060 lp[1] = tswap32(entry_2);
4061 return 0;
4064 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4066 struct target_modify_ldt_ldt_s *target_ldt_info;
4067 uint64_t *gdt_table = g2h(env->gdt.base);
4068 uint32_t base_addr, limit, flags;
4069 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4070 int seg_not_present, useable, lm;
4071 uint32_t *lp, entry_1, entry_2;
4073 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4074 if (!target_ldt_info)
4075 return -TARGET_EFAULT;
4076 idx = tswap32(target_ldt_info->entry_number);
4077 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4078 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4079 unlock_user_struct(target_ldt_info, ptr, 1);
4080 return -TARGET_EINVAL;
4082 lp = (uint32_t *)(gdt_table + idx);
4083 entry_1 = tswap32(lp[0]);
4084 entry_2 = tswap32(lp[1]);
4086 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4087 contents = (entry_2 >> 10) & 3;
4088 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4089 seg_32bit = (entry_2 >> 22) & 1;
4090 limit_in_pages = (entry_2 >> 23) & 1;
4091 useable = (entry_2 >> 20) & 1;
4092 #ifdef TARGET_ABI32
4093 lm = 0;
4094 #else
4095 lm = (entry_2 >> 21) & 1;
4096 #endif
4097 flags = (seg_32bit << 0) | (contents << 1) |
4098 (read_exec_only << 3) | (limit_in_pages << 4) |
4099 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4100 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4101 base_addr = (entry_1 >> 16) |
4102 (entry_2 & 0xff000000) |
4103 ((entry_2 & 0xff) << 16);
4104 target_ldt_info->base_addr = tswapal(base_addr);
4105 target_ldt_info->limit = tswap32(limit);
4106 target_ldt_info->flags = tswap32(flags);
4107 unlock_user_struct(target_ldt_info, ptr, 1);
4108 return 0;
4110 #endif /* TARGET_I386 && TARGET_ABI32 */
4112 #ifndef TARGET_ABI32
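/* arch_prctl for 64-bit guests: SET_FS/SET_GS load a null selector and
   then store the requested base straight into the segment descriptor
   cache; GET_FS/GET_GS copy the cached base back out to guest memory. */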
4113 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4115 abi_long ret = 0;
4116 abi_ulong val;
4117 int idx;
4119 switch(code) {
4120 case TARGET_ARCH_SET_GS:
4121 case TARGET_ARCH_SET_FS:
4122 if (code == TARGET_ARCH_SET_GS)
4123 idx = R_GS;
4124 else
4125 idx = R_FS;
4126 cpu_x86_load_seg(env, idx, 0);
4127 env->segs[idx].base = addr;
4128 break;
4129 case TARGET_ARCH_GET_GS:
4130 case TARGET_ARCH_GET_FS:
4131 if (code == TARGET_ARCH_GET_GS)
4132 idx = R_GS;
4133 else
4134 idx = R_FS;
4135 val = env->segs[idx].base;
4136 if (put_user(val, addr, abi_ulong))
4137 ret = -TARGET_EFAULT;
4138 break;
4139 default:
4140 ret = -TARGET_EINVAL;
4141 break;
4143 return ret;
4145 #endif
4147 #endif /* defined(TARGET_I386) */
4149 #define NEW_STACK_SIZE 0x40000
4151 #if defined(CONFIG_USE_NPTL)
4153 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
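/* Bootstrap data handed from do_fork() to the new host thread.  The
   mutex/condvar pair lets the parent wait until the child has
   published its TID and completed its early setup. */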
4154 typedef struct {
4155 CPUArchState *env;
4156 pthread_mutex_t mutex;
4157 pthread_cond_t cond;
4158 pthread_t thread;
4159 uint32_t tid;
4160 abi_ulong child_tidptr;
4161 abi_ulong parent_tidptr;
4162 sigset_t sigmask;
4163 } new_thread_info;
4165 static void *clone_func(void *arg)
4167 new_thread_info *info = arg;
4168 CPUArchState *env;
4169 CPUState *cpu;
4170 TaskState *ts;
4172 env = info->env;
4173 cpu = ENV_GET_CPU(env);
4174 thread_cpu = cpu;
4175 ts = (TaskState *)env->opaque;
4176 info->tid = gettid();
4177 cpu->host_tid = info->tid;
4178 task_settid(ts);
4179 if (info->child_tidptr)
4180 put_user_u32(info->tid, info->child_tidptr);
4181 if (info->parent_tidptr)
4182 put_user_u32(info->tid, info->parent_tidptr);
4183 /* Enable signals. */
4184 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4185 /* Signal to the parent that we're ready. */
4186 pthread_mutex_lock(&info->mutex);
4187 pthread_cond_broadcast(&info->cond);
4188 pthread_mutex_unlock(&info->mutex);
4189 /* Wait until the parent has finished initializing the TLS state. */
4190 pthread_mutex_lock(&clone_lock);
4191 pthread_mutex_unlock(&clone_lock);
4192 cpu_loop(env);
4193 /* never exits */
4194 return NULL;
4196 #else
4198 static int clone_func(void *arg)
4200 CPUArchState *env = arg;
4201 cpu_loop(env);
4202 /* never exits */
4203 return 0;
4205 #endif
4207 /* do_fork() must return host values and target errnos (unlike most
4208 do_*() functions). */
4209 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4210 abi_ulong parent_tidptr, target_ulong newtls,
4211 abi_ulong child_tidptr)
4213 int ret;
4214 TaskState *ts;
4215 CPUArchState *new_env;
4216 #if defined(CONFIG_USE_NPTL)
4217 unsigned int nptl_flags;
4218 sigset_t sigmask;
4219 #else
4220 uint8_t *new_stack;
4221 #endif
4223 /* Emulate vfork() with fork() */
4224 if (flags & CLONE_VFORK)
4225 flags &= ~(CLONE_VFORK | CLONE_VM);
4227 if (flags & CLONE_VM) {
4228 TaskState *parent_ts = (TaskState *)env->opaque;
4229 #if defined(CONFIG_USE_NPTL)
4230 new_thread_info info;
4231 pthread_attr_t attr;
4232 #endif
4233 ts = g_malloc0(sizeof(TaskState));
4234 init_task_state(ts);
4235 /* we create a new CPU instance. */
4236 new_env = cpu_copy(env);
4237 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4238 cpu_reset(ENV_GET_CPU(new_env));
4239 #endif
4240 /* Init regs that differ from the parent. */
4241 cpu_clone_regs(new_env, newsp);
4242 new_env->opaque = ts;
4243 ts->bprm = parent_ts->bprm;
4244 ts->info = parent_ts->info;
4245 #if defined(CONFIG_USE_NPTL)
4246 nptl_flags = flags;
4247 flags &= ~CLONE_NPTL_FLAGS2;
4249 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4250 ts->child_tidptr = child_tidptr;
4253 if (nptl_flags & CLONE_SETTLS)
4254 cpu_set_tls (new_env, newtls);
4256 /* Grab a mutex so that thread setup appears atomic. */
4257 pthread_mutex_lock(&clone_lock);
4259 memset(&info, 0, sizeof(info));
4260 pthread_mutex_init(&info.mutex, NULL);
4261 pthread_mutex_lock(&info.mutex);
4262 pthread_cond_init(&info.cond, NULL);
4263 info.env = new_env;
4264 if (nptl_flags & CLONE_CHILD_SETTID)
4265 info.child_tidptr = child_tidptr;
4266 if (nptl_flags & CLONE_PARENT_SETTID)
4267 info.parent_tidptr = parent_tidptr;
4269 ret = pthread_attr_init(&attr);
4270 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4271 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
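        /* The thread runs detached: guest code never joins it through
           pthreads; thread exit is observed via the CLONE_CHILD_CLEARTID
           futex wake performed in the TARGET_NR_exit path instead. */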
4272 /* It is not safe to deliver signals until the child has finished
4273 initializing, so temporarily block all signals. */
4274 sigfillset(&sigmask);
4275 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4277 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4278 /* TODO: Free new CPU state if thread creation failed. */
4280 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4281 pthread_attr_destroy(&attr);
4282 if (ret == 0) {
4283 /* Wait for the child to initialize. */
4284 pthread_cond_wait(&info.cond, &info.mutex);
4285 ret = info.tid;
4286 if (flags & CLONE_PARENT_SETTID)
4287 put_user_u32(ret, parent_tidptr);
4288 } else {
4289 ret = -1;
4291 pthread_mutex_unlock(&info.mutex);
4292 pthread_cond_destroy(&info.cond);
4293 pthread_mutex_destroy(&info.mutex);
4294 pthread_mutex_unlock(&clone_lock);
4295 #else
4296 if (flags & CLONE_NPTL_FLAGS2)
4297 return -EINVAL;
4298 /* This is probably going to die very quickly, but do it anyway. */
4299 new_stack = g_malloc0 (NEW_STACK_SIZE);
4300 #ifdef __ia64__
4301 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4302 #else
4303 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4304 #endif
4305 #endif
4306 } else {
4307 /* if CLONE_VM is not set, we consider it a fork */
4308 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4309 return -EINVAL;
4310 fork_start();
4311 ret = fork();
4312 if (ret == 0) {
4313 /* Child Process. */
4314 cpu_clone_regs(env, newsp);
4315 fork_end(1);
4316 #if defined(CONFIG_USE_NPTL)
4317 /* There is a race condition here. The parent process could
4318 theoretically read the TID in the child process before the child
4319 tid is set. This would require using either ptrace
4320 (not implemented) or having *_tidptr point at a shared memory
4321 mapping. We can't repeat the spinlock hack used above because
4322 the child process gets its own copy of the lock. */
4323 if (flags & CLONE_CHILD_SETTID)
4324 put_user_u32(gettid(), child_tidptr);
4325 if (flags & CLONE_PARENT_SETTID)
4326 put_user_u32(gettid(), parent_tidptr);
4327 ts = (TaskState *)env->opaque;
4328 if (flags & CLONE_SETTLS)
4329 cpu_set_tls (env, newtls);
4330 if (flags & CLONE_CHILD_CLEARTID)
4331 ts->child_tidptr = child_tidptr;
4332 #endif
4333 } else {
4334 fork_end(0);
4337 return ret;
4340 /* warning: doesn't handle Linux-specific flags... */
4341 static int target_to_host_fcntl_cmd(int cmd)
4343 switch(cmd) {
4344 case TARGET_F_DUPFD:
4345 case TARGET_F_GETFD:
4346 case TARGET_F_SETFD:
4347 case TARGET_F_GETFL:
4348 case TARGET_F_SETFL:
4349 return cmd;
4350 case TARGET_F_GETLK:
4351 return F_GETLK;
4352 case TARGET_F_SETLK:
4353 return F_SETLK;
4354 case TARGET_F_SETLKW:
4355 return F_SETLKW;
4356 case TARGET_F_GETOWN:
4357 return F_GETOWN;
4358 case TARGET_F_SETOWN:
4359 return F_SETOWN;
4360 case TARGET_F_GETSIG:
4361 return F_GETSIG;
4362 case TARGET_F_SETSIG:
4363 return F_SETSIG;
4364 #if TARGET_ABI_BITS == 32
4365 case TARGET_F_GETLK64:
4366 return F_GETLK64;
4367 case TARGET_F_SETLK64:
4368 return F_SETLK64;
4369 case TARGET_F_SETLKW64:
4370 return F_SETLKW64;
4371 #endif
4372 case TARGET_F_SETLEASE:
4373 return F_SETLEASE;
4374 case TARGET_F_GETLEASE:
4375 return F_GETLEASE;
4376 #ifdef F_DUPFD_CLOEXEC
4377 case TARGET_F_DUPFD_CLOEXEC:
4378 return F_DUPFD_CLOEXEC;
4379 #endif
4380 case TARGET_F_NOTIFY:
4381 return F_NOTIFY;
4382 default:
4383 return -TARGET_EINVAL;
4385 return -TARGET_EINVAL;
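/* Translation table for the l_type field of struct flock: each entry
   maps a target F_RDLCK/F_WRLCK/... constant to its host counterpart.
   The -1 masks make target_to_host_bitmask() compare the whole value
   rather than individual bits, i.e. these act as value translations. */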
4388 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4389 static const bitmask_transtbl flock_tbl[] = {
4390 TRANSTBL_CONVERT(F_RDLCK),
4391 TRANSTBL_CONVERT(F_WRLCK),
4392 TRANSTBL_CONVERT(F_UNLCK),
4393 TRANSTBL_CONVERT(F_EXLCK),
4394 TRANSTBL_CONVERT(F_SHLCK),
4395 { 0, 0, 0, 0 }
4398 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4400 struct flock fl;
4401 struct target_flock *target_fl;
4402 struct flock64 fl64;
4403 struct target_flock64 *target_fl64;
4404 abi_long ret;
4405 int host_cmd = target_to_host_fcntl_cmd(cmd);
4407 if (host_cmd == -TARGET_EINVAL)
4408 return host_cmd;
4410 switch(cmd) {
4411 case TARGET_F_GETLK:
4412 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4413 return -TARGET_EFAULT;
4414 fl.l_type =
4415 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4416 fl.l_whence = tswap16(target_fl->l_whence);
4417 fl.l_start = tswapal(target_fl->l_start);
4418 fl.l_len = tswapal(target_fl->l_len);
4419 fl.l_pid = tswap32(target_fl->l_pid);
4420 unlock_user_struct(target_fl, arg, 0);
4421 ret = get_errno(fcntl(fd, host_cmd, &fl));
4422 if (ret == 0) {
4423 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4424 return -TARGET_EFAULT;
4425 target_fl->l_type =
4426 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4427 target_fl->l_whence = tswap16(fl.l_whence);
4428 target_fl->l_start = tswapal(fl.l_start);
4429 target_fl->l_len = tswapal(fl.l_len);
4430 target_fl->l_pid = tswap32(fl.l_pid);
4431 unlock_user_struct(target_fl, arg, 1);
4433 break;
4435 case TARGET_F_SETLK:
4436 case TARGET_F_SETLKW:
4437 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4438 return -TARGET_EFAULT;
4439 fl.l_type =
4440 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4441 fl.l_whence = tswap16(target_fl->l_whence);
4442 fl.l_start = tswapal(target_fl->l_start);
4443 fl.l_len = tswapal(target_fl->l_len);
4444 fl.l_pid = tswap32(target_fl->l_pid);
4445 unlock_user_struct(target_fl, arg, 0);
4446 ret = get_errno(fcntl(fd, host_cmd, &fl));
4447 break;
4449 case TARGET_F_GETLK64:
4450 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4451 return -TARGET_EFAULT;
4452 fl64.l_type =
4453 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4454 fl64.l_whence = tswap16(target_fl64->l_whence);
4455 fl64.l_start = tswap64(target_fl64->l_start);
4456 fl64.l_len = tswap64(target_fl64->l_len);
4457 fl64.l_pid = tswap32(target_fl64->l_pid);
4458 unlock_user_struct(target_fl64, arg, 0);
4459 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4460 if (ret == 0) {
4461 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4462 return -TARGET_EFAULT;
4463 target_fl64->l_type =
4464 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4465 target_fl64->l_whence = tswap16(fl64.l_whence);
4466 target_fl64->l_start = tswap64(fl64.l_start);
4467 target_fl64->l_len = tswap64(fl64.l_len);
4468 target_fl64->l_pid = tswap32(fl64.l_pid);
4469 unlock_user_struct(target_fl64, arg, 1);
4471 break;
4472 case TARGET_F_SETLK64:
4473 case TARGET_F_SETLKW64:
4474 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4475 return -TARGET_EFAULT;
4476 fl64.l_type =
4477 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4478 fl64.l_whence = tswap16(target_fl64->l_whence);
4479 fl64.l_start = tswap64(target_fl64->l_start);
4480 fl64.l_len = tswap64(target_fl64->l_len);
4481 fl64.l_pid = tswap32(target_fl64->l_pid);
4482 unlock_user_struct(target_fl64, arg, 0);
4483 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4484 break;
4486 case TARGET_F_GETFL:
4487 ret = get_errno(fcntl(fd, host_cmd, arg));
4488 if (ret >= 0) {
4489 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4491 break;
4493 case TARGET_F_SETFL:
4494 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4495 break;
4497 case TARGET_F_SETOWN:
4498 case TARGET_F_GETOWN:
4499 case TARGET_F_SETSIG:
4500 case TARGET_F_GETSIG:
4501 case TARGET_F_SETLEASE:
4502 case TARGET_F_GETLEASE:
4503 ret = get_errno(fcntl(fd, host_cmd, arg));
4504 break;
4506 default:
4507 ret = get_errno(fcntl(fd, cmd, arg));
4508 break;
4510 return ret;
4513 #ifdef USE_UID16
4515 static inline int high2lowuid(int uid)
4517 if (uid > 65535)
4518 return 65534;
4519 else
4520 return uid;
4523 static inline int high2lowgid(int gid)
4525 if (gid > 65535)
4526 return 65534;
4527 else
4528 return gid;
4531 static inline int low2highuid(int uid)
4533 if ((int16_t)uid == -1)
4534 return -1;
4535 else
4536 return uid;
4539 static inline int low2highgid(int gid)
4541 if ((int16_t)gid == -1)
4542 return -1;
4543 else
4544 return gid;
4546 static inline int tswapid(int id)
4548 return tswap16(id);
4550 #else /* !USE_UID16 */
4551 static inline int high2lowuid(int uid)
4553 return uid;
4555 static inline int high2lowgid(int gid)
4557 return gid;
4559 static inline int low2highuid(int uid)
4561 return uid;
4563 static inline int low2highgid(int gid)
4565 return gid;
4567 static inline int tswapid(int id)
4569 return tswap32(id);
4571 #endif /* USE_UID16 */
4573 void syscall_init(void)
4575 IOCTLEntry *ie;
4576 const argtype *arg_type;
4577 int size;
4578 int i;
4580 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4581 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4582 #include "syscall_types.h"
4583 #undef STRUCT
4584 #undef STRUCT_SPECIAL
4586 /* Build target_to_host_errno_table[] table from
4587 * host_to_target_errno_table[]. */
4588 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4589 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4592 /* we patch the ioctl size if necessary. We rely on the fact that
4593 no ioctl has all bits set to '1' in the size field */
4594 ie = ioctl_entries;
4595 while (ie->target_cmd != 0) {
4596 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4597 TARGET_IOC_SIZEMASK) {
4598 arg_type = ie->arg_type;
4599 if (arg_type[0] != TYPE_PTR) {
4600 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4601 ie->target_cmd);
4602 exit(1);
4604 arg_type++;
4605 size = thunk_type_size(arg_type, 0);
4606 ie->target_cmd = (ie->target_cmd &
4607 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4608 (size << TARGET_IOC_SIZESHIFT);
4611 /* automatic consistency check if same arch */
4612 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4613 (defined(__x86_64__) && defined(TARGET_X86_64))
4614 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4615 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4616 ie->name, ie->target_cmd, ie->host_cmd);
4618 #endif
4619 ie++;
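/* On 32-bit ABIs a 64-bit file offset arrives split across two
   registers; target_offset64() reassembles the pair in the target's
   word order (on a little-endian target word1 carries the high half).
   On 64-bit ABIs the offset already fits in a single register. */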
4623 #if TARGET_ABI_BITS == 32
4624 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4626 #ifdef TARGET_WORDS_BIGENDIAN
4627 return ((uint64_t)word0 << 32) | word1;
4628 #else
4629 return ((uint64_t)word1 << 32) | word0;
4630 #endif
4632 #else /* TARGET_ABI_BITS == 32 */
4633 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4635 return word0;
4637 #endif /* TARGET_ABI_BITS != 32 */
4639 #ifdef TARGET_NR_truncate64
4640 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4641 abi_long arg2,
4642 abi_long arg3,
4643 abi_long arg4)
4645 if (regpairs_aligned(cpu_env)) {
4646 arg2 = arg3;
4647 arg3 = arg4;
4649 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4651 #endif
4653 #ifdef TARGET_NR_ftruncate64
4654 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4655 abi_long arg2,
4656 abi_long arg3,
4657 abi_long arg4)
4659 if (regpairs_aligned(cpu_env)) {
4660 arg2 = arg3;
4661 arg3 = arg4;
4663 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4665 #endif
4667 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4668 abi_ulong target_addr)
4670 struct target_timespec *target_ts;
4672 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4673 return -TARGET_EFAULT;
4674 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4675 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4676 unlock_user_struct(target_ts, target_addr, 0);
4677 return 0;
4680 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4681 struct timespec *host_ts)
4683 struct target_timespec *target_ts;
4685 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4686 return -TARGET_EFAULT;
4687 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4688 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4689 unlock_user_struct(target_ts, target_addr, 1);
4690 return 0;
4693 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
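/* Copy a host struct stat out to the guest's stat64 layout.  ARM EABI
   guests use their own target_eabi_stat64 structure, so they take a
   separate code path. */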
4694 static inline abi_long host_to_target_stat64(void *cpu_env,
4695 abi_ulong target_addr,
4696 struct stat *host_st)
4698 #ifdef TARGET_ARM
4699 if (((CPUARMState *)cpu_env)->eabi) {
4700 struct target_eabi_stat64 *target_st;
4702 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4703 return -TARGET_EFAULT;
4704 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4705 __put_user(host_st->st_dev, &target_st->st_dev);
4706 __put_user(host_st->st_ino, &target_st->st_ino);
4707 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4708 __put_user(host_st->st_ino, &target_st->__st_ino);
4709 #endif
4710 __put_user(host_st->st_mode, &target_st->st_mode);
4711 __put_user(host_st->st_nlink, &target_st->st_nlink);
4712 __put_user(host_st->st_uid, &target_st->st_uid);
4713 __put_user(host_st->st_gid, &target_st->st_gid);
4714 __put_user(host_st->st_rdev, &target_st->st_rdev);
4715 __put_user(host_st->st_size, &target_st->st_size);
4716 __put_user(host_st->st_blksize, &target_st->st_blksize);
4717 __put_user(host_st->st_blocks, &target_st->st_blocks);
4718 __put_user(host_st->st_atime, &target_st->target_st_atime);
4719 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4720 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4721 unlock_user_struct(target_st, target_addr, 1);
4722 } else
4723 #endif
4725 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4726 struct target_stat *target_st;
4727 #else
4728 struct target_stat64 *target_st;
4729 #endif
4731 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4732 return -TARGET_EFAULT;
4733 memset(target_st, 0, sizeof(*target_st));
4734 __put_user(host_st->st_dev, &target_st->st_dev);
4735 __put_user(host_st->st_ino, &target_st->st_ino);
4736 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4737 __put_user(host_st->st_ino, &target_st->__st_ino);
4738 #endif
4739 __put_user(host_st->st_mode, &target_st->st_mode);
4740 __put_user(host_st->st_nlink, &target_st->st_nlink);
4741 __put_user(host_st->st_uid, &target_st->st_uid);
4742 __put_user(host_st->st_gid, &target_st->st_gid);
4743 __put_user(host_st->st_rdev, &target_st->st_rdev);
4744 /* XXX: better use of kernel struct */
4745 __put_user(host_st->st_size, &target_st->st_size);
4746 __put_user(host_st->st_blksize, &target_st->st_blksize);
4747 __put_user(host_st->st_blocks, &target_st->st_blocks);
4748 __put_user(host_st->st_atime, &target_st->target_st_atime);
4749 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4750 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4751 unlock_user_struct(target_st, target_addr, 1);
4754 return 0;
4756 #endif
4758 #if defined(CONFIG_USE_NPTL)
4759 /* ??? Using host futex calls even when target atomic operations
4760 are not really atomic probably breaks things. However implementing
4761 futexes locally would make futexes shared between multiple processes
4762 tricky. Then again, they're probably useless because guest atomic
4763 operations won't work either. */
4764 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4765 target_ulong uaddr2, int val3)
4767 struct timespec ts, *pts;
4768 int base_op;
4770 /* ??? We assume FUTEX_* constants are the same on both host
4771 and target. */
4772 #ifdef FUTEX_CMD_MASK
4773 base_op = op & FUTEX_CMD_MASK;
4774 #else
4775 base_op = op;
4776 #endif
4777 switch (base_op) {
4778 case FUTEX_WAIT:
4779 case FUTEX_WAIT_BITSET:
4780 if (timeout) {
4781 pts = &ts;
4782 target_to_host_timespec(pts, timeout);
4783 } else {
4784 pts = NULL;
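            /* The kernel compares *uaddr (guest memory, so guest byte
               order) against val, hence val is byte-swapped to match. */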
4786 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4787 pts, NULL, val3));
4788 case FUTEX_WAKE:
4789 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4790 case FUTEX_FD:
4791 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4792 case FUTEX_REQUEUE:
4793 case FUTEX_CMP_REQUEUE:
4794 case FUTEX_WAKE_OP:
4795 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4796 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4797 But the prototype takes a `struct timespec *'; insert casts
4798 to satisfy the compiler. We do not need to tswap TIMEOUT
4799 since it's not compared to guest memory. */
4800 pts = (struct timespec *)(uintptr_t) timeout;
4801 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4802 g2h(uaddr2),
4803 (base_op == FUTEX_CMP_REQUEUE
4804 ? tswap32(val3)
4805 : val3)));
4806 default:
4807 return -TARGET_ENOSYS;
4810 #endif
4812 /* Map host to target signal numbers for the wait family of syscalls.
4813 Assume all other status bits are the same. */
4814 int host_to_target_waitstatus(int status)
4816 if (WIFSIGNALED(status)) {
4817 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4819 if (WIFSTOPPED(status)) {
4820 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4821 | (status & 0xff);
4823 return status;
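/* Parse the kernel release string "major.minor.micro" into the packed
   form used for version checks, e.g. "3.10.0" -> 0x030a00.  The value
   reported by uname can be overridden with the -r command line option
   (qemu_uname_release). */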
4826 int get_osversion(void)
4828 static int osversion;
4829 struct new_utsname buf;
4830 const char *s;
4831 int i, n, tmp;
4832 if (osversion)
4833 return osversion;
4834 if (qemu_uname_release && *qemu_uname_release) {
4835 s = qemu_uname_release;
4836 } else {
4837 if (sys_uname(&buf))
4838 return 0;
4839 s = buf.release;
4841 tmp = 0;
4842 for (i = 0; i < 3; i++) {
4843 n = 0;
4844 while (*s >= '0' && *s <= '9') {
4845 n *= 10;
4846 n += *s - '0';
4847 s++;
4849 tmp = (tmp << 8) + n;
4850 if (*s == '.')
4851 s++;
4853 osversion = tmp;
4854 return osversion;
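/* Fake /proc/self/maps: re-read the host's own maps, keep only ranges
   that are visible in the guest address space, rewrite them with h2g()
   addresses, and append a synthetic [stack] line for the targets that
   expect one. */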
4858 static int open_self_maps(void *cpu_env, int fd)
4860 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4861 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4862 #endif
4863 FILE *fp;
4864 char *line = NULL;
4865 size_t len = 0;
4866 ssize_t read;
4868 fp = fopen("/proc/self/maps", "r");
4869 if (fp == NULL) {
4870 return -EACCES;
4873 while ((read = getline(&line, &len, fp)) != -1) {
4874 int fields, dev_maj, dev_min, inode;
4875 uint64_t min, max, offset;
4876 char flag_r, flag_w, flag_x, flag_p;
4877 char path[513] = ""; /* room for %512s plus the terminating NUL */
4878 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4879 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4880 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4882 if ((fields < 10) || (fields > 11)) {
4883 continue;
4885 if (!strncmp(path, "[stack]", 7)) {
4886 continue;
4888 if (h2g_valid(min) && h2g_valid(max)) {
4889 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
4890 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
4891 h2g(min), h2g(max), flag_r, flag_w,
4892 flag_x, flag_p, offset, dev_maj, dev_min, inode,
4893 path[0] ? " " : "", path);
4897 free(line);
4898 fclose(fp);
4900 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4901 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4902 (unsigned long long)ts->info->stack_limit,
4903 (unsigned long long)(ts->info->start_stack +
4904 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
4905 (unsigned long long)0);
4906 #endif
4908 return 0;
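/* Fake /proc/self/stat: only the pid (field 1), the command name
   (field 2) and the stack start (field 28) carry real values; the
   remaining fields of the 44 emitted are reported as 0. */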
4911 static int open_self_stat(void *cpu_env, int fd)
4913 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4914 abi_ulong start_stack = ts->info->start_stack;
4915 int i;
4917 for (i = 0; i < 44; i++) {
4918 char buf[128];
4919 int len;
4920 uint64_t val = 0;
4922 if (i == 0) {
4923 /* pid */
4924 val = getpid();
4925 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4926 } else if (i == 1) {
4927 /* app name */
4928 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
4929 } else if (i == 27) {
4930 /* stack bottom */
4931 val = start_stack;
4932 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4933 } else {
4934 /* every other field is reported as 0 */
4935 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
4938 len = strlen(buf);
4939 if (write(fd, buf, len) != len) {
4940 return -1;
4944 return 0;
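/* Fake /proc/self/auxv by copying the guest's saved auxiliary vector
   (stashed on its stack at exec time) out to the temporary file. */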
4947 static int open_self_auxv(void *cpu_env, int fd)
4949 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4950 abi_ulong auxv = ts->info->saved_auxv;
4951 abi_ulong len = ts->info->auxv_len;
4952 char *ptr;
4955 * The auxiliary vector is stored on the target process's stack;
4956 * read the whole vector and copy it out to the file.
4958 ptr = lock_user(VERIFY_READ, auxv, len, 0);
4959 if (ptr != NULL) {
4960 while (len > 0) {
4961 ssize_t r;
4962 r = write(fd, ptr, len);
4963 if (r <= 0) {
4964 break;
4966 len -= r;
4967 ptr += r;
4969 lseek(fd, 0, SEEK_SET);
4970 unlock_user(ptr, auxv, len);
4973 return 0;
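/* open(2) wrapper: a few /proc/self files must describe the guest
   rather than the QEMU process itself, so matching paths are redirected
   to an unlinked temporary file filled in by the corresponding fake_open
   handler; everything else goes straight to the host open(). */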
4976 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
4978 struct fake_open {
4979 const char *filename;
4980 int (*fill)(void *cpu_env, int fd);
4982 const struct fake_open *fake_open;
4983 static const struct fake_open fakes[] = {
4984 { "/proc/self/maps", open_self_maps },
4985 { "/proc/self/stat", open_self_stat },
4986 { "/proc/self/auxv", open_self_auxv },
4987 { NULL, NULL }
4990 for (fake_open = fakes; fake_open->filename; fake_open++) {
4991 if (!strncmp(pathname, fake_open->filename,
4992 strlen(fake_open->filename))) {
4993 break;
4997 if (fake_open->filename) {
4998 const char *tmpdir;
4999 char filename[PATH_MAX];
5000 int fd, r;
5002 /* create a temporary file to hold the faked contents */
5003 tmpdir = getenv("TMPDIR");
5004 if (!tmpdir)
5005 tmpdir = "/tmp";
5006 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5007 fd = mkstemp(filename);
5008 if (fd < 0) {
5009 return fd;
5011 unlink(filename);
5013 if ((r = fake_open->fill(cpu_env, fd))) {
5014 close(fd);
5015 return r;
5017 lseek(fd, 0, SEEK_SET);
5019 return fd;
5022 return get_errno(open(path(pathname), flags, mode));
5025 /* do_syscall() should always have a single exit point at the end so
5026 that actions, such as logging of syscall results, can be performed.
5027 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5028 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5029 abi_long arg2, abi_long arg3, abi_long arg4,
5030 abi_long arg5, abi_long arg6, abi_long arg7,
5031 abi_long arg8)
5033 #ifdef CONFIG_USE_NPTL
5034 CPUState *cpu = ENV_GET_CPU(cpu_env);
5035 #endif
5036 abi_long ret;
5037 struct stat st;
5038 struct statfs stfs;
5039 void *p;
5041 #ifdef DEBUG
5042 gemu_log("syscall %d", num);
5043 #endif
5044 if(do_strace)
5045 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5047 switch(num) {
5048 case TARGET_NR_exit:
5049 #ifdef CONFIG_USE_NPTL
5050 /* In old applications this may be used to implement _exit(2).
5051 However in threaded applications it is used for thread termination,
5052 and _exit_group is used for application termination.
5053 Do thread termination if we have more than one thread. */
5054 /* FIXME: This probably breaks if a signal arrives. We should probably
5055 be disabling signals. */
5056 if (first_cpu->next_cpu) {
5057 TaskState *ts;
5058 CPUState **lastp;
5059 CPUState *p;
5061 cpu_list_lock();
5062 lastp = &first_cpu;
5063 p = first_cpu;
5064 while (p && p != cpu) {
5065 lastp = &p->next_cpu;
5066 p = p->next_cpu;
5068 /* If we didn't find the CPU for this thread then something is
5069 horribly wrong. */
5070 if (!p) {
5071 abort();
5073 /* Remove the CPU from the list. */
5074 *lastp = p->next_cpu;
5075 cpu_list_unlock();
5076 ts = ((CPUArchState *)cpu_env)->opaque;
5077 if (ts->child_tidptr) {
5078 put_user_u32(0, ts->child_tidptr);
5079 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5080 NULL, NULL, 0);
5082 thread_cpu = NULL;
5083 object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
5084 g_free(ts);
5085 pthread_exit(NULL);
5087 #endif
5088 #ifdef TARGET_GPROF
5089 _mcleanup();
5090 #endif
5091 gdb_exit(cpu_env, arg1);
5092 _exit(arg1);
5093 ret = 0; /* avoid warning */
5094 break;
5095 case TARGET_NR_read:
5096 if (arg3 == 0)
5097 ret = 0;
5098 else {
5099 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5100 goto efault;
5101 ret = get_errno(read(arg1, p, arg3));
5102 unlock_user(p, arg2, ret);
5104 break;
5105 case TARGET_NR_write:
5106 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5107 goto efault;
5108 ret = get_errno(write(arg1, p, arg3));
5109 unlock_user(p, arg2, 0);
5110 break;
5111 case TARGET_NR_open:
5112 if (!(p = lock_user_string(arg1)))
5113 goto efault;
5114 ret = get_errno(do_open(cpu_env, p,
5115 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5116 arg3));
5117 unlock_user(p, arg1, 0);
5118 break;
5119 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5120 case TARGET_NR_openat:
5121 if (!(p = lock_user_string(arg2)))
5122 goto efault;
5123 ret = get_errno(sys_openat(arg1,
5124 path(p),
5125 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5126 arg4));
5127 unlock_user(p, arg2, 0);
5128 break;
5129 #endif
5130 case TARGET_NR_close:
5131 ret = get_errno(close(arg1));
5132 break;
5133 case TARGET_NR_brk:
5134 ret = do_brk(arg1);
5135 break;
5136 case TARGET_NR_fork:
5137 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5138 break;
5139 #ifdef TARGET_NR_waitpid
5140 case TARGET_NR_waitpid:
5142 int status;
5143 ret = get_errno(waitpid(arg1, &status, arg3));
5144 if (!is_error(ret) && arg2 && ret
5145 && put_user_s32(host_to_target_waitstatus(status), arg2))
5146 goto efault;
5148 break;
5149 #endif
5150 #ifdef TARGET_NR_waitid
5151 case TARGET_NR_waitid:
5153 siginfo_t info;
5154 info.si_pid = 0;
5155 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5156 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5157 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5158 goto efault;
5159 host_to_target_siginfo(p, &info);
5160 unlock_user(p, arg3, sizeof(target_siginfo_t));
5163 break;
5164 #endif
5165 #ifdef TARGET_NR_creat /* not on alpha */
5166 case TARGET_NR_creat:
5167 if (!(p = lock_user_string(arg1)))
5168 goto efault;
5169 ret = get_errno(creat(p, arg2));
5170 unlock_user(p, arg1, 0);
5171 break;
5172 #endif
5173 case TARGET_NR_link:
5175 void * p2;
5176 p = lock_user_string(arg1);
5177 p2 = lock_user_string(arg2);
5178 if (!p || !p2)
5179 ret = -TARGET_EFAULT;
5180 else
5181 ret = get_errno(link(p, p2));
5182 unlock_user(p2, arg2, 0);
5183 unlock_user(p, arg1, 0);
5185 break;
5186 #if defined(TARGET_NR_linkat)
5187 case TARGET_NR_linkat:
5189 void * p2 = NULL;
5190 if (!arg2 || !arg4)
5191 goto efault;
5192 p = lock_user_string(arg2);
5193 p2 = lock_user_string(arg4);
5194 if (!p || !p2)
5195 ret = -TARGET_EFAULT;
5196 else
5197 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5198 unlock_user(p, arg2, 0);
5199 unlock_user(p2, arg4, 0);
5201 break;
5202 #endif
5203 case TARGET_NR_unlink:
5204 if (!(p = lock_user_string(arg1)))
5205 goto efault;
5206 ret = get_errno(unlink(p));
5207 unlock_user(p, arg1, 0);
5208 break;
5209 #if defined(TARGET_NR_unlinkat)
5210 case TARGET_NR_unlinkat:
5211 if (!(p = lock_user_string(arg2)))
5212 goto efault;
5213 ret = get_errno(unlinkat(arg1, p, arg3));
5214 unlock_user(p, arg2, 0);
5215 break;
5216 #endif
5217 case TARGET_NR_execve:
5219 char **argp, **envp;
5220 int argc, envc;
5221 abi_ulong gp;
5222 abi_ulong guest_argp;
5223 abi_ulong guest_envp;
5224 abi_ulong addr;
5225 char **q;
5226 int total_size = 0;
5228 argc = 0;
5229 guest_argp = arg2;
5230 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5231 if (get_user_ual(addr, gp))
5232 goto efault;
5233 if (!addr)
5234 break;
5235 argc++;
5237 envc = 0;
5238 guest_envp = arg3;
5239 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5240 if (get_user_ual(addr, gp))
5241 goto efault;
5242 if (!addr)
5243 break;
5244 envc++;
5247 argp = alloca((argc + 1) * sizeof(void *));
5248 envp = alloca((envc + 1) * sizeof(void *));
5250 for (gp = guest_argp, q = argp; gp;
5251 gp += sizeof(abi_ulong), q++) {
5252 if (get_user_ual(addr, gp))
5253 goto execve_efault;
5254 if (!addr)
5255 break;
5256 if (!(*q = lock_user_string(addr)))
5257 goto execve_efault;
5258 total_size += strlen(*q) + 1;
5260 *q = NULL;
5262 for (gp = guest_envp, q = envp; gp;
5263 gp += sizeof(abi_ulong), q++) {
5264 if (get_user_ual(addr, gp))
5265 goto execve_efault;
5266 if (!addr)
5267 break;
5268 if (!(*q = lock_user_string(addr)))
5269 goto execve_efault;
5270 total_size += strlen(*q) + 1;
5272 *q = NULL;
5274 /* This case will not be caught by the host's execve() if its
5275 page size is bigger than the target's. */
5276 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5277 ret = -TARGET_E2BIG;
5278 goto execve_end;
5280 if (!(p = lock_user_string(arg1)))
5281 goto execve_efault;
5282 ret = get_errno(execve(p, argp, envp));
5283 unlock_user(p, arg1, 0);
5285 goto execve_end;
5287 execve_efault:
5288 ret = -TARGET_EFAULT;
5290 execve_end:
5291 for (gp = guest_argp, q = argp; *q;
5292 gp += sizeof(abi_ulong), q++) {
5293 if (get_user_ual(addr, gp)
5294 || !addr)
5295 break;
5296 unlock_user(*q, addr, 0);
5298 for (gp = guest_envp, q = envp; *q;
5299 gp += sizeof(abi_ulong), q++) {
5300 if (get_user_ual(addr, gp)
5301 || !addr)
5302 break;
5303 unlock_user(*q, addr, 0);
5306 break;
5307 case TARGET_NR_chdir:
5308 if (!(p = lock_user_string(arg1)))
5309 goto efault;
5310 ret = get_errno(chdir(p));
5311 unlock_user(p, arg1, 0);
5312 break;
5313 #ifdef TARGET_NR_time
5314 case TARGET_NR_time:
5316 time_t host_time;
5317 ret = get_errno(time(&host_time));
5318 if (!is_error(ret)
5319 && arg1
5320 && put_user_sal(host_time, arg1))
5321 goto efault;
5323 break;
5324 #endif
5325 case TARGET_NR_mknod:
5326 if (!(p = lock_user_string(arg1)))
5327 goto efault;
5328 ret = get_errno(mknod(p, arg2, arg3));
5329 unlock_user(p, arg1, 0);
5330 break;
5331 #if defined(TARGET_NR_mknodat)
5332 case TARGET_NR_mknodat:
5333 if (!(p = lock_user_string(arg2)))
5334 goto efault;
5335 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5336 unlock_user(p, arg2, 0);
5337 break;
5338 #endif
5339 case TARGET_NR_chmod:
5340 if (!(p = lock_user_string(arg1)))
5341 goto efault;
5342 ret = get_errno(chmod(p, arg2));
5343 unlock_user(p, arg1, 0);
5344 break;
5345 #ifdef TARGET_NR_break
5346 case TARGET_NR_break:
5347 goto unimplemented;
5348 #endif
5349 #ifdef TARGET_NR_oldstat
5350 case TARGET_NR_oldstat:
5351 goto unimplemented;
5352 #endif
5353 case TARGET_NR_lseek:
5354 ret = get_errno(lseek(arg1, arg2, arg3));
5355 break;
5356 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5357 /* Alpha specific */
5358 case TARGET_NR_getxpid:
5359 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5360 ret = get_errno(getpid());
5361 break;
5362 #endif
5363 #ifdef TARGET_NR_getpid
5364 case TARGET_NR_getpid:
5365 ret = get_errno(getpid());
5366 break;
5367 #endif
5368 case TARGET_NR_mount:
5370 /* need to look at the data field */
5371 void *p2, *p3;
5372 p = lock_user_string(arg1);
5373 p2 = lock_user_string(arg2);
5374 p3 = lock_user_string(arg3);
5375 if (!p || !p2 || !p3)
5376 ret = -TARGET_EFAULT;
5377 else {
5378 /* FIXME - arg5 should be locked, but it isn't clear how to
5379 * do that since it's not guaranteed to be a NULL-terminated
5380 * string.
5382 if ( ! arg5 )
5383 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5384 else
5385 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5387 unlock_user(p, arg1, 0);
5388 unlock_user(p2, arg2, 0);
5389 unlock_user(p3, arg3, 0);
5390 break;
5392 #ifdef TARGET_NR_umount
5393 case TARGET_NR_umount:
5394 if (!(p = lock_user_string(arg1)))
5395 goto efault;
5396 ret = get_errno(umount(p));
5397 unlock_user(p, arg1, 0);
5398 break;
5399 #endif
5400 #ifdef TARGET_NR_stime /* not on alpha */
5401 case TARGET_NR_stime:
5403 time_t host_time;
5404 if (get_user_sal(host_time, arg1))
5405 goto efault;
5406 ret = get_errno(stime(&host_time));
5408 break;
5409 #endif
5410 case TARGET_NR_ptrace:
5411 goto unimplemented;
5412 #ifdef TARGET_NR_alarm /* not on alpha */
5413 case TARGET_NR_alarm:
5414 ret = alarm(arg1);
5415 break;
5416 #endif
5417 #ifdef TARGET_NR_oldfstat
5418 case TARGET_NR_oldfstat:
5419 goto unimplemented;
5420 #endif
5421 #ifdef TARGET_NR_pause /* not on alpha */
5422 case TARGET_NR_pause:
5423 ret = get_errno(pause());
5424 break;
5425 #endif
5426 #ifdef TARGET_NR_utime
5427 case TARGET_NR_utime:
5429 struct utimbuf tbuf, *host_tbuf;
5430 struct target_utimbuf *target_tbuf;
5431 if (arg2) {
5432 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5433 goto efault;
5434 tbuf.actime = tswapal(target_tbuf->actime);
5435 tbuf.modtime = tswapal(target_tbuf->modtime);
5436 unlock_user_struct(target_tbuf, arg2, 0);
5437 host_tbuf = &tbuf;
5438 } else {
5439 host_tbuf = NULL;
5441 if (!(p = lock_user_string(arg1)))
5442 goto efault;
5443 ret = get_errno(utime(p, host_tbuf));
5444 unlock_user(p, arg1, 0);
5446 break;
5447 #endif
5448 case TARGET_NR_utimes:
5450 struct timeval *tvp, tv[2];
5451 if (arg2) {
5452 if (copy_from_user_timeval(&tv[0], arg2)
5453 || copy_from_user_timeval(&tv[1],
5454 arg2 + sizeof(struct target_timeval)))
5455 goto efault;
5456 tvp = tv;
5457 } else {
5458 tvp = NULL;
5460 if (!(p = lock_user_string(arg1)))
5461 goto efault;
5462 ret = get_errno(utimes(p, tvp));
5463 unlock_user(p, arg1, 0);
5465 break;
5466 #if defined(TARGET_NR_futimesat)
5467 case TARGET_NR_futimesat:
5469 struct timeval *tvp, tv[2];
5470 if (arg3) {
5471 if (copy_from_user_timeval(&tv[0], arg3)
5472 || copy_from_user_timeval(&tv[1],
5473 arg3 + sizeof(struct target_timeval)))
5474 goto efault;
5475 tvp = tv;
5476 } else {
5477 tvp = NULL;
5479 if (!(p = lock_user_string(arg2)))
5480 goto efault;
5481 ret = get_errno(futimesat(arg1, path(p), tvp));
5482 unlock_user(p, arg2, 0);
5484 break;
5485 #endif
5486 #ifdef TARGET_NR_stty
5487 case TARGET_NR_stty:
5488 goto unimplemented;
5489 #endif
5490 #ifdef TARGET_NR_gtty
5491 case TARGET_NR_gtty:
5492 goto unimplemented;
5493 #endif
5494 case TARGET_NR_access:
5495 if (!(p = lock_user_string(arg1)))
5496 goto efault;
5497 ret = get_errno(access(path(p), arg2));
5498 unlock_user(p, arg1, 0);
5499 break;
5500 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5501 case TARGET_NR_faccessat:
5502 if (!(p = lock_user_string(arg2)))
5503 goto efault;
5504 ret = get_errno(faccessat(arg1, p, arg3, 0));
5505 unlock_user(p, arg2, 0);
5506 break;
5507 #endif
5508 #ifdef TARGET_NR_nice /* not on alpha */
5509 case TARGET_NR_nice:
5510 ret = get_errno(nice(arg1));
5511 break;
5512 #endif
5513 #ifdef TARGET_NR_ftime
5514 case TARGET_NR_ftime:
5515 goto unimplemented;
5516 #endif
5517 case TARGET_NR_sync:
5518 sync();
5519 ret = 0;
5520 break;
5521 case TARGET_NR_kill:
5522 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5523 break;
5524 case TARGET_NR_rename:
5526 void *p2;
5527 p = lock_user_string(arg1);
5528 p2 = lock_user_string(arg2);
5529 if (!p || !p2)
5530 ret = -TARGET_EFAULT;
5531 else
5532 ret = get_errno(rename(p, p2));
5533 unlock_user(p2, arg2, 0);
5534 unlock_user(p, arg1, 0);
5536 break;
5537 #if defined(TARGET_NR_renameat)
5538 case TARGET_NR_renameat:
5540 void *p2;
5541 p = lock_user_string(arg2);
5542 p2 = lock_user_string(arg4);
5543 if (!p || !p2)
5544 ret = -TARGET_EFAULT;
5545 else
5546 ret = get_errno(renameat(arg1, p, arg3, p2));
5547 unlock_user(p2, arg4, 0);
5548 unlock_user(p, arg2, 0);
5550 break;
5551 #endif
5552 case TARGET_NR_mkdir:
5553 if (!(p = lock_user_string(arg1)))
5554 goto efault;
5555 ret = get_errno(mkdir(p, arg2));
5556 unlock_user(p, arg1, 0);
5557 break;
5558 #if defined(TARGET_NR_mkdirat)
5559 case TARGET_NR_mkdirat:
5560 if (!(p = lock_user_string(arg2)))
5561 goto efault;
5562 ret = get_errno(mkdirat(arg1, p, arg3));
5563 unlock_user(p, arg2, 0);
5564 break;
5565 #endif
5566 case TARGET_NR_rmdir:
5567 if (!(p = lock_user_string(arg1)))
5568 goto efault;
5569 ret = get_errno(rmdir(p));
5570 unlock_user(p, arg1, 0);
5571 break;
5572 case TARGET_NR_dup:
5573 ret = get_errno(dup(arg1));
5574 break;
5575 case TARGET_NR_pipe:
5576 ret = do_pipe(cpu_env, arg1, 0, 0);
5577 break;
5578 #ifdef TARGET_NR_pipe2
5579 case TARGET_NR_pipe2:
5580 ret = do_pipe(cpu_env, arg1,
5581 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5582 break;
5583 #endif
5584 case TARGET_NR_times:
5586 struct target_tms *tmsp;
5587 struct tms tms;
5588 ret = get_errno(times(&tms));
5589 if (arg1) {
5590 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5591 if (!tmsp)
5592 goto efault;
5593 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5594 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5595 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5596 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5598 if (!is_error(ret))
5599 ret = host_to_target_clock_t(ret);
5601 break;
5602 #ifdef TARGET_NR_prof
5603 case TARGET_NR_prof:
5604 goto unimplemented;
5605 #endif
5606 #ifdef TARGET_NR_signal
5607 case TARGET_NR_signal:
5608 goto unimplemented;
5609 #endif
5610 case TARGET_NR_acct:
5611 if (arg1 == 0) {
5612 ret = get_errno(acct(NULL));
5613 } else {
5614 if (!(p = lock_user_string(arg1)))
5615 goto efault;
5616 ret = get_errno(acct(path(p)));
5617 unlock_user(p, arg1, 0);
5619 break;
5620 #ifdef TARGET_NR_umount2 /* not on alpha */
5621 case TARGET_NR_umount2:
5622 if (!(p = lock_user_string(arg1)))
5623 goto efault;
5624 ret = get_errno(umount2(p, arg2));
5625 unlock_user(p, arg1, 0);
5626 break;
5627 #endif
5628 #ifdef TARGET_NR_lock
5629 case TARGET_NR_lock:
5630 goto unimplemented;
5631 #endif
5632 case TARGET_NR_ioctl:
5633 ret = do_ioctl(arg1, arg2, arg3);
5634 break;
5635 case TARGET_NR_fcntl:
5636 ret = do_fcntl(arg1, arg2, arg3);
5637 break;
5638 #ifdef TARGET_NR_mpx
5639 case TARGET_NR_mpx:
5640 goto unimplemented;
5641 #endif
5642 case TARGET_NR_setpgid:
5643 ret = get_errno(setpgid(arg1, arg2));
5644 break;
5645 #ifdef TARGET_NR_ulimit
5646 case TARGET_NR_ulimit:
5647 goto unimplemented;
5648 #endif
5649 #ifdef TARGET_NR_oldolduname
5650 case TARGET_NR_oldolduname:
5651 goto unimplemented;
5652 #endif
5653 case TARGET_NR_umask:
5654 ret = get_errno(umask(arg1));
5655 break;
5656 case TARGET_NR_chroot:
5657 if (!(p = lock_user_string(arg1)))
5658 goto efault;
5659 ret = get_errno(chroot(p));
5660 unlock_user(p, arg1, 0);
5661 break;
5662 case TARGET_NR_ustat:
5663 goto unimplemented;
5664 case TARGET_NR_dup2:
5665 ret = get_errno(dup2(arg1, arg2));
5666 break;
5667 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5668 case TARGET_NR_dup3:
5669 ret = get_errno(dup3(arg1, arg2, arg3));
5670 break;
5671 #endif
5672 #ifdef TARGET_NR_getppid /* not on alpha */
5673 case TARGET_NR_getppid:
5674 ret = get_errno(getppid());
5675 break;
5676 #endif
5677 case TARGET_NR_getpgrp:
5678 ret = get_errno(getpgrp());
5679 break;
5680 case TARGET_NR_setsid:
5681 ret = get_errno(setsid());
5682 break;
5683 #ifdef TARGET_NR_sigaction
5684 case TARGET_NR_sigaction:
5686 #if defined(TARGET_ALPHA)
5687 struct target_sigaction act, oact, *pact = 0;
5688 struct target_old_sigaction *old_act;
5689 if (arg2) {
5690 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5691 goto efault;
5692 act._sa_handler = old_act->_sa_handler;
5693 target_siginitset(&act.sa_mask, old_act->sa_mask);
5694 act.sa_flags = old_act->sa_flags;
5695 act.sa_restorer = 0;
5696 unlock_user_struct(old_act, arg2, 0);
5697 pact = &act;
5699 ret = get_errno(do_sigaction(arg1, pact, &oact));
5700 if (!is_error(ret) && arg3) {
5701 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5702 goto efault;
5703 old_act->_sa_handler = oact._sa_handler;
5704 old_act->sa_mask = oact.sa_mask.sig[0];
5705 old_act->sa_flags = oact.sa_flags;
5706 unlock_user_struct(old_act, arg3, 1);
5708 #elif defined(TARGET_MIPS)
5709 struct target_sigaction act, oact, *pact, *old_act;
5711 if (arg2) {
5712 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5713 goto efault;
5714 act._sa_handler = old_act->_sa_handler;
5715 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5716 act.sa_flags = old_act->sa_flags;
5717 unlock_user_struct(old_act, arg2, 0);
5718 pact = &act;
5719 } else {
5720 pact = NULL;
5723 ret = get_errno(do_sigaction(arg1, pact, &oact));
5725 if (!is_error(ret) && arg3) {
5726 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5727 goto efault;
5728 old_act->_sa_handler = oact._sa_handler;
5729 old_act->sa_flags = oact.sa_flags;
5730 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5731 old_act->sa_mask.sig[1] = 0;
5732 old_act->sa_mask.sig[2] = 0;
5733 old_act->sa_mask.sig[3] = 0;
5734 unlock_user_struct(old_act, arg3, 1);
5736 #else
5737 struct target_old_sigaction *old_act;
5738 struct target_sigaction act, oact, *pact;
5739 if (arg2) {
5740 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5741 goto efault;
5742 act._sa_handler = old_act->_sa_handler;
5743 target_siginitset(&act.sa_mask, old_act->sa_mask);
5744 act.sa_flags = old_act->sa_flags;
5745 act.sa_restorer = old_act->sa_restorer;
5746 unlock_user_struct(old_act, arg2, 0);
5747 pact = &act;
5748 } else {
5749 pact = NULL;
5751 ret = get_errno(do_sigaction(arg1, pact, &oact));
5752 if (!is_error(ret) && arg3) {
5753 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5754 goto efault;
5755 old_act->_sa_handler = oact._sa_handler;
5756 old_act->sa_mask = oact.sa_mask.sig[0];
5757 old_act->sa_flags = oact.sa_flags;
5758 old_act->sa_restorer = oact.sa_restorer;
5759 unlock_user_struct(old_act, arg3, 1);
5761 #endif
5763 break;
5764 #endif
5765 case TARGET_NR_rt_sigaction:
5767 #if defined(TARGET_ALPHA)
5768 struct target_sigaction act, oact, *pact = 0;
5769 struct target_rt_sigaction *rt_act;
5770 /* ??? arg4 == sizeof(sigset_t). */
5771 if (arg2) {
5772 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5773 goto efault;
5774 act._sa_handler = rt_act->_sa_handler;
5775 act.sa_mask = rt_act->sa_mask;
5776 act.sa_flags = rt_act->sa_flags;
5777 act.sa_restorer = arg5;
5778 unlock_user_struct(rt_act, arg2, 0);
5779 pact = &act;
5781 ret = get_errno(do_sigaction(arg1, pact, &oact));
5782 if (!is_error(ret) && arg3) {
5783 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5784 goto efault;
5785 rt_act->_sa_handler = oact._sa_handler;
5786 rt_act->sa_mask = oact.sa_mask;
5787 rt_act->sa_flags = oact.sa_flags;
5788 unlock_user_struct(rt_act, arg3, 1);
5790 #else
5791 struct target_sigaction *act;
5792 struct target_sigaction *oact;
5794 if (arg2) {
5795 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5796 goto efault;
5797 } else
5798 act = NULL;
5799 if (arg3) {
5800 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5801 ret = -TARGET_EFAULT;
5802 goto rt_sigaction_fail;
5804 } else
5805 oact = NULL;
5806 ret = get_errno(do_sigaction(arg1, act, oact));
5807 rt_sigaction_fail:
5808 if (act)
5809 unlock_user_struct(act, arg2, 0);
5810 if (oact)
5811 unlock_user_struct(oact, arg3, 1);
5812 #endif
5814 break;
5815 #ifdef TARGET_NR_sgetmask /* not on alpha */
5816 case TARGET_NR_sgetmask:
5818 sigset_t cur_set;
5819 abi_ulong target_set;
5820 sigprocmask(0, NULL, &cur_set);
5821 host_to_target_old_sigset(&target_set, &cur_set);
5822 ret = target_set;
5824 break;
5825 #endif
5826 #ifdef TARGET_NR_ssetmask /* not on alpha */
5827 case TARGET_NR_ssetmask:
5829 sigset_t set, oset, cur_set;
5830 abi_ulong target_set = arg1;
5831 sigprocmask(0, NULL, &cur_set);
5832 target_to_host_old_sigset(&set, &target_set);
5833 sigorset(&set, &set, &cur_set);
5834 sigprocmask(SIG_SETMASK, &set, &oset);
5835 host_to_target_old_sigset(&target_set, &oset);
5836 ret = target_set;
5838 break;
5839 #endif
5840 #ifdef TARGET_NR_sigprocmask
5841 case TARGET_NR_sigprocmask:
5843 #if defined(TARGET_ALPHA)
5844 sigset_t set, oldset;
5845 abi_ulong mask;
5846 int how;
5848 switch (arg1) {
5849 case TARGET_SIG_BLOCK:
5850 how = SIG_BLOCK;
5851 break;
5852 case TARGET_SIG_UNBLOCK:
5853 how = SIG_UNBLOCK;
5854 break;
5855 case TARGET_SIG_SETMASK:
5856 how = SIG_SETMASK;
5857 break;
5858 default:
5859 ret = -TARGET_EINVAL;
5860 goto fail;
5862 mask = arg2;
5863 target_to_host_old_sigset(&set, &mask);
5865 ret = get_errno(sigprocmask(how, &set, &oldset));
5866 if (!is_error(ret)) {
5867 host_to_target_old_sigset(&mask, &oldset);
5868 ret = mask;
5869 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5871 #else
5872 sigset_t set, oldset, *set_ptr;
5873 int how;
5875 if (arg2) {
5876 switch (arg1) {
5877 case TARGET_SIG_BLOCK:
5878 how = SIG_BLOCK;
5879 break;
5880 case TARGET_SIG_UNBLOCK:
5881 how = SIG_UNBLOCK;
5882 break;
5883 case TARGET_SIG_SETMASK:
5884 how = SIG_SETMASK;
5885 break;
5886 default:
5887 ret = -TARGET_EINVAL;
5888 goto fail;
5890 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5891 goto efault;
5892 target_to_host_old_sigset(&set, p);
5893 unlock_user(p, arg2, 0);
5894 set_ptr = &set;
5895 } else {
5896 how = 0;
5897 set_ptr = NULL;
5899 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5900 if (!is_error(ret) && arg3) {
5901 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5902 goto efault;
5903 host_to_target_old_sigset(p, &oldset);
5904 unlock_user(p, arg3, sizeof(target_sigset_t));
5906 #endif
5908 break;
5909 #endif
5910 case TARGET_NR_rt_sigprocmask:
5912 int how = arg1;
5913 sigset_t set, oldset, *set_ptr;
5915 if (arg2) {
5916 switch(how) {
5917 case TARGET_SIG_BLOCK:
5918 how = SIG_BLOCK;
5919 break;
5920 case TARGET_SIG_UNBLOCK:
5921 how = SIG_UNBLOCK;
5922 break;
5923 case TARGET_SIG_SETMASK:
5924 how = SIG_SETMASK;
5925 break;
5926 default:
5927 ret = -TARGET_EINVAL;
5928 goto fail;
5930 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5931 goto efault;
5932 target_to_host_sigset(&set, p);
5933 unlock_user(p, arg2, 0);
5934 set_ptr = &set;
5935 } else {
5936 how = 0;
5937 set_ptr = NULL;
5939 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5940 if (!is_error(ret) && arg3) {
5941 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5942 goto efault;
5943 host_to_target_sigset(p, &oldset);
5944 unlock_user(p, arg3, sizeof(target_sigset_t));
5947 break;
5948 #ifdef TARGET_NR_sigpending
5949 case TARGET_NR_sigpending:
5951 sigset_t set;
5952 ret = get_errno(sigpending(&set));
5953 if (!is_error(ret)) {
5954 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5955 goto efault;
5956 host_to_target_old_sigset(p, &set);
5957 unlock_user(p, arg1, sizeof(target_sigset_t));
5960 break;
5961 #endif
5962 case TARGET_NR_rt_sigpending:
5964 sigset_t set;
5965 ret = get_errno(sigpending(&set));
5966 if (!is_error(ret)) {
5967 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5968 goto efault;
5969 host_to_target_sigset(p, &set);
5970 unlock_user(p, arg1, sizeof(target_sigset_t));
5973 break;
5974 #ifdef TARGET_NR_sigsuspend
5975 case TARGET_NR_sigsuspend:
5977 sigset_t set;
5978 #if defined(TARGET_ALPHA)
5979 abi_ulong mask = arg1;
5980 target_to_host_old_sigset(&set, &mask);
5981 #else
5982 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5983 goto efault;
5984 target_to_host_old_sigset(&set, p);
5985 unlock_user(p, arg1, 0);
5986 #endif
5987 ret = get_errno(sigsuspend(&set));
5989 break;
5990 #endif
5991 case TARGET_NR_rt_sigsuspend:
5993 sigset_t set;
5994 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5995 goto efault;
5996 target_to_host_sigset(&set, p);
5997 unlock_user(p, arg1, 0);
5998 ret = get_errno(sigsuspend(&set));
6000 break;
6001 case TARGET_NR_rt_sigtimedwait:
6003 sigset_t set;
6004 struct timespec uts, *puts;
6005 siginfo_t uinfo;
6007 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6008 goto efault;
6009 target_to_host_sigset(&set, p);
6010 unlock_user(p, arg1, 0);
6011 if (arg3) {
6012 puts = &uts;
6013 target_to_host_timespec(puts, arg3);
6014 } else {
6015 puts = NULL;
6017 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6018 if (!is_error(ret) && arg2) {
6019 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6020 goto efault;
6021 host_to_target_siginfo(p, &uinfo);
6022 unlock_user(p, arg2, sizeof(target_siginfo_t));
6025 break;
6026 case TARGET_NR_rt_sigqueueinfo:
6028 siginfo_t uinfo;
6029 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6030 goto efault;
6031 target_to_host_siginfo(&uinfo, p);
6032 unlock_user(p, arg1, 0);
6033 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6035 break;
6036 #ifdef TARGET_NR_sigreturn
6037 case TARGET_NR_sigreturn:
6038 /* NOTE: ret is eax, so no transcoding needs to be done */
6039 ret = do_sigreturn(cpu_env);
6040 break;
6041 #endif
6042 case TARGET_NR_rt_sigreturn:
6043 /* NOTE: ret is eax, so no transcoding needs to be done */
6044 ret = do_rt_sigreturn(cpu_env);
6045 break;
6046 case TARGET_NR_sethostname:
6047 if (!(p = lock_user_string(arg1)))
6048 goto efault;
6049 ret = get_errno(sethostname(p, arg2));
6050 unlock_user(p, arg1, 0);
6051 break;
6052 case TARGET_NR_setrlimit:
6054 int resource = target_to_host_resource(arg1);
6055 struct target_rlimit *target_rlim;
6056 struct rlimit rlim;
6057 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6058 goto efault;
6059 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6060 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6061 unlock_user_struct(target_rlim, arg2, 0);
6062 ret = get_errno(setrlimit(resource, &rlim));
6064 break;
6065 case TARGET_NR_getrlimit:
6067 int resource = target_to_host_resource(arg1);
6068 struct target_rlimit *target_rlim;
6069 struct rlimit rlim;
6071 ret = get_errno(getrlimit(resource, &rlim));
6072 if (!is_error(ret)) {
6073 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6074 goto efault;
6075 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6076 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6077 unlock_user_struct(target_rlim, arg2, 1);
6080 break;
6081 case TARGET_NR_getrusage:
6083 struct rusage rusage;
6084 ret = get_errno(getrusage(arg1, &rusage));
6085 if (!is_error(ret)) {
6086 host_to_target_rusage(arg2, &rusage);
6089 break;
6090 case TARGET_NR_gettimeofday:
6092 struct timeval tv;
6093 ret = get_errno(gettimeofday(&tv, NULL));
6094 if (!is_error(ret)) {
6095 if (copy_to_user_timeval(arg1, &tv))
6096 goto efault;
6099 break;
6100 case TARGET_NR_settimeofday:
6102 struct timeval tv;
6103 if (copy_from_user_timeval(&tv, arg1))
6104 goto efault;
6105 ret = get_errno(settimeofday(&tv, NULL));
6107 break;
6108 #if defined(TARGET_NR_select)
6109 case TARGET_NR_select:
6110 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6111 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6112 #else
6114 struct target_sel_arg_struct *sel;
6115 abi_ulong inp, outp, exp, tvp;
6116 long nsel;
6118 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6119 goto efault;
6120 nsel = tswapal(sel->n);
6121 inp = tswapal(sel->inp);
6122 outp = tswapal(sel->outp);
6123 exp = tswapal(sel->exp);
6124 tvp = tswapal(sel->tvp);
6125 unlock_user_struct(sel, arg1, 0);
6126 ret = do_select(nsel, inp, outp, exp, tvp);
6128 #endif
6129 break;
6130 #endif
6131 #ifdef TARGET_NR_pselect6
6132 case TARGET_NR_pselect6:
6134 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6135 fd_set rfds, wfds, efds;
6136 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6137 struct timespec ts, *ts_ptr;
6139 /*
6140 * The 6th arg is actually two args smashed together,
6141 * so we cannot use the C library.
6142 */
6143 sigset_t set;
6144 struct {
6145 sigset_t *set;
6146 size_t size;
6147 } sig, *sig_ptr;
6149 abi_ulong arg_sigset, arg_sigsize, *arg7;
6150 target_sigset_t *target_sigset;
6152 n = arg1;
6153 rfd_addr = arg2;
6154 wfd_addr = arg3;
6155 efd_addr = arg4;
6156 ts_addr = arg5;
6158 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6159 if (ret) {
6160 goto fail;
6162 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6163 if (ret) {
6164 goto fail;
6166 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6167 if (ret) {
6168 goto fail;
6171 /*
6172 * This takes a timespec, and not a timeval, so we cannot
6173 * use the do_select() helper ...
6174 */
6175 if (ts_addr) {
6176 if (target_to_host_timespec(&ts, ts_addr)) {
6177 goto efault;
6179 ts_ptr = &ts;
6180 } else {
6181 ts_ptr = NULL;
6184 /* Extract the two packed args for the sigset */
6185 if (arg6) {
6186 sig_ptr = &sig;
6187 sig.size = _NSIG / 8;
6189 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6190 if (!arg7) {
6191 goto efault;
6193 arg_sigset = tswapal(arg7[0]);
6194 arg_sigsize = tswapal(arg7[1]);
6195 unlock_user(arg7, arg6, 0);
6197 if (arg_sigset) {
6198 sig.set = &set;
6199 if (arg_sigsize != sizeof(*target_sigset)) {
6200 /* Like the kernel, we enforce correct size sigsets */
6201 ret = -TARGET_EINVAL;
6202 goto fail;
6204 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6205 sizeof(*target_sigset), 1);
6206 if (!target_sigset) {
6207 goto efault;
6209 target_to_host_sigset(&set, target_sigset);
6210 unlock_user(target_sigset, arg_sigset, 0);
6211 } else {
6212 sig.set = NULL;
6214 } else {
6215 sig_ptr = NULL;
6218 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6219 ts_ptr, sig_ptr));
6221 if (!is_error(ret)) {
6222 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6223 goto efault;
6224 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6225 goto efault;
6226 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6227 goto efault;
6229 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6230 goto efault;
6233 break;
6234 #endif
6235 case TARGET_NR_symlink:
6237 void *p2;
6238 p = lock_user_string(arg1);
6239 p2 = lock_user_string(arg2);
6240 if (!p || !p2)
6241 ret = -TARGET_EFAULT;
6242 else
6243 ret = get_errno(symlink(p, p2));
6244 unlock_user(p2, arg2, 0);
6245 unlock_user(p, arg1, 0);
6247 break;
6248 #if defined(TARGET_NR_symlinkat)
6249 case TARGET_NR_symlinkat:
6251 void *p2;
6252 p = lock_user_string(arg1);
6253 p2 = lock_user_string(arg3);
6254 if (!p || !p2)
6255 ret = -TARGET_EFAULT;
6256 else
6257 ret = get_errno(symlinkat(p, arg2, p2));
6258 unlock_user(p2, arg3, 0);
6259 unlock_user(p, arg1, 0);
6261 break;
6262 #endif
6263 #ifdef TARGET_NR_oldlstat
6264 case TARGET_NR_oldlstat:
6265 goto unimplemented;
6266 #endif
6267 case TARGET_NR_readlink:
6269 void *p2, *temp;
6270 p = lock_user_string(arg1);
6271 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6272 if (!p || !p2)
6273 ret = -TARGET_EFAULT;
6274 else {
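/* Special-case reads of /proc/self/exe so the guest sees the path of the binary being emulated rather than the QEMU executable itself. */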
6275 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
6276 char real[PATH_MAX];
6277 temp = realpath(exec_path, real);
6278 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
6279 snprintf((char *)p2, arg3, "%s", real);
6281 else
6282 ret = get_errno(readlink(path(p), p2, arg3));
6284 unlock_user(p2, arg2, ret);
6285 unlock_user(p, arg1, 0);
6287 break;
6288 #if defined(TARGET_NR_readlinkat)
6289 case TARGET_NR_readlinkat:
6291 void *p2;
6292 p = lock_user_string(arg2);
6293 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6294 if (!p || !p2)
6295 ret = -TARGET_EFAULT;
6296 else
6297 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6298 unlock_user(p2, arg3, ret);
6299 unlock_user(p, arg2, 0);
6301 break;
6302 #endif
6303 #ifdef TARGET_NR_uselib
6304 case TARGET_NR_uselib:
6305 goto unimplemented;
6306 #endif
6307 #ifdef TARGET_NR_swapon
6308 case TARGET_NR_swapon:
6309 if (!(p = lock_user_string(arg1)))
6310 goto efault;
6311 ret = get_errno(swapon(p, arg2));
6312 unlock_user(p, arg1, 0);
6313 break;
6314 #endif
6315 case TARGET_NR_reboot:
6316 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6317 /* arg4 must be ignored in all other cases */
6318 p = lock_user_string(arg4);
6319 if (!p) {
6320 goto efault;
6322 ret = get_errno(reboot(arg1, arg2, arg3, p));
6323 unlock_user(p, arg4, 0);
6324 } else {
6325 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6327 break;
6328 #ifdef TARGET_NR_readdir
6329 case TARGET_NR_readdir:
6330 goto unimplemented;
6331 #endif
6332 #ifdef TARGET_NR_mmap
6333 case TARGET_NR_mmap:
6334 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6335 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6336 || defined(TARGET_S390X)
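/* On these targets mmap receives a single pointer to a block of six arguments in guest memory, so fetch and byte-swap them before calling target_mmap(). */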
6338 abi_ulong *v;
6339 abi_ulong v1, v2, v3, v4, v5, v6;
6340 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6341 goto efault;
6342 v1 = tswapal(v[0]);
6343 v2 = tswapal(v[1]);
6344 v3 = tswapal(v[2]);
6345 v4 = tswapal(v[3]);
6346 v5 = tswapal(v[4]);
6347 v6 = tswapal(v[5]);
6348 unlock_user(v, arg1, 0);
6349 ret = get_errno(target_mmap(v1, v2, v3,
6350 target_to_host_bitmask(v4, mmap_flags_tbl),
6351 v5, v6));
6353 #else
6354 ret = get_errno(target_mmap(arg1, arg2, arg3,
6355 target_to_host_bitmask(arg4, mmap_flags_tbl),
6356 arg5,
6357 arg6));
6358 #endif
6359 break;
6360 #endif
6361 #ifdef TARGET_NR_mmap2
6362 case TARGET_NR_mmap2:
6363 #ifndef MMAP_SHIFT
6364 #define MMAP_SHIFT 12
6365 #endif
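/* mmap2 passes the file offset in page-sized units rather than bytes, so scale it back up before handing it to target_mmap(). */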
6366 ret = get_errno(target_mmap(arg1, arg2, arg3,
6367 target_to_host_bitmask(arg4, mmap_flags_tbl),
6368 arg5,
6369 arg6 << MMAP_SHIFT));
6370 break;
6371 #endif
6372 case TARGET_NR_munmap:
6373 ret = get_errno(target_munmap(arg1, arg2));
6374 break;
6375 case TARGET_NR_mprotect:
6377 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6378 /* Special hack to detect libc making the stack executable. */
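/* If PROT_GROWSDOWN is requested for an address inside the guest stack, drop the flag and widen the range down to the stack limit before calling target_mprotect(). */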
6379 if ((arg3 & PROT_GROWSDOWN)
6380 && arg1 >= ts->info->stack_limit
6381 && arg1 <= ts->info->start_stack) {
6382 arg3 &= ~PROT_GROWSDOWN;
6383 arg2 = arg2 + arg1 - ts->info->stack_limit;
6384 arg1 = ts->info->stack_limit;
6387 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6388 break;
6389 #ifdef TARGET_NR_mremap
6390 case TARGET_NR_mremap:
6391 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6392 break;
6393 #endif
6394 /* ??? msync/mlock/munlock are broken for softmmu. */
6395 #ifdef TARGET_NR_msync
6396 case TARGET_NR_msync:
6397 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6398 break;
6399 #endif
6400 #ifdef TARGET_NR_mlock
6401 case TARGET_NR_mlock:
6402 ret = get_errno(mlock(g2h(arg1), arg2));
6403 break;
6404 #endif
6405 #ifdef TARGET_NR_munlock
6406 case TARGET_NR_munlock:
6407 ret = get_errno(munlock(g2h(arg1), arg2));
6408 break;
6409 #endif
6410 #ifdef TARGET_NR_mlockall
6411 case TARGET_NR_mlockall:
6412 ret = get_errno(mlockall(arg1));
6413 break;
6414 #endif
6415 #ifdef TARGET_NR_munlockall
6416 case TARGET_NR_munlockall:
6417 ret = get_errno(munlockall());
6418 break;
6419 #endif
6420 case TARGET_NR_truncate:
6421 if (!(p = lock_user_string(arg1)))
6422 goto efault;
6423 ret = get_errno(truncate(p, arg2));
6424 unlock_user(p, arg1, 0);
6425 break;
6426 case TARGET_NR_ftruncate:
6427 ret = get_errno(ftruncate(arg1, arg2));
6428 break;
6429 case TARGET_NR_fchmod:
6430 ret = get_errno(fchmod(arg1, arg2));
6431 break;
6432 #if defined(TARGET_NR_fchmodat)
6433 case TARGET_NR_fchmodat:
6434 if (!(p = lock_user_string(arg2)))
6435 goto efault;
6436 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6437 unlock_user(p, arg2, 0);
6438 break;
6439 #endif
6440 case TARGET_NR_getpriority:
6441 /* Note that negative values are valid for getpriority, so we must
6442 differentiate based on errno settings. */
6443 errno = 0;
6444 ret = getpriority(arg1, arg2);
6445 if (ret == -1 && errno != 0) {
6446 ret = -host_to_target_errno(errno);
6447 break;
6449 #ifdef TARGET_ALPHA
6450 /* Return value is the unbiased priority. Signal no error. */
6451 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6452 #else
6453 /* Return value is a biased priority to avoid negative numbers. */
6454 ret = 20 - ret;
6455 #endif
6456 break;
6457 case TARGET_NR_setpriority:
6458 ret = get_errno(setpriority(arg1, arg2, arg3));
6459 break;
6460 #ifdef TARGET_NR_profil
6461 case TARGET_NR_profil:
6462 goto unimplemented;
6463 #endif
6464 case TARGET_NR_statfs:
6465 if (!(p = lock_user_string(arg1)))
6466 goto efault;
6467 ret = get_errno(statfs(path(p), &stfs));
6468 unlock_user(p, arg1, 0);
6469 convert_statfs:
6470 if (!is_error(ret)) {
6471 struct target_statfs *target_stfs;
6473 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6474 goto efault;
6475 __put_user(stfs.f_type, &target_stfs->f_type);
6476 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6477 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6478 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6479 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6480 __put_user(stfs.f_files, &target_stfs->f_files);
6481 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6482 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6483 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6484 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6485 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6486 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6487 unlock_user_struct(target_stfs, arg2, 1);
6489 break;
6490 case TARGET_NR_fstatfs:
6491 ret = get_errno(fstatfs(arg1, &stfs));
6492 goto convert_statfs;
6493 #ifdef TARGET_NR_statfs64
6494 case TARGET_NR_statfs64:
6495 if (!(p = lock_user_string(arg1)))
6496 goto efault;
6497 ret = get_errno(statfs(path(p), &stfs));
6498 unlock_user(p, arg1, 0);
6499 convert_statfs64:
6500 if (!is_error(ret)) {
6501 struct target_statfs64 *target_stfs;
6503 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6504 goto efault;
6505 __put_user(stfs.f_type, &target_stfs->f_type);
6506 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6507 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6508 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6509 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6510 __put_user(stfs.f_files, &target_stfs->f_files);
6511 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6512 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6513 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6514 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6515 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6516 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6517 unlock_user_struct(target_stfs, arg3, 1);
6519 break;
6520 case TARGET_NR_fstatfs64:
6521 ret = get_errno(fstatfs(arg1, &stfs));
6522 goto convert_statfs64;
6523 #endif
6524 #ifdef TARGET_NR_ioperm
6525 case TARGET_NR_ioperm:
6526 goto unimplemented;
6527 #endif
6528 #ifdef TARGET_NR_socketcall
6529 case TARGET_NR_socketcall:
6530 ret = do_socketcall(arg1, arg2);
6531 break;
6532 #endif
6533 #ifdef TARGET_NR_accept
6534 case TARGET_NR_accept:
6535 ret = do_accept4(arg1, arg2, arg3, 0);
6536 break;
6537 #endif
6538 #ifdef TARGET_NR_accept4
6539 case TARGET_NR_accept4:
6540 #ifdef CONFIG_ACCEPT4
6541 ret = do_accept4(arg1, arg2, arg3, arg4);
6542 #else
6543 goto unimplemented;
6544 #endif
6545 break;
6546 #endif
6547 #ifdef TARGET_NR_bind
6548 case TARGET_NR_bind:
6549 ret = do_bind(arg1, arg2, arg3);
6550 break;
6551 #endif
6552 #ifdef TARGET_NR_connect
6553 case TARGET_NR_connect:
6554 ret = do_connect(arg1, arg2, arg3);
6555 break;
6556 #endif
6557 #ifdef TARGET_NR_getpeername
6558 case TARGET_NR_getpeername:
6559 ret = do_getpeername(arg1, arg2, arg3);
6560 break;
6561 #endif
6562 #ifdef TARGET_NR_getsockname
6563 case TARGET_NR_getsockname:
6564 ret = do_getsockname(arg1, arg2, arg3);
6565 break;
6566 #endif
6567 #ifdef TARGET_NR_getsockopt
6568 case TARGET_NR_getsockopt:
6569 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6570 break;
6571 #endif
6572 #ifdef TARGET_NR_listen
6573 case TARGET_NR_listen:
6574 ret = get_errno(listen(arg1, arg2));
6575 break;
6576 #endif
6577 #ifdef TARGET_NR_recv
6578 case TARGET_NR_recv:
6579 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6580 break;
6581 #endif
6582 #ifdef TARGET_NR_recvfrom
6583 case TARGET_NR_recvfrom:
6584 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6585 break;
6586 #endif
6587 #ifdef TARGET_NR_recvmsg
6588 case TARGET_NR_recvmsg:
6589 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6590 break;
6591 #endif
6592 #ifdef TARGET_NR_send
6593 case TARGET_NR_send:
6594 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6595 break;
6596 #endif
6597 #ifdef TARGET_NR_sendmsg
6598 case TARGET_NR_sendmsg:
6599 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6600 break;
6601 #endif
6602 #ifdef TARGET_NR_sendto
6603 case TARGET_NR_sendto:
6604 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6605 break;
6606 #endif
6607 #ifdef TARGET_NR_shutdown
6608 case TARGET_NR_shutdown:
6609 ret = get_errno(shutdown(arg1, arg2));
6610 break;
6611 #endif
6612 #ifdef TARGET_NR_socket
6613 case TARGET_NR_socket:
6614 ret = do_socket(arg1, arg2, arg3);
6615 break;
6616 #endif
6617 #ifdef TARGET_NR_socketpair
6618 case TARGET_NR_socketpair:
6619 ret = do_socketpair(arg1, arg2, arg3, arg4);
6620 break;
6621 #endif
6622 #ifdef TARGET_NR_setsockopt
6623 case TARGET_NR_setsockopt:
6624 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6625 break;
6626 #endif
6628 case TARGET_NR_syslog:
6629 if (!(p = lock_user_string(arg2)))
6630 goto efault;
6631 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6632 unlock_user(p, arg2, 0);
6633 break;
6635 case TARGET_NR_setitimer:
6637 struct itimerval value, ovalue, *pvalue;
6639 if (arg2) {
6640 pvalue = &value;
6641 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6642 || copy_from_user_timeval(&pvalue->it_value,
6643 arg2 + sizeof(struct target_timeval)))
6644 goto efault;
6645 } else {
6646 pvalue = NULL;
6648 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6649 if (!is_error(ret) && arg3) {
6650 if (copy_to_user_timeval(arg3,
6651 &ovalue.it_interval)
6652 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6653 &ovalue.it_value))
6654 goto efault;
6657 break;
6658 case TARGET_NR_getitimer:
6660 struct itimerval value;
6662 ret = get_errno(getitimer(arg1, &value));
6663 if (!is_error(ret) && arg2) {
6664 if (copy_to_user_timeval(arg2,
6665 &value.it_interval)
6666 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6667 &value.it_value))
6668 goto efault;
6671 break;
6672 case TARGET_NR_stat:
6673 if (!(p = lock_user_string(arg1)))
6674 goto efault;
6675 ret = get_errno(stat(path(p), &st));
6676 unlock_user(p, arg1, 0);
6677 goto do_stat;
6678 case TARGET_NR_lstat:
6679 if (!(p = lock_user_string(arg1)))
6680 goto efault;
6681 ret = get_errno(lstat(path(p), &st));
6682 unlock_user(p, arg1, 0);
6683 goto do_stat;
6684 case TARGET_NR_fstat:
6686 ret = get_errno(fstat(arg1, &st));
6687 do_stat:
6688 if (!is_error(ret)) {
6689 struct target_stat *target_st;
6691 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6692 goto efault;
6693 memset(target_st, 0, sizeof(*target_st));
6694 __put_user(st.st_dev, &target_st->st_dev);
6695 __put_user(st.st_ino, &target_st->st_ino);
6696 __put_user(st.st_mode, &target_st->st_mode);
6697 __put_user(st.st_uid, &target_st->st_uid);
6698 __put_user(st.st_gid, &target_st->st_gid);
6699 __put_user(st.st_nlink, &target_st->st_nlink);
6700 __put_user(st.st_rdev, &target_st->st_rdev);
6701 __put_user(st.st_size, &target_st->st_size);
6702 __put_user(st.st_blksize, &target_st->st_blksize);
6703 __put_user(st.st_blocks, &target_st->st_blocks);
6704 __put_user(st.st_atime, &target_st->target_st_atime);
6705 __put_user(st.st_mtime, &target_st->target_st_mtime);
6706 __put_user(st.st_ctime, &target_st->target_st_ctime);
6707 unlock_user_struct(target_st, arg2, 1);
6710 break;
6711 #ifdef TARGET_NR_olduname
6712 case TARGET_NR_olduname:
6713 goto unimplemented;
6714 #endif
6715 #ifdef TARGET_NR_iopl
6716 case TARGET_NR_iopl:
6717 goto unimplemented;
6718 #endif
6719 case TARGET_NR_vhangup:
6720 ret = get_errno(vhangup());
6721 break;
6722 #ifdef TARGET_NR_idle
6723 case TARGET_NR_idle:
6724 goto unimplemented;
6725 #endif
6726 #ifdef TARGET_NR_syscall
6727 case TARGET_NR_syscall:
6728 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6729 arg6, arg7, arg8, 0);
6730 break;
6731 #endif
6732 case TARGET_NR_wait4:
6734 int status;
6735 abi_long status_ptr = arg2;
6736 struct rusage rusage, *rusage_ptr;
6737 abi_ulong target_rusage = arg4;
6738 if (target_rusage)
6739 rusage_ptr = &rusage;
6740 else
6741 rusage_ptr = NULL;
6742 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6743 if (!is_error(ret)) {
6744 if (status_ptr && ret) {
6745 status = host_to_target_waitstatus(status);
6746 if (put_user_s32(status, status_ptr))
6747 goto efault;
6749 if (target_rusage)
6750 host_to_target_rusage(target_rusage, &rusage);
6753 break;
6754 #ifdef TARGET_NR_swapoff
6755 case TARGET_NR_swapoff:
6756 if (!(p = lock_user_string(arg1)))
6757 goto efault;
6758 ret = get_errno(swapoff(p));
6759 unlock_user(p, arg1, 0);
6760 break;
6761 #endif
6762 case TARGET_NR_sysinfo:
6764 struct target_sysinfo *target_value;
6765 struct sysinfo value;
6766 ret = get_errno(sysinfo(&value));
6767 if (!is_error(ret) && arg1)
6769 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6770 goto efault;
6771 __put_user(value.uptime, &target_value->uptime);
6772 __put_user(value.loads[0], &target_value->loads[0]);
6773 __put_user(value.loads[1], &target_value->loads[1]);
6774 __put_user(value.loads[2], &target_value->loads[2]);
6775 __put_user(value.totalram, &target_value->totalram);
6776 __put_user(value.freeram, &target_value->freeram);
6777 __put_user(value.sharedram, &target_value->sharedram);
6778 __put_user(value.bufferram, &target_value->bufferram);
6779 __put_user(value.totalswap, &target_value->totalswap);
6780 __put_user(value.freeswap, &target_value->freeswap);
6781 __put_user(value.procs, &target_value->procs);
6782 __put_user(value.totalhigh, &target_value->totalhigh);
6783 __put_user(value.freehigh, &target_value->freehigh);
6784 __put_user(value.mem_unit, &target_value->mem_unit);
6785 unlock_user_struct(target_value, arg1, 1);
6788 break;
6789 #ifdef TARGET_NR_ipc
6790 case TARGET_NR_ipc:
6791 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6792 break;
6793 #endif
6794 #ifdef TARGET_NR_semget
6795 case TARGET_NR_semget:
6796 ret = get_errno(semget(arg1, arg2, arg3));
6797 break;
6798 #endif
6799 #ifdef TARGET_NR_semop
6800 case TARGET_NR_semop:
6801 ret = do_semop(arg1, arg2, arg3);
6802 break;
6803 #endif
6804 #ifdef TARGET_NR_semctl
6805 case TARGET_NR_semctl:
6806 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6807 break;
6808 #endif
6809 #ifdef TARGET_NR_msgctl
6810 case TARGET_NR_msgctl:
6811 ret = do_msgctl(arg1, arg2, arg3);
6812 break;
6813 #endif
6814 #ifdef TARGET_NR_msgget
6815 case TARGET_NR_msgget:
6816 ret = get_errno(msgget(arg1, arg2));
6817 break;
6818 #endif
6819 #ifdef TARGET_NR_msgrcv
6820 case TARGET_NR_msgrcv:
6821 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6822 break;
6823 #endif
6824 #ifdef TARGET_NR_msgsnd
6825 case TARGET_NR_msgsnd:
6826 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6827 break;
6828 #endif
6829 #ifdef TARGET_NR_shmget
6830 case TARGET_NR_shmget:
6831 ret = get_errno(shmget(arg1, arg2, arg3));
6832 break;
6833 #endif
6834 #ifdef TARGET_NR_shmctl
6835 case TARGET_NR_shmctl:
6836 ret = do_shmctl(arg1, arg2, arg3);
6837 break;
6838 #endif
6839 #ifdef TARGET_NR_shmat
6840 case TARGET_NR_shmat:
6841 ret = do_shmat(arg1, arg2, arg3);
6842 break;
6843 #endif
6844 #ifdef TARGET_NR_shmdt
6845 case TARGET_NR_shmdt:
6846 ret = do_shmdt(arg1);
6847 break;
6848 #endif
6849 case TARGET_NR_fsync:
6850 ret = get_errno(fsync(arg1));
6851 break;
6852 case TARGET_NR_clone:
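/* The kernel's clone() argument order varies between target architectures, so the arguments are permuted per target before being handed to do_fork(). */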
6853 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6854 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6855 #elif defined(TARGET_CRIS)
6856 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6857 #elif defined(TARGET_MICROBLAZE)
6858 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6859 #elif defined(TARGET_S390X)
6860 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6861 #else
6862 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6863 #endif
6864 break;
6865 #ifdef __NR_exit_group
6866 /* new thread calls */
6867 case TARGET_NR_exit_group:
6868 #ifdef TARGET_GPROF
6869 _mcleanup();
6870 #endif
6871 gdb_exit(cpu_env, arg1);
6872 ret = get_errno(exit_group(arg1));
6873 break;
6874 #endif
6875 case TARGET_NR_setdomainname:
6876 if (!(p = lock_user_string(arg1)))
6877 goto efault;
6878 ret = get_errno(setdomainname(p, arg2));
6879 unlock_user(p, arg1, 0);
6880 break;
6881 case TARGET_NR_uname:
6882 /* no need to transcode because we use the linux syscall */
6884 struct new_utsname * buf;
6886 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6887 goto efault;
6888 ret = get_errno(sys_uname(buf));
6889 if (!is_error(ret)) {
6890 /* Overwrite the native machine name with whatever is being
6891 emulated. */
6892 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6893 /* Allow the user to override the reported release. */
6894 if (qemu_uname_release && *qemu_uname_release)
6895 strcpy (buf->release, qemu_uname_release);
6897 unlock_user_struct(buf, arg1, 1);
6899 break;
6900 #ifdef TARGET_I386
6901 case TARGET_NR_modify_ldt:
6902 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6903 break;
6904 #if !defined(TARGET_X86_64)
6905 case TARGET_NR_vm86old:
6906 goto unimplemented;
6907 case TARGET_NR_vm86:
6908 ret = do_vm86(cpu_env, arg1, arg2);
6909 break;
6910 #endif
6911 #endif
6912 case TARGET_NR_adjtimex:
6913 goto unimplemented;
6914 #ifdef TARGET_NR_create_module
6915 case TARGET_NR_create_module:
6916 #endif
6917 case TARGET_NR_init_module:
6918 case TARGET_NR_delete_module:
6919 #ifdef TARGET_NR_get_kernel_syms
6920 case TARGET_NR_get_kernel_syms:
6921 #endif
6922 goto unimplemented;
6923 case TARGET_NR_quotactl:
6924 goto unimplemented;
6925 case TARGET_NR_getpgid:
6926 ret = get_errno(getpgid(arg1));
6927 break;
6928 case TARGET_NR_fchdir:
6929 ret = get_errno(fchdir(arg1));
6930 break;
6931 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6932 case TARGET_NR_bdflush:
6933 goto unimplemented;
6934 #endif
6935 #ifdef TARGET_NR_sysfs
6936 case TARGET_NR_sysfs:
6937 goto unimplemented;
6938 #endif
6939 case TARGET_NR_personality:
6940 ret = get_errno(personality(arg1));
6941 break;
6942 #ifdef TARGET_NR_afs_syscall
6943 case TARGET_NR_afs_syscall:
6944 goto unimplemented;
6945 #endif
6946 #ifdef TARGET_NR__llseek /* Not on alpha */
6947 case TARGET_NR__llseek:
6949 int64_t res;
6950 #if !defined(__NR_llseek)
6951 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6952 if (res == -1) {
6953 ret = get_errno(res);
6954 } else {
6955 ret = 0;
6957 #else
6958 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6959 #endif
6960 if ((ret == 0) && put_user_s64(res, arg4)) {
6961 goto efault;
6964 break;
6965 #endif
6966 case TARGET_NR_getdents:
6967 #ifdef __NR_getdents
6968 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
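/* With a 32-bit target on a 64-bit host the host struct linux_dirent has wider d_ino/d_off fields than the target expects, so read into a bounce buffer and repack each entry into the target layout. */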
6970 struct target_dirent *target_dirp;
6971 struct linux_dirent *dirp;
6972 abi_long count = arg3;
6974 dirp = malloc(count);
6975 if (!dirp) {
6976 ret = -TARGET_ENOMEM;
6977 goto fail;
6980 ret = get_errno(sys_getdents(arg1, dirp, count));
6981 if (!is_error(ret)) {
6982 struct linux_dirent *de;
6983 struct target_dirent *tde;
6984 int len = ret;
6985 int reclen, treclen;
6986 int count1, tnamelen;
6988 count1 = 0;
6989 de = dirp;
6990 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6991 goto efault;
6992 tde = target_dirp;
6993 while (len > 0) {
6994 reclen = de->d_reclen;
6995 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
6996 assert(tnamelen >= 0);
6997 treclen = tnamelen + offsetof(struct target_dirent, d_name);
6998 assert(count1 + treclen <= count);
6999 tde->d_reclen = tswap16(treclen);
7000 tde->d_ino = tswapal(de->d_ino);
7001 tde->d_off = tswapal(de->d_off);
7002 memcpy(tde->d_name, de->d_name, tnamelen);
7003 de = (struct linux_dirent *)((char *)de + reclen);
7004 len -= reclen;
7005 tde = (struct target_dirent *)((char *)tde + treclen);
7006 count1 += treclen;
7008 ret = count1;
7009 unlock_user(target_dirp, arg2, ret);
7011 free(dirp);
7013 #else
7015 struct linux_dirent *dirp;
7016 abi_long count = arg3;
7018 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7019 goto efault;
7020 ret = get_errno(sys_getdents(arg1, dirp, count));
7021 if (!is_error(ret)) {
7022 struct linux_dirent *de;
7023 int len = ret;
7024 int reclen;
7025 de = dirp;
7026 while (len > 0) {
7027 reclen = de->d_reclen;
7028 if (reclen > len)
7029 break;
7030 de->d_reclen = tswap16(reclen);
7031 tswapls(&de->d_ino);
7032 tswapls(&de->d_off);
7033 de = (struct linux_dirent *)((char *)de + reclen);
7034 len -= reclen;
7037 unlock_user(dirp, arg2, ret);
7039 #endif
7040 #else
7041 /* Implement getdents in terms of getdents64 */
7043 struct linux_dirent64 *dirp;
7044 abi_long count = arg3;
7046 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7047 if (!dirp) {
7048 goto efault;
7050 ret = get_errno(sys_getdents64(arg1, dirp, count));
7051 if (!is_error(ret)) {
7052 /* Convert the dirent64 structs to target dirent. We do this
7053 * in-place, since we can guarantee that a target_dirent is no
7054 * larger than a dirent64; however this means we have to be
7055 * careful to read everything before writing in the new format.
7056 */
7057 struct linux_dirent64 *de;
7058 struct target_dirent *tde;
7059 int len = ret;
7060 int tlen = 0;
7062 de = dirp;
7063 tde = (struct target_dirent *)dirp;
7064 while (len > 0) {
7065 int namelen, treclen;
7066 int reclen = de->d_reclen;
7067 uint64_t ino = de->d_ino;
7068 int64_t off = de->d_off;
7069 uint8_t type = de->d_type;
7071 namelen = strlen(de->d_name);
7072 treclen = offsetof(struct target_dirent, d_name)
7073 + namelen + 2;
7074 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7076 memmove(tde->d_name, de->d_name, namelen + 1);
7077 tde->d_ino = tswapal(ino);
7078 tde->d_off = tswapal(off);
7079 tde->d_reclen = tswap16(treclen);
7080 /* The target_dirent type is in what was formerly a padding
7081 * byte at the end of the structure:
7082 */
7083 *(((char *)tde) + treclen - 1) = type;
7085 de = (struct linux_dirent64 *)((char *)de + reclen);
7086 tde = (struct target_dirent *)((char *)tde + treclen);
7087 len -= reclen;
7088 tlen += treclen;
7090 ret = tlen;
7092 unlock_user(dirp, arg2, ret);
7094 #endif
7095 break;
7096 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7097 case TARGET_NR_getdents64:
7099 struct linux_dirent64 *dirp;
7100 abi_long count = arg3;
7101 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7102 goto efault;
7103 ret = get_errno(sys_getdents64(arg1, dirp, count));
7104 if (!is_error(ret)) {
7105 struct linux_dirent64 *de;
7106 int len = ret;
7107 int reclen;
7108 de = dirp;
7109 while (len > 0) {
7110 reclen = de->d_reclen;
7111 if (reclen > len)
7112 break;
7113 de->d_reclen = tswap16(reclen);
7114 tswap64s((uint64_t *)&de->d_ino);
7115 tswap64s((uint64_t *)&de->d_off);
7116 de = (struct linux_dirent64 *)((char *)de + reclen);
7117 len -= reclen;
7120 unlock_user(dirp, arg2, ret);
7122 break;
7123 #endif /* TARGET_NR_getdents64 */
7124 #if defined(TARGET_NR__newselect)
7125 case TARGET_NR__newselect:
7126 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7127 break;
7128 #endif
7129 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7130 # ifdef TARGET_NR_poll
7131 case TARGET_NR_poll:
7132 # endif
7133 # ifdef TARGET_NR_ppoll
7134 case TARGET_NR_ppoll:
7135 # endif
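/* poll and ppoll share the conversion of the guest pollfd array below; ppoll additionally converts an optional timespec timeout and signal mask. */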
7137 struct target_pollfd *target_pfd;
7138 unsigned int nfds = arg2;
7139 int timeout = arg3;
7140 struct pollfd *pfd;
7141 unsigned int i;
7143 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7144 if (!target_pfd)
7145 goto efault;
7147 pfd = alloca(sizeof(struct pollfd) * nfds);
7148 for(i = 0; i < nfds; i++) {
7149 pfd[i].fd = tswap32(target_pfd[i].fd);
7150 pfd[i].events = tswap16(target_pfd[i].events);
7153 # ifdef TARGET_NR_ppoll
7154 if (num == TARGET_NR_ppoll) {
7155 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7156 target_sigset_t *target_set;
7157 sigset_t _set, *set = &_set;
7159 if (arg3) {
7160 if (target_to_host_timespec(timeout_ts, arg3)) {
7161 unlock_user(target_pfd, arg1, 0);
7162 goto efault;
7164 } else {
7165 timeout_ts = NULL;
7168 if (arg4) {
7169 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7170 if (!target_set) {
7171 unlock_user(target_pfd, arg1, 0);
7172 goto efault;
7174 target_to_host_sigset(set, target_set);
7175 } else {
7176 set = NULL;
7179 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7181 if (!is_error(ret) && arg3) {
7182 host_to_target_timespec(arg3, timeout_ts);
7184 if (arg4) {
7185 unlock_user(target_set, arg4, 0);
7187 } else
7188 # endif
7189 ret = get_errno(poll(pfd, nfds, timeout));
7191 if (!is_error(ret)) {
7192 for(i = 0; i < nfds; i++) {
7193 target_pfd[i].revents = tswap16(pfd[i].revents);
7196 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7198 break;
7199 #endif
7200 case TARGET_NR_flock:
7201 /* NOTE: the flock constant seems to be the same for every
7202 Linux platform */
7203 ret = get_errno(flock(arg1, arg2));
7204 break;
7205 case TARGET_NR_readv:
7207 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7208 if (vec != NULL) {
7209 ret = get_errno(readv(arg1, vec, arg3));
7210 unlock_iovec(vec, arg2, arg3, 1);
7211 } else {
7212 ret = -host_to_target_errno(errno);
7215 break;
7216 case TARGET_NR_writev:
7218 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7219 if (vec != NULL) {
7220 ret = get_errno(writev(arg1, vec, arg3));
7221 unlock_iovec(vec, arg2, arg3, 0);
7222 } else {
7223 ret = -host_to_target_errno(errno);
7226 break;
7227 case TARGET_NR_getsid:
7228 ret = get_errno(getsid(arg1));
7229 break;
7230 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7231 case TARGET_NR_fdatasync:
7232 ret = get_errno(fdatasync(arg1));
7233 break;
7234 #endif
7235 case TARGET_NR__sysctl:
7236 /* We don't implement this, but ENOTDIR is always a safe
7237 return value. */
7238 ret = -TARGET_ENOTDIR;
7239 break;
7240 case TARGET_NR_sched_getaffinity:
7242 unsigned int mask_size;
7243 unsigned long *mask;
7245 /*
7246 * sched_getaffinity needs multiples of ulong, so need to take
7247 * care of mismatches between target ulong and host ulong sizes.
7248 */
7249 if (arg2 & (sizeof(abi_ulong) - 1)) {
7250 ret = -TARGET_EINVAL;
7251 break;
7253 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7255 mask = alloca(mask_size);
7256 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7258 if (!is_error(ret)) {
7259 if (copy_to_user(arg3, mask, ret)) {
7260 goto efault;
7264 break;
7265 case TARGET_NR_sched_setaffinity:
7267 unsigned int mask_size;
7268 unsigned long *mask;
7270 /*
7271 * sched_setaffinity needs multiples of ulong, so need to take
7272 * care of mismatches between target ulong and host ulong sizes.
7273 */
7274 if (arg2 & (sizeof(abi_ulong) - 1)) {
7275 ret = -TARGET_EINVAL;
7276 break;
7278 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7280 mask = alloca(mask_size);
7281 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7282 goto efault;
7284 memcpy(mask, p, arg2);
7285 unlock_user_struct(p, arg3, 0);
7287 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7289 break;
7290 case TARGET_NR_sched_setparam:
7292 struct sched_param *target_schp;
7293 struct sched_param schp;
7295 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7296 goto efault;
7297 schp.sched_priority = tswap32(target_schp->sched_priority);
7298 unlock_user_struct(target_schp, arg2, 0);
7299 ret = get_errno(sched_setparam(arg1, &schp));
7301 break;
7302 case TARGET_NR_sched_getparam:
7304 struct sched_param *target_schp;
7305 struct sched_param schp;
7306 ret = get_errno(sched_getparam(arg1, &schp));
7307 if (!is_error(ret)) {
7308 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7309 goto efault;
7310 target_schp->sched_priority = tswap32(schp.sched_priority);
7311 unlock_user_struct(target_schp, arg2, 1);
7314 break;
7315 case TARGET_NR_sched_setscheduler:
7317 struct sched_param *target_schp;
7318 struct sched_param schp;
7319 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7320 goto efault;
7321 schp.sched_priority = tswap32(target_schp->sched_priority);
7322 unlock_user_struct(target_schp, arg3, 0);
7323 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7325 break;
7326 case TARGET_NR_sched_getscheduler:
7327 ret = get_errno(sched_getscheduler(arg1));
7328 break;
7329 case TARGET_NR_sched_yield:
7330 ret = get_errno(sched_yield());
7331 break;
7332 case TARGET_NR_sched_get_priority_max:
7333 ret = get_errno(sched_get_priority_max(arg1));
7334 break;
7335 case TARGET_NR_sched_get_priority_min:
7336 ret = get_errno(sched_get_priority_min(arg1));
7337 break;
7338 case TARGET_NR_sched_rr_get_interval:
7340 struct timespec ts;
7341 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7342 if (!is_error(ret)) {
7343 host_to_target_timespec(arg2, &ts);
7346 break;
7347 case TARGET_NR_nanosleep:
7349 struct timespec req, rem;
7350 target_to_host_timespec(&req, arg1);
7351 ret = get_errno(nanosleep(&req, &rem));
7352 if (is_error(ret) && arg2) {
7353 host_to_target_timespec(arg2, &rem);
7356 break;
7357 #ifdef TARGET_NR_query_module
7358 case TARGET_NR_query_module:
7359 goto unimplemented;
7360 #endif
7361 #ifdef TARGET_NR_nfsservctl
7362 case TARGET_NR_nfsservctl:
7363 goto unimplemented;
7364 #endif
7365 case TARGET_NR_prctl:
7366 switch (arg1) {
7367 case PR_GET_PDEATHSIG:
7369 int deathsig;
7370 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7371 if (!is_error(ret) && arg2
7372 && put_user_ual(deathsig, arg2)) {
7373 goto efault;
7375 break;
7377 #ifdef PR_GET_NAME
7378 case PR_GET_NAME:
7380 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7381 if (!name) {
7382 goto efault;
7384 ret = get_errno(prctl(arg1, (unsigned long)name,
7385 arg3, arg4, arg5));
7386 unlock_user(name, arg2, 16);
7387 break;
7389 case PR_SET_NAME:
7391 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7392 if (!name) {
7393 goto efault;
7395 ret = get_errno(prctl(arg1, (unsigned long)name,
7396 arg3, arg4, arg5));
7397 unlock_user(name, arg2, 0);
7398 break;
7400 #endif
7401 default:
7402 /* Most prctl options have no pointer arguments */
7403 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7404 break;
7406 break;
7407 #ifdef TARGET_NR_arch_prctl
7408 case TARGET_NR_arch_prctl:
7409 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7410 ret = do_arch_prctl(cpu_env, arg1, arg2);
7411 break;
7412 #else
7413 goto unimplemented;
7414 #endif
7415 #endif
7416 #ifdef TARGET_NR_pread64
7417 case TARGET_NR_pread64:
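/* On ABIs that pass 64-bit values in aligned register pairs the offset starts one argument slot later, so realign the arguments for pread64/pwrite64. */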
7418 if (regpairs_aligned(cpu_env)) {
7419 arg4 = arg5;
7420 arg5 = arg6;
7422 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7423 goto efault;
7424 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7425 unlock_user(p, arg2, ret);
7426 break;
7427 case TARGET_NR_pwrite64:
7428 if (regpairs_aligned(cpu_env)) {
7429 arg4 = arg5;
7430 arg5 = arg6;
7432 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7433 goto efault;
7434 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7435 unlock_user(p, arg2, 0);
7436 break;
7437 #endif
7438 case TARGET_NR_getcwd:
7439 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7440 goto efault;
7441 ret = get_errno(sys_getcwd1(p, arg2));
7442 unlock_user(p, arg1, ret);
7443 break;
7444 case TARGET_NR_capget:
7445 goto unimplemented;
7446 case TARGET_NR_capset:
7447 goto unimplemented;
7448 case TARGET_NR_sigaltstack:
7449 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7450 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7451 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7452 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7453 break;
7454 #else
7455 goto unimplemented;
7456 #endif
7458 #ifdef CONFIG_SENDFILE
7459 case TARGET_NR_sendfile:
7461 off_t *offp = NULL;
7462 off_t off;
7463 if (arg3) {
7464 ret = get_user_sal(off, arg3);
7465 if (is_error(ret)) {
7466 break;
7468 offp = &off;
7470 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7471 if (!is_error(ret) && arg3) {
7472 abi_long ret2 = put_user_sal(off, arg3);
7473 if (is_error(ret2)) {
7474 ret = ret2;
7477 break;
7479 #ifdef TARGET_NR_sendfile64
7480 case TARGET_NR_sendfile64:
7482 off_t *offp = NULL;
7483 off_t off;
7484 if (arg3) {
7485 ret = get_user_s64(off, arg3);
7486 if (is_error(ret)) {
7487 break;
7489 offp = &off;
7491 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7492 if (!is_error(ret) && arg3) {
7493 abi_long ret2 = put_user_s64(off, arg3);
7494 if (is_error(ret2)) {
7495 ret = ret2;
7498 break;
7500 #endif
7501 #else
7502 case TARGET_NR_sendfile:
7503 #ifdef TARGET_NR_sendfile64
7504 case TARGET_NR_sendfile64:
7505 #endif
7506 goto unimplemented;
7507 #endif
7509 #ifdef TARGET_NR_getpmsg
7510 case TARGET_NR_getpmsg:
7511 goto unimplemented;
7512 #endif
7513 #ifdef TARGET_NR_putpmsg
7514 case TARGET_NR_putpmsg:
7515 goto unimplemented;
7516 #endif
7517 #ifdef TARGET_NR_vfork
7518 case TARGET_NR_vfork:
7519 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7520 0, 0, 0, 0));
7521 break;
7522 #endif
7523 #ifdef TARGET_NR_ugetrlimit
7524 case TARGET_NR_ugetrlimit:
7526 struct rlimit rlim;
7527 int resource = target_to_host_resource(arg1);
7528 ret = get_errno(getrlimit(resource, &rlim));
7529 if (!is_error(ret)) {
7530 struct target_rlimit *target_rlim;
7531 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7532 goto efault;
7533 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7534 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7535 unlock_user_struct(target_rlim, arg2, 1);
7537 break;
7539 #endif
7540 #ifdef TARGET_NR_truncate64
7541 case TARGET_NR_truncate64:
7542 if (!(p = lock_user_string(arg1)))
7543 goto efault;
7544 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7545 unlock_user(p, arg1, 0);
7546 break;
7547 #endif
7548 #ifdef TARGET_NR_ftruncate64
7549 case TARGET_NR_ftruncate64:
7550 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7551 break;
7552 #endif
7553 #ifdef TARGET_NR_stat64
7554 case TARGET_NR_stat64:
7555 if (!(p = lock_user_string(arg1)))
7556 goto efault;
7557 ret = get_errno(stat(path(p), &st));
7558 unlock_user(p, arg1, 0);
7559 if (!is_error(ret))
7560 ret = host_to_target_stat64(cpu_env, arg2, &st);
7561 break;
7562 #endif
7563 #ifdef TARGET_NR_lstat64
7564 case TARGET_NR_lstat64:
7565 if (!(p = lock_user_string(arg1)))
7566 goto efault;
7567 ret = get_errno(lstat(path(p), &st));
7568 unlock_user(p, arg1, 0);
7569 if (!is_error(ret))
7570 ret = host_to_target_stat64(cpu_env, arg2, &st);
7571 break;
7572 #endif
7573 #ifdef TARGET_NR_fstat64
7574 case TARGET_NR_fstat64:
7575 ret = get_errno(fstat(arg1, &st));
7576 if (!is_error(ret))
7577 ret = host_to_target_stat64(cpu_env, arg2, &st);
7578 break;
7579 #endif
7580 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7581 #ifdef TARGET_NR_fstatat64
7582 case TARGET_NR_fstatat64:
7583 #endif
7584 #ifdef TARGET_NR_newfstatat
7585 case TARGET_NR_newfstatat:
7586 #endif
7587 if (!(p = lock_user_string(arg2)))
7588 goto efault;
7589 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
7590 if (!is_error(ret))
7591 ret = host_to_target_stat64(cpu_env, arg3, &st);
7592 break;
7593 #endif
7594 case TARGET_NR_lchown:
7595 if (!(p = lock_user_string(arg1)))
7596 goto efault;
7597 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7598 unlock_user(p, arg1, 0);
7599 break;
7600 #ifdef TARGET_NR_getuid
7601 case TARGET_NR_getuid:
7602 ret = get_errno(high2lowuid(getuid()));
7603 break;
7604 #endif
7605 #ifdef TARGET_NR_getgid
7606 case TARGET_NR_getgid:
7607 ret = get_errno(high2lowgid(getgid()));
7608 break;
7609 #endif
7610 #ifdef TARGET_NR_geteuid
7611 case TARGET_NR_geteuid:
7612 ret = get_errno(high2lowuid(geteuid()));
7613 break;
7614 #endif
7615 #ifdef TARGET_NR_getegid
7616 case TARGET_NR_getegid:
7617 ret = get_errno(high2lowgid(getegid()));
7618 break;
7619 #endif
7620 case TARGET_NR_setreuid:
7621 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7622 break;
7623 case TARGET_NR_setregid:
7624 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7625 break;
7626 case TARGET_NR_getgroups:
7628 int gidsetsize = arg1;
7629 target_id *target_grouplist;
7630 gid_t *grouplist;
7631 int i;
7633 grouplist = alloca(gidsetsize * sizeof(gid_t));
7634 ret = get_errno(getgroups(gidsetsize, grouplist));
7635 if (gidsetsize == 0)
7636 break;
7637 if (!is_error(ret)) {
7638 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
7639 if (!target_grouplist)
7640 goto efault;
7641 for (i = 0; i < ret; i++)
7642 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7643 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
7646 break;
7647 case TARGET_NR_setgroups:
7649 int gidsetsize = arg1;
7650 target_id *target_grouplist;
7651 gid_t *grouplist = NULL;
7652 int i;
7653 if (gidsetsize) {
7654 grouplist = alloca(gidsetsize * sizeof(gid_t));
7655 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
7656 if (!target_grouplist) {
7657 ret = -TARGET_EFAULT;
7658 goto fail;
7660 for (i = 0; i < gidsetsize; i++) {
7661 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7663 unlock_user(target_grouplist, arg2, 0);
7665 ret = get_errno(setgroups(gidsetsize, grouplist));
7667 break;
7668 case TARGET_NR_fchown:
7669 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7670 break;
7671 #if defined(TARGET_NR_fchownat)
7672 case TARGET_NR_fchownat:
7673 if (!(p = lock_user_string(arg2)))
7674 goto efault;
7675 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
7676 low2highgid(arg4), arg5));
7677 unlock_user(p, arg2, 0);
7678 break;
7679 #endif
7680 #ifdef TARGET_NR_setresuid
7681 case TARGET_NR_setresuid:
7682 ret = get_errno(setresuid(low2highuid(arg1),
7683 low2highuid(arg2),
7684 low2highuid(arg3)));
7685 break;
7686 #endif
7687 #ifdef TARGET_NR_getresuid
7688 case TARGET_NR_getresuid:
7690 uid_t ruid, euid, suid;
7691 ret = get_errno(getresuid(&ruid, &euid, &suid));
7692 if (!is_error(ret)) {
7693 if (put_user_u16(high2lowuid(ruid), arg1)
7694 || put_user_u16(high2lowuid(euid), arg2)
7695 || put_user_u16(high2lowuid(suid), arg3))
7696 goto efault;
7699 break;
7700 #endif
7701 #ifdef TARGET_NR_getresgid
7702 case TARGET_NR_setresgid:
7703 ret = get_errno(setresgid(low2highgid(arg1),
7704 low2highgid(arg2),
7705 low2highgid(arg3)));
7706 break;
7707 #endif
7708 #ifdef TARGET_NR_getresgid
7709 case TARGET_NR_getresgid:
7711 gid_t rgid, egid, sgid;
7712 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7713 if (!is_error(ret)) {
7714 if (put_user_u16(high2lowgid(rgid), arg1)
7715 || put_user_u16(high2lowgid(egid), arg2)
7716 || put_user_u16(high2lowgid(sgid), arg3))
7717 goto efault;
7720 break;
7721 #endif
7722 case TARGET_NR_chown:
7723 if (!(p = lock_user_string(arg1)))
7724 goto efault;
7725 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7726 unlock_user(p, arg1, 0);
7727 break;
7728 case TARGET_NR_setuid:
7729 ret = get_errno(setuid(low2highuid(arg1)));
7730 break;
7731 case TARGET_NR_setgid:
7732 ret = get_errno(setgid(low2highgid(arg1)));
7733 break;
7734 case TARGET_NR_setfsuid:
7735 ret = get_errno(setfsuid(arg1));
7736 break;
7737 case TARGET_NR_setfsgid:
7738 ret = get_errno(setfsgid(arg1));
7739 break;
7741 #ifdef TARGET_NR_lchown32
7742 case TARGET_NR_lchown32:
7743 if (!(p = lock_user_string(arg1)))
7744 goto efault;
7745 ret = get_errno(lchown(p, arg2, arg3));
7746 unlock_user(p, arg1, 0);
7747 break;
7748 #endif
7749 #ifdef TARGET_NR_getuid32
7750 case TARGET_NR_getuid32:
7751 ret = get_errno(getuid());
7752 break;
7753 #endif
7755 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7756 /* Alpha specific */
7757 case TARGET_NR_getxuid:
7759 uid_t euid;
7760 euid = geteuid();
7761 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
7763 ret = get_errno(getuid());
7764 break;
7765 #endif
7766 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7767 /* Alpha specific */
7768 case TARGET_NR_getxgid:
7770 gid_t egid;
7771 egid = getegid();
7772 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
7774 ret = get_errno(getgid());
7775 break;
7776 #endif
7777 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7778 /* Alpha specific */
7779 case TARGET_NR_osf_getsysinfo:
7780 ret = -TARGET_EOPNOTSUPP;
7781 switch (arg1) {
7782 case TARGET_GSI_IEEE_FP_CONTROL:
7784 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7786 /* Copied from linux ieee_fpcr_to_swcr. */
7787 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7788 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7789 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7790 | SWCR_TRAP_ENABLE_DZE
7791 | SWCR_TRAP_ENABLE_OVF);
7792 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7793 | SWCR_TRAP_ENABLE_INE);
7794 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7795 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7797 if (put_user_u64 (swcr, arg2))
7798 goto efault;
7799 ret = 0;
7801 break;
7803 /* case GSI_IEEE_STATE_AT_SIGNAL:
7804 -- Not implemented in linux kernel.
7805 case GSI_UACPROC:
7806 -- Retrieves current unaligned access state; not much used.
7807 case GSI_PROC_TYPE:
7808 -- Retrieves implver information; surely not used.
7809 case GSI_GET_HWRPB:
7810 -- Grabs a copy of the HWRPB; surely not used.
7811 */
7813 break;
7814 #endif
7815 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7816 /* Alpha specific */
7817 case TARGET_NR_osf_setsysinfo:
7818 ret = -TARGET_EOPNOTSUPP;
7819 switch (arg1) {
7820 case TARGET_SSI_IEEE_FP_CONTROL:
7822 uint64_t swcr, fpcr, orig_fpcr;
7824 if (get_user_u64 (swcr, arg2)) {
7825 goto efault;
7827 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7828 fpcr = orig_fpcr & FPCR_DYN_MASK;
7830 /* Copied from linux ieee_swcr_to_fpcr. */
7831 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7832 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7833 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7834 | SWCR_TRAP_ENABLE_DZE
7835 | SWCR_TRAP_ENABLE_OVF)) << 48;
7836 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7837 | SWCR_TRAP_ENABLE_INE)) << 57;
7838 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7839 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7841 cpu_alpha_store_fpcr(cpu_env, fpcr);
7842 ret = 0;
7844 break;
7846 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7848 uint64_t exc, fpcr, orig_fpcr;
7849 int si_code;
7851 if (get_user_u64(exc, arg2)) {
7852 goto efault;
7855 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7857 /* We only add to the exception status here. */
7858 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7860 cpu_alpha_store_fpcr(cpu_env, fpcr);
7861 ret = 0;
7863 /* Old exceptions are not signaled. */
7864 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7866 /* If any exceptions were set by this call
7867 and are unmasked, send a signal. */
7868 si_code = 0;
7869 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7870 si_code = TARGET_FPE_FLTRES;
7872 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7873 si_code = TARGET_FPE_FLTUND;
7875 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7876 si_code = TARGET_FPE_FLTOVF;
7878 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7879 si_code = TARGET_FPE_FLTDIV;
7881 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7882 si_code = TARGET_FPE_FLTINV;
7884 if (si_code != 0) {
7885 target_siginfo_t info;
7886 info.si_signo = SIGFPE;
7887 info.si_errno = 0;
7888 info.si_code = si_code;
7889 info._sifields._sigfault._addr
7890 = ((CPUArchState *)cpu_env)->pc;
7891 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7894 break;
7896 /* case SSI_NVPAIRS:
7897 -- Used with SSIN_UACPROC to enable unaligned accesses.
7898 case SSI_IEEE_STATE_AT_SIGNAL:
7899 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7900 -- Not implemented in linux kernel
7901 */
7903 break;
7904 #endif
7905 #ifdef TARGET_NR_osf_sigprocmask
7906 /* Alpha specific. */
7907 case TARGET_NR_osf_sigprocmask:
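/* OSF/1 sigprocmask passes the mask by value in arg2 and returns the previous mask in the syscall result instead of through a pointer. */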
7909 abi_ulong mask;
7910 int how;
7911 sigset_t set, oldset;
7913 switch(arg1) {
7914 case TARGET_SIG_BLOCK:
7915 how = SIG_BLOCK;
7916 break;
7917 case TARGET_SIG_UNBLOCK:
7918 how = SIG_UNBLOCK;
7919 break;
7920 case TARGET_SIG_SETMASK:
7921 how = SIG_SETMASK;
7922 break;
7923 default:
7924 ret = -TARGET_EINVAL;
7925 goto fail;
7927 mask = arg2;
7928 target_to_host_old_sigset(&set, &mask);
7929 sigprocmask(how, &set, &oldset);
7930 host_to_target_old_sigset(&mask, &oldset);
7931 ret = mask;
7933 break;
7934 #endif
7936 #ifdef TARGET_NR_getgid32
7937 case TARGET_NR_getgid32:
7938 ret = get_errno(getgid());
7939 break;
7940 #endif
7941 #ifdef TARGET_NR_geteuid32
7942 case TARGET_NR_geteuid32:
7943 ret = get_errno(geteuid());
7944 break;
7945 #endif
7946 #ifdef TARGET_NR_getegid32
7947 case TARGET_NR_getegid32:
7948 ret = get_errno(getegid());
7949 break;
7950 #endif
7951 #ifdef TARGET_NR_setreuid32
7952 case TARGET_NR_setreuid32:
7953 ret = get_errno(setreuid(arg1, arg2));
7954 break;
7955 #endif
7956 #ifdef TARGET_NR_setregid32
7957 case TARGET_NR_setregid32:
7958 ret = get_errno(setregid(arg1, arg2));
7959 break;
7960 #endif
7961 #ifdef TARGET_NR_getgroups32
7962 case TARGET_NR_getgroups32:
7964 int gidsetsize = arg1;
7965 uint32_t *target_grouplist;
7966 gid_t *grouplist;
7967 int i;
7969 grouplist = alloca(gidsetsize * sizeof(gid_t));
7970 ret = get_errno(getgroups(gidsetsize, grouplist));
7971 if (gidsetsize == 0)
7972 break;
7973 if (!is_error(ret)) {
7974 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7975 if (!target_grouplist) {
7976 ret = -TARGET_EFAULT;
7977 goto fail;
7979 for (i = 0; i < ret; i++)
7980 target_grouplist[i] = tswap32(grouplist[i]);
7981 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7984 break;
7985 #endif
7986 #ifdef TARGET_NR_setgroups32
7987 case TARGET_NR_setgroups32:
7989 int gidsetsize = arg1;
7990 uint32_t *target_grouplist;
7991 gid_t *grouplist;
7992 int i;
7994 grouplist = alloca(gidsetsize * sizeof(gid_t));
7995 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7996 if (!target_grouplist) {
7997 ret = -TARGET_EFAULT;
7998 goto fail;
8000 for (i = 0; i < gidsetsize; i++)
8001 grouplist[i] = tswap32(target_grouplist[i]);
8002 unlock_user(target_grouplist, arg2, 0);
8003 ret = get_errno(setgroups(gidsetsize, grouplist));
8005 break;
8006 #endif
8007 #ifdef TARGET_NR_fchown32
8008 case TARGET_NR_fchown32:
8009 ret = get_errno(fchown(arg1, arg2, arg3));
8010 break;
8011 #endif
8012 #ifdef TARGET_NR_setresuid32
8013 case TARGET_NR_setresuid32:
8014 ret = get_errno(setresuid(arg1, arg2, arg3));
8015 break;
8016 #endif
8017 #ifdef TARGET_NR_getresuid32
8018 case TARGET_NR_getresuid32:
8020 uid_t ruid, euid, suid;
8021 ret = get_errno(getresuid(&ruid, &euid, &suid));
8022 if (!is_error(ret)) {
8023 if (put_user_u32(ruid, arg1)
8024 || put_user_u32(euid, arg2)
8025 || put_user_u32(suid, arg3))
8026 goto efault;
8029 break;
8030 #endif
8031 #ifdef TARGET_NR_setresgid32
8032 case TARGET_NR_setresgid32:
8033 ret = get_errno(setresgid(arg1, arg2, arg3));
8034 break;
8035 #endif
8036 #ifdef TARGET_NR_getresgid32
8037 case TARGET_NR_getresgid32:
8039 gid_t rgid, egid, sgid;
8040 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8041 if (!is_error(ret)) {
8042 if (put_user_u32(rgid, arg1)
8043 || put_user_u32(egid, arg2)
8044 || put_user_u32(sgid, arg3))
8045 goto efault;
8048 break;
8049 #endif
8050 #ifdef TARGET_NR_chown32
8051 case TARGET_NR_chown32:
8052 if (!(p = lock_user_string(arg1)))
8053 goto efault;
8054 ret = get_errno(chown(p, arg2, arg3));
8055 unlock_user(p, arg1, 0);
8056 break;
8057 #endif
8058 #ifdef TARGET_NR_setuid32
8059 case TARGET_NR_setuid32:
8060 ret = get_errno(setuid(arg1));
8061 break;
8062 #endif
8063 #ifdef TARGET_NR_setgid32
8064 case TARGET_NR_setgid32:
8065 ret = get_errno(setgid(arg1));
8066 break;
8067 #endif
8068 #ifdef TARGET_NR_setfsuid32
8069 case TARGET_NR_setfsuid32:
8070 ret = get_errno(setfsuid(arg1));
8071 break;
8072 #endif
8073 #ifdef TARGET_NR_setfsgid32
8074 case TARGET_NR_setfsgid32:
8075 ret = get_errno(setfsgid(arg1));
8076 break;
8077 #endif
8079 case TARGET_NR_pivot_root:
8080 goto unimplemented;
8081 #ifdef TARGET_NR_mincore
8082 case TARGET_NR_mincore:
8084 void *a;
8085 ret = -TARGET_EFAULT;
8086 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8087 goto efault;
8088 if (!(p = lock_user_string(arg3)))
8089 goto mincore_fail;
8090 ret = get_errno(mincore(a, arg2, p));
8091 unlock_user(p, arg3, ret);
8092 mincore_fail:
8093 unlock_user(a, arg1, 0);
8095 break;
8096 #endif
8097 #ifdef TARGET_NR_arm_fadvise64_64
8098 case TARGET_NR_arm_fadvise64_64:
8100 /*
8101 * arm_fadvise64_64 looks like fadvise64_64 but
8102 * with different argument order
8103 */
8104 abi_long temp;
8105 temp = arg3;
8106 arg3 = arg4;
8107 arg4 = temp;
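/* Fall through with the arguments now in the standard fadvise64_64 order. */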
8109 #endif
8110 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8111 #ifdef TARGET_NR_fadvise64_64
8112 case TARGET_NR_fadvise64_64:
8113 #endif
8114 #ifdef TARGET_NR_fadvise64
8115 case TARGET_NR_fadvise64:
8116 #endif
8117 #ifdef TARGET_S390X
8118 switch (arg4) {
8119 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8120 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8121 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8122 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8123 default: break;
8125 #endif
8126 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8127 break;
8128 #endif
8129 #ifdef TARGET_NR_madvise
8130 case TARGET_NR_madvise:
8131 /* A straight passthrough may not be safe because qemu sometimes
8132 turns private file-backed mappings into anonymous mappings.
8133 This will break MADV_DONTNEED.
8134 This is a hint, so ignoring and returning success is ok. */
8135 ret = get_errno(0);
8136 break;
8137 #endif
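/* On 32-bit targets, F_GETLK64/F_SETLK64/F_SETLKW64 carry a 64-bit flock
 * structure; convert it field by field (ARM EABI uses a padded layout)
 * around the host fcntl() call. */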
8138 #if TARGET_ABI_BITS == 32
8139 case TARGET_NR_fcntl64:
8140 {
8141 int cmd;
8142 struct flock64 fl;
8143 struct target_flock64 *target_fl;
8144 #ifdef TARGET_ARM
8145 struct target_eabi_flock64 *target_efl;
8146 #endif
8148 cmd = target_to_host_fcntl_cmd(arg2);
8149 if (cmd == -TARGET_EINVAL) {
8150 ret = cmd;
8151 break;
8152 }
8154 switch(arg2) {
8155 case TARGET_F_GETLK64:
8156 #ifdef TARGET_ARM
8157 if (((CPUARMState *)cpu_env)->eabi) {
8158 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8159 goto efault;
8160 fl.l_type = tswap16(target_efl->l_type);
8161 fl.l_whence = tswap16(target_efl->l_whence);
8162 fl.l_start = tswap64(target_efl->l_start);
8163 fl.l_len = tswap64(target_efl->l_len);
8164 fl.l_pid = tswap32(target_efl->l_pid);
8165 unlock_user_struct(target_efl, arg3, 0);
8166 } else
8167 #endif
8168 {
8169 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8170 goto efault;
8171 fl.l_type = tswap16(target_fl->l_type);
8172 fl.l_whence = tswap16(target_fl->l_whence);
8173 fl.l_start = tswap64(target_fl->l_start);
8174 fl.l_len = tswap64(target_fl->l_len);
8175 fl.l_pid = tswap32(target_fl->l_pid);
8176 unlock_user_struct(target_fl, arg3, 0);
8177 }
8178 ret = get_errno(fcntl(arg1, cmd, &fl));
8179 if (ret == 0) {
8180 #ifdef TARGET_ARM
8181 if (((CPUARMState *)cpu_env)->eabi) {
8182 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8183 goto efault;
8184 target_efl->l_type = tswap16(fl.l_type);
8185 target_efl->l_whence = tswap16(fl.l_whence);
8186 target_efl->l_start = tswap64(fl.l_start);
8187 target_efl->l_len = tswap64(fl.l_len);
8188 target_efl->l_pid = tswap32(fl.l_pid);
8189 unlock_user_struct(target_efl, arg3, 1);
8190 } else
8191 #endif
8192 {
8193 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8194 goto efault;
8195 target_fl->l_type = tswap16(fl.l_type);
8196 target_fl->l_whence = tswap16(fl.l_whence);
8197 target_fl->l_start = tswap64(fl.l_start);
8198 target_fl->l_len = tswap64(fl.l_len);
8199 target_fl->l_pid = tswap32(fl.l_pid);
8200 unlock_user_struct(target_fl, arg3, 1);
8201 }
8202 }
8203 break;
8205 case TARGET_F_SETLK64:
8206 case TARGET_F_SETLKW64:
8207 #ifdef TARGET_ARM
8208 if (((CPUARMState *)cpu_env)->eabi) {
8209 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8210 goto efault;
8211 fl.l_type = tswap16(target_efl->l_type);
8212 fl.l_whence = tswap16(target_efl->l_whence);
8213 fl.l_start = tswap64(target_efl->l_start);
8214 fl.l_len = tswap64(target_efl->l_len);
8215 fl.l_pid = tswap32(target_efl->l_pid);
8216 unlock_user_struct(target_efl, arg3, 0);
8217 } else
8218 #endif
8219 {
8220 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8221 goto efault;
8222 fl.l_type = tswap16(target_fl->l_type);
8223 fl.l_whence = tswap16(target_fl->l_whence);
8224 fl.l_start = tswap64(target_fl->l_start);
8225 fl.l_len = tswap64(target_fl->l_len);
8226 fl.l_pid = tswap32(target_fl->l_pid);
8227 unlock_user_struct(target_fl, arg3, 0);
8228 }
8229 ret = get_errno(fcntl(arg1, cmd, &fl));
8230 break;
8231 default:
8232 ret = do_fcntl(arg1, arg2, arg3);
8233 break;
8234 }
8235 break;
8236 }
8237 #endif
8238 #ifdef TARGET_NR_cacheflush
8239 case TARGET_NR_cacheflush:
8240 /* self-modifying code is handled automatically, so nothing needed */
8241 ret = 0;
8242 break;
8243 #endif
8244 #ifdef TARGET_NR_security
8245 case TARGET_NR_security:
8246 goto unimplemented;
8247 #endif
8248 #ifdef TARGET_NR_getpagesize
8249 case TARGET_NR_getpagesize:
8250 ret = TARGET_PAGE_SIZE;
8251 break;
8252 #endif
8253 case TARGET_NR_gettid:
8254 ret = get_errno(gettid());
8255 break;
8256 #ifdef TARGET_NR_readahead
8257 case TARGET_NR_readahead:
8258 #if TARGET_ABI_BITS == 32
8259 if (regpairs_aligned(cpu_env)) {
8260 arg2 = arg3;
8261 arg3 = arg4;
8262 arg4 = arg5;
8263 }
8264 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8265 #else
8266 ret = get_errno(readahead(arg1, arg2, arg3));
8267 #endif
8268 break;
8269 #endif
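/* Extended attribute syscalls: lock the guest path/name/value buffers,
 * call the corresponding host *xattr function, then unlock with the
 * appropriate dirty length. */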
8270 #ifdef CONFIG_ATTR
8271 #ifdef TARGET_NR_setxattr
8272 case TARGET_NR_listxattr:
8273 case TARGET_NR_llistxattr:
8274 {
8275 void *p, *b = 0;
8276 if (arg2) {
8277 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8278 if (!b) {
8279 ret = -TARGET_EFAULT;
8280 break;
8281 }
8282 }
8283 p = lock_user_string(arg1);
8284 if (p) {
8285 if (num == TARGET_NR_listxattr) {
8286 ret = get_errno(listxattr(p, b, arg3));
8287 } else {
8288 ret = get_errno(llistxattr(p, b, arg3));
8289 }
8290 } else {
8291 ret = -TARGET_EFAULT;
8292 }
8293 unlock_user(p, arg1, 0);
8294 unlock_user(b, arg2, arg3);
8295 break;
8296 }
8297 case TARGET_NR_flistxattr:
8298 {
8299 void *b = 0;
8300 if (arg2) {
8301 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8302 if (!b) {
8303 ret = -TARGET_EFAULT;
8304 break;
8305 }
8306 }
8307 ret = get_errno(flistxattr(arg1, b, arg3));
8308 unlock_user(b, arg2, arg3);
8309 break;
8310 }
8311 case TARGET_NR_setxattr:
8312 case TARGET_NR_lsetxattr:
8313 {
8314 void *p, *n, *v = 0;
8315 if (arg3) {
8316 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8317 if (!v) {
8318 ret = -TARGET_EFAULT;
8319 break;
8320 }
8321 }
8322 p = lock_user_string(arg1);
8323 n = lock_user_string(arg2);
8324 if (p && n) {
8325 if (num == TARGET_NR_setxattr) {
8326 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8327 } else {
8328 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8329 }
8330 } else {
8331 ret = -TARGET_EFAULT;
8332 }
8333 unlock_user(p, arg1, 0);
8334 unlock_user(n, arg2, 0);
8335 unlock_user(v, arg3, 0);
8336 }
8337 break;
8338 case TARGET_NR_fsetxattr:
8339 {
8340 void *n, *v = 0;
8341 if (arg3) {
8342 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8343 if (!v) {
8344 ret = -TARGET_EFAULT;
8345 break;
8346 }
8347 }
8348 n = lock_user_string(arg2);
8349 if (n) {
8350 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8351 } else {
8352 ret = -TARGET_EFAULT;
8353 }
8354 unlock_user(n, arg2, 0);
8355 unlock_user(v, arg3, 0);
8356 }
8357 break;
8358 case TARGET_NR_getxattr:
8359 case TARGET_NR_lgetxattr:
8360 {
8361 void *p, *n, *v = 0;
8362 if (arg3) {
8363 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8364 if (!v) {
8365 ret = -TARGET_EFAULT;
8366 break;
8367 }
8368 }
8369 p = lock_user_string(arg1);
8370 n = lock_user_string(arg2);
8371 if (p && n) {
8372 if (num == TARGET_NR_getxattr) {
8373 ret = get_errno(getxattr(p, n, v, arg4));
8374 } else {
8375 ret = get_errno(lgetxattr(p, n, v, arg4));
8376 }
8377 } else {
8378 ret = -TARGET_EFAULT;
8379 }
8380 unlock_user(p, arg1, 0);
8381 unlock_user(n, arg2, 0);
8382 unlock_user(v, arg3, arg4);
8383 }
8384 break;
8385 case TARGET_NR_fgetxattr:
8386 {
8387 void *n, *v = 0;
8388 if (arg3) {
8389 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8390 if (!v) {
8391 ret = -TARGET_EFAULT;
8392 break;
8393 }
8394 }
8395 n = lock_user_string(arg2);
8396 if (n) {
8397 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8398 } else {
8399 ret = -TARGET_EFAULT;
8400 }
8401 unlock_user(n, arg2, 0);
8402 unlock_user(v, arg3, arg4);
8403 }
8404 break;
8405 case TARGET_NR_removexattr:
8406 case TARGET_NR_lremovexattr:
8407 {
8408 void *p, *n;
8409 p = lock_user_string(arg1);
8410 n = lock_user_string(arg2);
8411 if (p && n) {
8412 if (num == TARGET_NR_removexattr) {
8413 ret = get_errno(removexattr(p, n));
8414 } else {
8415 ret = get_errno(lremovexattr(p, n));
8416 }
8417 } else {
8418 ret = -TARGET_EFAULT;
8419 }
8420 unlock_user(p, arg1, 0);
8421 unlock_user(n, arg2, 0);
8422 }
8423 break;
8424 case TARGET_NR_fremovexattr:
8425 {
8426 void *n;
8427 n = lock_user_string(arg2);
8428 if (n) {
8429 ret = get_errno(fremovexattr(arg1, n));
8430 } else {
8431 ret = -TARGET_EFAULT;
8432 }
8433 unlock_user(n, arg2, 0);
8434 }
8435 break;
8436 #endif
8437 #endif /* CONFIG_ATTR */
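/* TLS syscalls are per-architecture: MIPS and CRIS just record the
 * thread pointer in the CPU state, while 32-bit x86 is handled by
 * do_set_thread_area()/do_get_thread_area(). */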
8438 #ifdef TARGET_NR_set_thread_area
8439 case TARGET_NR_set_thread_area:
8440 #if defined(TARGET_MIPS)
8441 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8442 ret = 0;
8443 break;
8444 #elif defined(TARGET_CRIS)
8445 if (arg1 & 0xff)
8446 ret = -TARGET_EINVAL;
8447 else {
8448 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8449 ret = 0;
8450 }
8451 break;
8452 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8453 ret = do_set_thread_area(cpu_env, arg1);
8454 break;
8455 #else
8456 goto unimplemented_nowarn;
8457 #endif
8458 #endif
8459 #ifdef TARGET_NR_get_thread_area
8460 case TARGET_NR_get_thread_area:
8461 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8462 ret = do_get_thread_area(cpu_env, arg1);
break;
8463 #else
8464 goto unimplemented_nowarn;
8465 #endif
8466 #endif
8467 #ifdef TARGET_NR_getdomainname
8468 case TARGET_NR_getdomainname:
8469 goto unimplemented_nowarn;
8470 #endif
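/* Clock syscalls: results are converted between the host and guest
 * struct timespec layouts by the *_timespec helpers. */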
8472 #ifdef TARGET_NR_clock_gettime
8473 case TARGET_NR_clock_gettime:
8474 {
8475 struct timespec ts;
8476 ret = get_errno(clock_gettime(arg1, &ts));
8477 if (!is_error(ret)) {
8478 host_to_target_timespec(arg2, &ts);
8479 }
8480 break;
8481 }
8482 #endif
8483 #ifdef TARGET_NR_clock_getres
8484 case TARGET_NR_clock_getres:
8485 {
8486 struct timespec ts;
8487 ret = get_errno(clock_getres(arg1, &ts));
8488 if (!is_error(ret)) {
8489 host_to_target_timespec(arg2, &ts);
8490 }
8491 break;
8492 }
8493 #endif
8494 #ifdef TARGET_NR_clock_nanosleep
8495 case TARGET_NR_clock_nanosleep:
8496 {
8497 struct timespec ts;
8498 target_to_host_timespec(&ts, arg3);
8499 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8500 if (arg4)
8501 host_to_target_timespec(arg4, &ts);
8502 break;
8503 }
8504 #endif
8506 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8507 case TARGET_NR_set_tid_address:
8508 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8509 break;
8510 #endif
8512 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8513 case TARGET_NR_tkill:
8514 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8515 break;
8516 #endif
8518 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8519 case TARGET_NR_tgkill:
8520 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8521 target_to_host_signal(arg3)));
8522 break;
8523 #endif
8525 #ifdef TARGET_NR_set_robust_list
8526 case TARGET_NR_set_robust_list:
8527 case TARGET_NR_get_robust_list:
8528 /* The ABI for supporting robust futexes has userspace pass
8529 * the kernel a pointer to a linked list which is updated by
8530 * userspace after the syscall; the list is walked by the kernel
8531 * when the thread exits. Since the linked list in QEMU guest
8532 * memory isn't a valid linked list for the host and we have
8533 * no way to reliably intercept the thread-death event, we can't
8534 * support these. Silently return ENOSYS so that guest userspace
8535 * falls back to a non-robust futex implementation (which should
8536 * be OK except in the corner case of the guest crashing while
8537 * holding a mutex that is shared with another process via
8538 * shared memory).
8539 */
8540 goto unimplemented_nowarn;
8541 #endif
8543 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8544 case TARGET_NR_utimensat:
8545 {
8546 struct timespec *tsp, ts[2];
8547 if (!arg3) {
8548 tsp = NULL;
8549 } else {
8550 target_to_host_timespec(ts, arg3);
8551 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8552 tsp = ts;
8553 }
8554 if (!arg2)
8555 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8556 else {
8557 if (!(p = lock_user_string(arg2))) {
8558 ret = -TARGET_EFAULT;
8559 goto fail;
8560 }
8561 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8562 unlock_user(p, arg2, 0);
8563 }
8564 }
8565 break;
8566 #endif
8567 #if defined(CONFIG_USE_NPTL)
8568 case TARGET_NR_futex:
8569 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8570 break;
8571 #endif
8572 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8573 case TARGET_NR_inotify_init:
8574 ret = get_errno(sys_inotify_init());
8575 break;
8576 #endif
8577 #ifdef CONFIG_INOTIFY1
8578 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8579 case TARGET_NR_inotify_init1:
8580 ret = get_errno(sys_inotify_init1(arg1));
8581 break;
8582 #endif
8583 #endif
8584 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8585 case TARGET_NR_inotify_add_watch:
8586 p = lock_user_string(arg2);
8587 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8588 unlock_user(p, arg2, 0);
8589 break;
8590 #endif
8591 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8592 case TARGET_NR_inotify_rm_watch:
8593 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8594 break;
8595 #endif
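/* POSIX message queues: the queue name and struct mq_attr are converted
 * between guest and host; the timed variants also convert the timeout
 * timespec in both directions. */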
8597 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8598 case TARGET_NR_mq_open:
8599 {
8600 struct mq_attr posix_mq_attr;
8602 p = lock_user_string(arg1 - 1);
8603 if (arg4 != 0)
8604 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8605 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
8606 unlock_user (p, arg1, 0);
8607 }
8608 break;
8610 case TARGET_NR_mq_unlink:
8611 p = lock_user_string(arg1 - 1);
8612 ret = get_errno(mq_unlink(p));
8613 unlock_user (p, arg1, 0);
8614 break;
8616 case TARGET_NR_mq_timedsend:
8617 {
8618 struct timespec ts;
8620 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8621 if (arg5 != 0) {
8622 target_to_host_timespec(&ts, arg5);
8623 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8624 host_to_target_timespec(arg5, &ts);
8625 }
8626 else
8627 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8628 unlock_user (p, arg2, arg3);
8629 }
8630 break;
8632 case TARGET_NR_mq_timedreceive:
8633 {
8634 struct timespec ts;
8635 unsigned int prio;
8637 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8638 if (arg5 != 0) {
8639 target_to_host_timespec(&ts, arg5);
8640 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8641 host_to_target_timespec(arg5, &ts);
8642 }
8643 else
8644 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8645 unlock_user (p, arg2, arg3);
8646 if (arg4 != 0)
8647 put_user_u32(prio, arg4);
8648 }
8649 break;
8651 /* Not implemented for now... */
8652 /* case TARGET_NR_mq_notify: */
8653 /* break; */
8655 case TARGET_NR_mq_getsetattr:
8656 {
8657 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8658 ret = 0;
8659 if (arg3 != 0) {
8660 ret = mq_getattr(arg1, &posix_mq_attr_out);
8661 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8662 }
8663 if (arg2 != 0) {
8664 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8665 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8666 }
8667 }
8669 break;
8670 #endif
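/* tee/splice/vmsplice map directly onto the host syscalls; splice's
 * optional pipe offsets are read from guest memory with get_user_u64(). */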
8672 #ifdef CONFIG_SPLICE
8673 #ifdef TARGET_NR_tee
8674 case TARGET_NR_tee:
8675 {
8676 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8677 }
8678 break;
8679 #endif
8680 #ifdef TARGET_NR_splice
8681 case TARGET_NR_splice:
8682 {
8683 loff_t loff_in, loff_out;
8684 loff_t *ploff_in = NULL, *ploff_out = NULL;
8685 if(arg2) {
8686 get_user_u64(loff_in, arg2);
8687 ploff_in = &loff_in;
8688 }
8689 if(arg4) {
8690 get_user_u64(loff_out, arg4);
8691 ploff_out = &loff_out;
8692 }
8693 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8694 }
8695 break;
8696 #endif
8697 #ifdef TARGET_NR_vmsplice
8698 case TARGET_NR_vmsplice:
8699 {
8700 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8701 if (vec != NULL) {
8702 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8703 unlock_iovec(vec, arg2, arg3, 0);
8704 } else {
8705 ret = -host_to_target_errno(errno);
8706 }
8707 }
8708 break;
8709 #endif
8710 #endif /* CONFIG_SPLICE */
8711 #ifdef CONFIG_EVENTFD
8712 #if defined(TARGET_NR_eventfd)
8713 case TARGET_NR_eventfd:
8714 ret = get_errno(eventfd(arg1, 0));
8715 break;
8716 #endif
8717 #if defined(TARGET_NR_eventfd2)
8718 case TARGET_NR_eventfd2:
8719 {
8720 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
8721 if (arg2 & TARGET_O_NONBLOCK) {
8722 host_flags |= O_NONBLOCK;
8723 }
8724 if (arg2 & TARGET_O_CLOEXEC) {
8725 host_flags |= O_CLOEXEC;
8726 }
8727 ret = get_errno(eventfd(arg1, host_flags));
8728 break;
8729 }
8730 #endif
8731 #endif /* CONFIG_EVENTFD */
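/* On 32-bit ABIs the 64-bit offset and length arguments arrive split
 * across register pairs and are reassembled with target_offset64(). */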
8732 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8733 case TARGET_NR_fallocate:
8734 #if TARGET_ABI_BITS == 32
8735 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8736 target_offset64(arg5, arg6)));
8737 #else
8738 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8739 #endif
8740 break;
8741 #endif
8742 #if defined(CONFIG_SYNC_FILE_RANGE)
8743 #if defined(TARGET_NR_sync_file_range)
8744 case TARGET_NR_sync_file_range:
8745 #if TARGET_ABI_BITS == 32
8746 #if defined(TARGET_MIPS)
8747 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8748 target_offset64(arg5, arg6), arg7));
8749 #else
8750 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8751 target_offset64(arg4, arg5), arg6));
8752 #endif /* !TARGET_MIPS */
8753 #else
8754 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8755 #endif
8756 break;
8757 #endif
8758 #if defined(TARGET_NR_sync_file_range2)
8759 case TARGET_NR_sync_file_range2:
8760 /* This is like sync_file_range but the arguments are reordered */
8761 #if TARGET_ABI_BITS == 32
8762 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8763 target_offset64(arg5, arg6), arg2));
8764 #else
8765 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8766 #endif
8767 break;
8768 #endif
8769 #endif
8770 #if defined(CONFIG_EPOLL)
8771 #if defined(TARGET_NR_epoll_create)
8772 case TARGET_NR_epoll_create:
8773 ret = get_errno(epoll_create(arg1));
8774 break;
8775 #endif
8776 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8777 case TARGET_NR_epoll_create1:
8778 ret = get_errno(epoll_create1(arg1));
8779 break;
8780 #endif
8781 #if defined(TARGET_NR_epoll_ctl)
8782 case TARGET_NR_epoll_ctl:
8783 {
8784 struct epoll_event ep;
8785 struct epoll_event *epp = 0;
8786 if (arg4) {
8787 struct target_epoll_event *target_ep;
8788 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8789 goto efault;
8790 }
8791 ep.events = tswap32(target_ep->events);
8792 /* The epoll_data_t union is just opaque data to the kernel,
8793 * so we transfer all 64 bits across and need not worry what
8794 * actual data type it is.
8795 */
8796 ep.data.u64 = tswap64(target_ep->data.u64);
8797 unlock_user_struct(target_ep, arg4, 0);
8798 epp = &ep;
8799 }
8800 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8801 break;
8802 }
8803 #endif
8805 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8806 #define IMPLEMENT_EPOLL_PWAIT
8807 #endif
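/* epoll_wait and epoll_pwait share one implementation: events are
 * collected into a host-side buffer and byte-swapped back into the
 * guest's target_epoll_event array. */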
8808 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8809 #if defined(TARGET_NR_epoll_wait)
8810 case TARGET_NR_epoll_wait:
8811 #endif
8812 #if defined(IMPLEMENT_EPOLL_PWAIT)
8813 case TARGET_NR_epoll_pwait:
8814 #endif
8815 {
8816 struct target_epoll_event *target_ep;
8817 struct epoll_event *ep;
8818 int epfd = arg1;
8819 int maxevents = arg3;
8820 int timeout = arg4;
8822 target_ep = lock_user(VERIFY_WRITE, arg2,
8823 maxevents * sizeof(struct target_epoll_event), 1);
8824 if (!target_ep) {
8825 goto efault;
8826 }
8828 ep = alloca(maxevents * sizeof(struct epoll_event));
8830 switch (num) {
8831 #if defined(IMPLEMENT_EPOLL_PWAIT)
8832 case TARGET_NR_epoll_pwait:
8833 {
8834 target_sigset_t *target_set;
8835 sigset_t _set, *set = &_set;
8837 if (arg5) {
8838 target_set = lock_user(VERIFY_READ, arg5,
8839 sizeof(target_sigset_t), 1);
8840 if (!target_set) {
8841 unlock_user(target_ep, arg2, 0);
8842 goto efault;
8843 }
8844 target_to_host_sigset(set, target_set);
8845 unlock_user(target_set, arg5, 0);
8846 } else {
8847 set = NULL;
8848 }
8850 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8851 break;
8852 }
8853 #endif
8854 #if defined(TARGET_NR_epoll_wait)
8855 case TARGET_NR_epoll_wait:
8856 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8857 break;
8858 #endif
8859 default:
8860 ret = -TARGET_ENOSYS;
8861 }
8862 if (!is_error(ret)) {
8863 int i;
8864 for (i = 0; i < ret; i++) {
8865 target_ep[i].events = tswap32(ep[i].events);
8866 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8867 }
8868 }
8869 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8870 break;
8871 }
8872 #endif
8873 #endif
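/* prlimit64 uses a fixed 64-bit rlimit layout on all targets, so only
 * the byte order of rlim_cur/rlim_max needs converting. */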
8874 #ifdef TARGET_NR_prlimit64
8875 case TARGET_NR_prlimit64:
8876 {
8877 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8878 struct target_rlimit64 *target_rnew, *target_rold;
8879 struct host_rlimit64 rnew, rold, *rnewp = 0;
8880 if (arg3) {
8881 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8882 goto efault;
8883 }
8884 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8885 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8886 unlock_user_struct(target_rnew, arg3, 0);
8887 rnewp = &rnew;
8888 }
8890 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8891 if (!is_error(ret) && arg4) {
8892 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8893 goto efault;
8894 }
8895 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8896 target_rold->rlim_max = tswap64(rold.rlim_max);
8897 unlock_user_struct(target_rold, arg4, 1);
8898 }
8899 break;
8900 }
8901 #endif
8902 #ifdef TARGET_NR_gethostname
8903 case TARGET_NR_gethostname:
8904 {
8905 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8906 if (name) {
8907 ret = get_errno(gethostname(name, arg2));
8908 unlock_user(name, arg1, arg2);
8909 } else {
8910 ret = -TARGET_EFAULT;
8911 }
8912 break;
8913 }
8914 #endif
8915 default:
8916 unimplemented:
8917 gemu_log("qemu: Unsupported syscall: %d\n", num);
8918 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8919 unimplemented_nowarn:
8920 #endif
8921 ret = -TARGET_ENOSYS;
8922 break;
8923 }
8924 fail:
8925 #ifdef DEBUG
8926 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8927 #endif
8928 if(do_strace)
8929 print_syscall_ret(num, ret);
8930 return ret;
8931 efault:
8932 ret = -TARGET_EFAULT;
8933 goto fail;