linux-user: Implement *listxattr syscalls
linux-user / syscall.c
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include "qemu-common.h"
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
73 #ifdef CONFIG_ATTR
74 #include "qemu-xattr.h"
75 #endif
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/utsname.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include "linux_loop.h"
99 #include "cpu-uname.h"
101 #include "qemu.h"
103 #if defined(CONFIG_USE_NPTL)
104 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
105 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
106 #else
107 /* XXX: Hardcode the above values. */
108 #define CLONE_NPTL_FLAGS2 0
109 #endif
111 //#define DEBUG
113 //#include <linux/msdos_fs.h>
114 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
115 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
118 #undef _syscall0
119 #undef _syscall1
120 #undef _syscall2
121 #undef _syscall3
122 #undef _syscall4
123 #undef _syscall5
124 #undef _syscall6
126 #define _syscall0(type,name) \
127 static type name (void) \
129 return syscall(__NR_##name); \
132 #define _syscall1(type,name,type1,arg1) \
133 static type name (type1 arg1) \
135 return syscall(__NR_##name, arg1); \
138 #define _syscall2(type,name,type1,arg1,type2,arg2) \
139 static type name (type1 arg1,type2 arg2) \
141 return syscall(__NR_##name, arg1, arg2); \
144 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
145 static type name (type1 arg1,type2 arg2,type3 arg3) \
147 return syscall(__NR_##name, arg1, arg2, arg3); \
150 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
151 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
153 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
156 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
157 type5,arg5) \
158 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
160 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
164 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
165 type5,arg5,type6,arg6) \
166 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
167 type6 arg6) \
169 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
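Illustrative only (not part of syscall.c): each _syscallN invocation expands to a small static wrapper around the host's syscall(2) entry point, bypassing any glibc wrapper. A sketch of what _syscall2(int, sys_getpriority, int, which, int, who) produces:

#include <unistd.h>
#include <sys/syscall.h>

/* __NR_##name pastes to __NR_sys_getpriority, which the alias block just
 * below #defines to the host's __NR_getpriority */
static int sys_getpriority(int which, int who)
{
    return syscall(__NR_getpriority, which, who);
}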
173 #define __NR_sys_uname __NR_uname
174 #define __NR_sys_faccessat __NR_faccessat
175 #define __NR_sys_fchmodat __NR_fchmodat
176 #define __NR_sys_fchownat __NR_fchownat
177 #define __NR_sys_fstatat64 __NR_fstatat64
178 #define __NR_sys_futimesat __NR_futimesat
179 #define __NR_sys_getcwd1 __NR_getcwd
180 #define __NR_sys_getdents __NR_getdents
181 #define __NR_sys_getdents64 __NR_getdents64
182 #define __NR_sys_getpriority __NR_getpriority
183 #define __NR_sys_linkat __NR_linkat
184 #define __NR_sys_mkdirat __NR_mkdirat
185 #define __NR_sys_mknodat __NR_mknodat
186 #define __NR_sys_newfstatat __NR_newfstatat
187 #define __NR_sys_openat __NR_openat
188 #define __NR_sys_readlinkat __NR_readlinkat
189 #define __NR_sys_renameat __NR_renameat
190 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
191 #define __NR_sys_symlinkat __NR_symlinkat
192 #define __NR_sys_syslog __NR_syslog
193 #define __NR_sys_tgkill __NR_tgkill
194 #define __NR_sys_tkill __NR_tkill
195 #define __NR_sys_unlinkat __NR_unlinkat
196 #define __NR_sys_utimensat __NR_utimensat
197 #define __NR_sys_futex __NR_futex
198 #define __NR_sys_inotify_init __NR_inotify_init
199 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
200 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
202 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
203 defined(__s390x__)
204 #define __NR__llseek __NR_lseek
205 #endif
207 #ifdef __NR_gettid
208 _syscall0(int, gettid)
209 #else
210 /* This is a replacement for the host gettid() and must return a host
211 errno. */
212 static int gettid(void) {
213 return -ENOSYS;
215 #endif
216 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
217 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
218 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
219 #endif
220 _syscall2(int, sys_getpriority, int, which, int, who);
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
223 loff_t *, res, uint, wh);
224 #endif
225 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
226 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
227 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
228 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
229 #endif
230 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
231 _syscall2(int,sys_tkill,int,tid,int,sig)
232 #endif
233 #ifdef __NR_exit_group
234 _syscall1(int,exit_group,int,error_code)
235 #endif
236 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
237 _syscall1(int,set_tid_address,int *,tidptr)
238 #endif
239 #if defined(CONFIG_USE_NPTL)
240 #if defined(TARGET_NR_futex) && defined(__NR_futex)
241 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
242 const struct timespec *,timeout,int *,uaddr2,int,val3)
243 #endif
244 #endif
245 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
246 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
247 unsigned long *, user_mask_ptr);
248 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
249 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
250 unsigned long *, user_mask_ptr);
251 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
252 void *, arg);
254 static bitmask_transtbl fcntl_flags_tbl[] = {
255 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
256 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
257 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
258 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
259 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
260 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
261 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
262 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
263 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
264 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
265 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
266 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
267 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
268 #if defined(O_DIRECT)
269 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
270 #endif
271 { 0, 0, 0, 0 }
274 #define COPY_UTSNAME_FIELD(dest, src) \
275 do { \
276 /* __NEW_UTS_LEN doesn't include terminating null */ \
277 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
278 (dest)[__NEW_UTS_LEN] = '\0'; \
279 } while (0)
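A brief illustrative aside (hypothetical code, not part of the file) on why the macro terminates the buffer by hand: strncpy() does not write a NUL when the source is at least as long as the limit, so the copy must be terminated explicitly.

#include <string.h>

static void copy_field(char dst[5], const char *src)
{
    (void) strncpy(dst, src, 4);   /* may leave dst without a terminator */
    dst[4] = '\0';                 /* force termination, as COPY_UTSNAME_FIELD does */
}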
281 static int sys_uname(struct new_utsname *buf)
283 struct utsname uts_buf;
285 if (uname(&uts_buf) < 0)
286 return (-1);
289 * Just in case these have some differences, we
290 * translate utsname to new_utsname (which is the
291 * struct the Linux kernel uses).
294 memset(buf, 0, sizeof(*buf));
295 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
296 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
297 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
298 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
299 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
300 #ifdef _GNU_SOURCE
301 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
302 #endif
303 return (0);
305 #undef COPY_UTSNAME_FIELD
308 static int sys_getcwd1(char *buf, size_t size)
310 if (getcwd(buf, size) == NULL) {
311 /* getcwd() sets errno */
312 return (-1);
314 return strlen(buf)+1;
317 #ifdef CONFIG_ATFILE
319 * The host system seems to have the atfile syscall stubs available. We
320 * now enable them one by one as specified by the target's syscall_nr.h.
323 #ifdef TARGET_NR_faccessat
324 static int sys_faccessat(int dirfd, const char *pathname, int mode)
326 return (faccessat(dirfd, pathname, mode, 0));
328 #endif
329 #ifdef TARGET_NR_fchmodat
330 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
332 return (fchmodat(dirfd, pathname, mode, 0));
334 #endif
335 #if defined(TARGET_NR_fchownat)
336 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
337 gid_t group, int flags)
339 return (fchownat(dirfd, pathname, owner, group, flags));
341 #endif
342 #ifdef __NR_fstatat64
343 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
344 int flags)
346 return (fstatat(dirfd, pathname, buf, flags));
348 #endif
349 #ifdef __NR_newfstatat
350 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
351 int flags)
353 return (fstatat(dirfd, pathname, buf, flags));
355 #endif
356 #ifdef TARGET_NR_futimesat
357 static int sys_futimesat(int dirfd, const char *pathname,
358 const struct timeval times[2])
360 return (futimesat(dirfd, pathname, times));
362 #endif
363 #ifdef TARGET_NR_linkat
364 static int sys_linkat(int olddirfd, const char *oldpath,
365 int newdirfd, const char *newpath, int flags)
367 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
369 #endif
370 #ifdef TARGET_NR_mkdirat
371 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
373 return (mkdirat(dirfd, pathname, mode));
375 #endif
376 #ifdef TARGET_NR_mknodat
377 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
378 dev_t dev)
380 return (mknodat(dirfd, pathname, mode, dev));
382 #endif
383 #ifdef TARGET_NR_openat
384 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
387 * open(2) has an extra parameter 'mode' when called with
388 * the O_CREAT flag.
390 if ((flags & O_CREAT) != 0) {
391 return (openat(dirfd, pathname, flags, mode));
393 return (openat(dirfd, pathname, flags));
395 #endif
396 #ifdef TARGET_NR_readlinkat
397 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
399 return (readlinkat(dirfd, pathname, buf, bufsiz));
401 #endif
402 #ifdef TARGET_NR_renameat
403 static int sys_renameat(int olddirfd, const char *oldpath,
404 int newdirfd, const char *newpath)
406 return (renameat(olddirfd, oldpath, newdirfd, newpath));
408 #endif
409 #ifdef TARGET_NR_symlinkat
410 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
412 return (symlinkat(oldpath, newdirfd, newpath));
414 #endif
415 #ifdef TARGET_NR_unlinkat
416 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
418 return (unlinkat(dirfd, pathname, flags));
420 #endif
421 #else /* !CONFIG_ATFILE */
424 * Try direct syscalls instead
426 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
427 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
428 #endif
429 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
430 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
431 #endif
432 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
433 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
434 uid_t,owner,gid_t,group,int,flags)
435 #endif
436 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
437 defined(__NR_fstatat64)
438 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
439 struct stat *,buf,int,flags)
440 #endif
441 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
442 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
443 const struct timeval *,times)
444 #endif
445 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
446 defined(__NR_newfstatat)
447 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
448 struct stat *,buf,int,flags)
449 #endif
450 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
451 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
452 int,newdirfd,const char *,newpath,int,flags)
453 #endif
454 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
455 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
456 #endif
457 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
458 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
459 mode_t,mode,dev_t,dev)
460 #endif
461 #if defined(TARGET_NR_openat) && defined(__NR_openat)
462 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
463 #endif
464 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
465 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
466 char *,buf,size_t,bufsize)
467 #endif
468 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
469 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
470 int,newdirfd,const char *,newpath)
471 #endif
472 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
473 _syscall3(int,sys_symlinkat,const char *,oldpath,
474 int,newdirfd,const char *,newpath)
475 #endif
476 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
477 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
478 #endif
480 #endif /* CONFIG_ATFILE */
482 #ifdef CONFIG_UTIMENSAT
483 static int sys_utimensat(int dirfd, const char *pathname,
484 const struct timespec times[2], int flags)
486 if (pathname == NULL)
487 return futimens(dirfd, times);
488 else
489 return utimensat(dirfd, pathname, times, flags);
491 #else
492 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
493 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
494 const struct timespec *,tsp,int,flags)
495 #endif
496 #endif /* CONFIG_UTIMENSAT */
498 #ifdef CONFIG_INOTIFY
499 #include <sys/inotify.h>
501 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
502 static int sys_inotify_init(void)
504 return (inotify_init());
506 #endif
507 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
508 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
510 return (inotify_add_watch(fd, pathname, mask));
512 #endif
513 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
514 static int sys_inotify_rm_watch(int fd, int32_t wd)
516 return (inotify_rm_watch(fd, wd));
518 #endif
519 #ifdef CONFIG_INOTIFY1
520 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
521 static int sys_inotify_init1(int flags)
523 return (inotify_init1(flags));
525 #endif
526 #endif
527 #else
528 /* Userspace can usually survive runtime without inotify */
529 #undef TARGET_NR_inotify_init
530 #undef TARGET_NR_inotify_init1
531 #undef TARGET_NR_inotify_add_watch
532 #undef TARGET_NR_inotify_rm_watch
533 #endif /* CONFIG_INOTIFY */
535 #if defined(TARGET_NR_ppoll)
536 #ifndef __NR_ppoll
537 # define __NR_ppoll -1
538 #endif
539 #define __NR_sys_ppoll __NR_ppoll
540 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
541 struct timespec *, timeout, const __sigset_t *, sigmask,
542 size_t, sigsetsize)
543 #endif
545 #if defined(TARGET_NR_pselect6)
546 #ifndef __NR_pselect6
547 # define __NR_pselect6 -1
548 #endif
549 #define __NR_sys_pselect6 __NR_pselect6
550 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
551 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
552 #endif
554 #if defined(TARGET_NR_prlimit64)
555 #ifndef __NR_prlimit64
556 # define __NR_prlimit64 -1
557 #endif
558 #define __NR_sys_prlimit64 __NR_prlimit64
559 /* The glibc rlimit structure may not be the one used by the underlying syscall */
560 struct host_rlimit64 {
561 uint64_t rlim_cur;
562 uint64_t rlim_max;
564 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
565 const struct host_rlimit64 *, new_limit,
566 struct host_rlimit64 *, old_limit)
567 #endif
569 extern int personality(int);
570 extern int flock(int, int);
571 extern int setfsuid(int);
572 extern int setfsgid(int);
573 extern int setgroups(int, gid_t *);
575 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
576 #ifdef TARGET_ARM
577 static inline int regpairs_aligned(void *cpu_env) {
578 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
580 #elif defined(TARGET_MIPS)
581 static inline int regpairs_aligned(void *cpu_env) { return 1; }
582 #else
583 static inline int regpairs_aligned(void *cpu_env) { return 0; }
584 #endif
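A hedged sketch of why the alignment matters (hypothetical helper, not in the file): on 32-bit targets a 64-bit syscall argument is passed in two consecutive 32-bit registers, and when regpairs_aligned() is true the pair must start on an even-numbered register, so one argument slot is skipped before the halves are read. The word order below is an assumption; in practice it depends on the target ABI and endianness.

#include <stdint.h>

static uint64_t regpair_to_u64(uint32_t lo, uint32_t hi)
{
    /* recombine the two 32-bit halves of a 64-bit guest argument */
    return ((uint64_t)hi << 32) | lo;
}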
586 #define ERRNO_TABLE_SIZE 1200
588 /* target_to_host_errno_table[] is initialized from
589 * host_to_target_errno_table[] in syscall_init(). */
590 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
594 * This list is the union of errno values overridden in asm-<arch>/errno.h
595 * minus the errnos that are not actually generic to all archs.
597 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
598 [EIDRM] = TARGET_EIDRM,
599 [ECHRNG] = TARGET_ECHRNG,
600 [EL2NSYNC] = TARGET_EL2NSYNC,
601 [EL3HLT] = TARGET_EL3HLT,
602 [EL3RST] = TARGET_EL3RST,
603 [ELNRNG] = TARGET_ELNRNG,
604 [EUNATCH] = TARGET_EUNATCH,
605 [ENOCSI] = TARGET_ENOCSI,
606 [EL2HLT] = TARGET_EL2HLT,
607 [EDEADLK] = TARGET_EDEADLK,
608 [ENOLCK] = TARGET_ENOLCK,
609 [EBADE] = TARGET_EBADE,
610 [EBADR] = TARGET_EBADR,
611 [EXFULL] = TARGET_EXFULL,
612 [ENOANO] = TARGET_ENOANO,
613 [EBADRQC] = TARGET_EBADRQC,
614 [EBADSLT] = TARGET_EBADSLT,
615 [EBFONT] = TARGET_EBFONT,
616 [ENOSTR] = TARGET_ENOSTR,
617 [ENODATA] = TARGET_ENODATA,
618 [ETIME] = TARGET_ETIME,
619 [ENOSR] = TARGET_ENOSR,
620 [ENONET] = TARGET_ENONET,
621 [ENOPKG] = TARGET_ENOPKG,
622 [EREMOTE] = TARGET_EREMOTE,
623 [ENOLINK] = TARGET_ENOLINK,
624 [EADV] = TARGET_EADV,
625 [ESRMNT] = TARGET_ESRMNT,
626 [ECOMM] = TARGET_ECOMM,
627 [EPROTO] = TARGET_EPROTO,
628 [EDOTDOT] = TARGET_EDOTDOT,
629 [EMULTIHOP] = TARGET_EMULTIHOP,
630 [EBADMSG] = TARGET_EBADMSG,
631 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
632 [EOVERFLOW] = TARGET_EOVERFLOW,
633 [ENOTUNIQ] = TARGET_ENOTUNIQ,
634 [EBADFD] = TARGET_EBADFD,
635 [EREMCHG] = TARGET_EREMCHG,
636 [ELIBACC] = TARGET_ELIBACC,
637 [ELIBBAD] = TARGET_ELIBBAD,
638 [ELIBSCN] = TARGET_ELIBSCN,
639 [ELIBMAX] = TARGET_ELIBMAX,
640 [ELIBEXEC] = TARGET_ELIBEXEC,
641 [EILSEQ] = TARGET_EILSEQ,
642 [ENOSYS] = TARGET_ENOSYS,
643 [ELOOP] = TARGET_ELOOP,
644 [ERESTART] = TARGET_ERESTART,
645 [ESTRPIPE] = TARGET_ESTRPIPE,
646 [ENOTEMPTY] = TARGET_ENOTEMPTY,
647 [EUSERS] = TARGET_EUSERS,
648 [ENOTSOCK] = TARGET_ENOTSOCK,
649 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
650 [EMSGSIZE] = TARGET_EMSGSIZE,
651 [EPROTOTYPE] = TARGET_EPROTOTYPE,
652 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
653 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
654 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
655 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
656 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
657 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
658 [EADDRINUSE] = TARGET_EADDRINUSE,
659 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
660 [ENETDOWN] = TARGET_ENETDOWN,
661 [ENETUNREACH] = TARGET_ENETUNREACH,
662 [ENETRESET] = TARGET_ENETRESET,
663 [ECONNABORTED] = TARGET_ECONNABORTED,
664 [ECONNRESET] = TARGET_ECONNRESET,
665 [ENOBUFS] = TARGET_ENOBUFS,
666 [EISCONN] = TARGET_EISCONN,
667 [ENOTCONN] = TARGET_ENOTCONN,
668 [EUCLEAN] = TARGET_EUCLEAN,
669 [ENOTNAM] = TARGET_ENOTNAM,
670 [ENAVAIL] = TARGET_ENAVAIL,
671 [EISNAM] = TARGET_EISNAM,
672 [EREMOTEIO] = TARGET_EREMOTEIO,
673 [ESHUTDOWN] = TARGET_ESHUTDOWN,
674 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
675 [ETIMEDOUT] = TARGET_ETIMEDOUT,
676 [ECONNREFUSED] = TARGET_ECONNREFUSED,
677 [EHOSTDOWN] = TARGET_EHOSTDOWN,
678 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
679 [EALREADY] = TARGET_EALREADY,
680 [EINPROGRESS] = TARGET_EINPROGRESS,
681 [ESTALE] = TARGET_ESTALE,
682 [ECANCELED] = TARGET_ECANCELED,
683 [ENOMEDIUM] = TARGET_ENOMEDIUM,
684 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
685 #ifdef ENOKEY
686 [ENOKEY] = TARGET_ENOKEY,
687 #endif
688 #ifdef EKEYEXPIRED
689 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
690 #endif
691 #ifdef EKEYREVOKED
692 [EKEYREVOKED] = TARGET_EKEYREVOKED,
693 #endif
694 #ifdef EKEYREJECTED
695 [EKEYREJECTED] = TARGET_EKEYREJECTED,
696 #endif
697 #ifdef EOWNERDEAD
698 [EOWNERDEAD] = TARGET_EOWNERDEAD,
699 #endif
700 #ifdef ENOTRECOVERABLE
701 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
702 #endif
705 static inline int host_to_target_errno(int err)
707 if(host_to_target_errno_table[err])
708 return host_to_target_errno_table[err];
709 return err;
712 static inline int target_to_host_errno(int err)
714 if (target_to_host_errno_table[err])
715 return target_to_host_errno_table[err];
716 return err;
719 static inline abi_long get_errno(abi_long ret)
721 if (ret == -1)
722 return -host_to_target_errno(errno);
723 else
724 return ret;
727 static inline int is_error(abi_long ret)
729 return (abi_ulong)ret >= (abi_ulong)(-4096);
732 char *target_strerror(int err)
734 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
735 return NULL;
737 return strerror(target_to_host_errno(err));
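A hypothetical usage sketch (not a function in this file) of the convention the rest of the file follows: host calls are wrapped in get_errno() so failures come back as negative target errnos, and callers check the result with is_error() before touching guest memory.

static abi_long example_open(const char *host_path)
{
    abi_long ret = get_errno(open(host_path, O_RDONLY));
    if (is_error(ret)) {
        return ret;   /* already a -TARGET_Exxx value */
    }
    return ret;       /* otherwise the host file descriptor */
}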
740 static abi_ulong target_brk;
741 static abi_ulong target_original_brk;
742 static abi_ulong brk_page;
744 void target_set_brk(abi_ulong new_brk)
746 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
747 brk_page = HOST_PAGE_ALIGN(target_brk);
750 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
751 #define DEBUGF_BRK(message, args...)
753 /* do_brk() must return target values and target errnos. */
754 abi_long do_brk(abi_ulong new_brk)
756 abi_long mapped_addr;
757 int new_alloc_size;
759 DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);
761 if (!new_brk) {
762 DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
763 return target_brk;
765 if (new_brk < target_original_brk) {
766 DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
767 return target_brk;
770 /* If the new brk is less than the highest page reserved to the
771 * target heap allocation, set it and we're almost done... */
772 if (new_brk <= brk_page) {
773 /* Heap contents are initialized to zero, as for anonymous
774 * mapped pages. */
775 if (new_brk > target_brk) {
776 memset(g2h(target_brk), 0, new_brk - target_brk);
778 target_brk = new_brk;
779 DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
780 return target_brk;
783 /* We need to allocate more memory after the brk... Note that
784 * we don't use MAP_FIXED because that will map over the top of
785 * any existing mapping (like the one with the host libc or qemu
786 * itself); instead we treat "mapped but at wrong address" as
787 * a failure and unmap again.
789 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
790 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
791 PROT_READ|PROT_WRITE,
792 MAP_ANON|MAP_PRIVATE, 0, 0));
794 if (mapped_addr == brk_page) {
795 /* Heap contents are initialized to zero, as for anonymous
796 * mapped pages. Technically the new pages are already
797 * initialized to zero since they *are* anonymous mapped
798 * pages, however we have to take care with the contents that
799 * come from the remaining part of the previous page: it may
800 * contain garbage data due to a previous heap usage (grown
801 * then shrunk). */
802 memset(g2h(target_brk), 0, brk_page - target_brk);
804 target_brk = new_brk;
805 brk_page = HOST_PAGE_ALIGN(target_brk);
806 DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
807 return target_brk;
808 } else if (mapped_addr != -1) {
809 /* Mapped but at wrong address, meaning there wasn't actually
810 * enough space for this brk.
812 target_munmap(mapped_addr, new_alloc_size);
813 mapped_addr = -1;
814 DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
816 else {
817 DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
820 #if defined(TARGET_ALPHA)
821 /* We (partially) emulate OSF/1 on Alpha, which requires we
822 return a proper errno, not an unchanged brk value. */
823 return -TARGET_ENOMEM;
824 #endif
825 /* For everything else, return the previous break. */
826 return target_brk;
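For orientation, a small worked sketch of the page rounding used above, assuming the usual round-up-to-a-power-of-two definition behind HOST_PAGE_ALIGN (the real macro lives in QEMU's headers):

#define EXAMPLE_PAGE_SIZE      4096UL
#define EXAMPLE_PAGE_ALIGN(x)  (((x) + EXAMPLE_PAGE_SIZE - 1) & ~(EXAMPLE_PAGE_SIZE - 1))
/* EXAMPLE_PAGE_ALIGN(0x12345) == 0x13000: a brk request part-way into a page
 * reserves host pages up to the next page boundary, so new_alloc_size is
 * always a whole number of pages. */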
829 static inline abi_long copy_from_user_fdset(fd_set *fds,
830 abi_ulong target_fds_addr,
831 int n)
833 int i, nw, j, k;
834 abi_ulong b, *target_fds;
836 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
837 if (!(target_fds = lock_user(VERIFY_READ,
838 target_fds_addr,
839 sizeof(abi_ulong) * nw,
840 1)))
841 return -TARGET_EFAULT;
843 FD_ZERO(fds);
844 k = 0;
845 for (i = 0; i < nw; i++) {
846 /* grab the abi_ulong */
847 __get_user(b, &target_fds[i]);
848 for (j = 0; j < TARGET_ABI_BITS; j++) {
849 /* check the bit inside the abi_ulong */
850 if ((b >> j) & 1)
851 FD_SET(k, fds);
852 k++;
856 unlock_user(target_fds, target_fds_addr, 0);
858 return 0;
861 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
862 abi_ulong target_fds_addr,
863 int n)
865 if (target_fds_addr) {
866 if (copy_from_user_fdset(fds, target_fds_addr, n))
867 return -TARGET_EFAULT;
868 *fds_ptr = fds;
869 } else {
870 *fds_ptr = NULL;
872 return 0;
875 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
876 const fd_set *fds,
877 int n)
879 int i, nw, j, k;
880 abi_long v;
881 abi_ulong *target_fds;
883 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
884 if (!(target_fds = lock_user(VERIFY_WRITE,
885 target_fds_addr,
886 sizeof(abi_ulong) * nw,
887 0)))
888 return -TARGET_EFAULT;
890 k = 0;
891 for (i = 0; i < nw; i++) {
892 v = 0;
893 for (j = 0; j < TARGET_ABI_BITS; j++) {
894 v |= ((FD_ISSET(k, fds) != 0) << j);
895 k++;
897 __put_user(v, &target_fds[i]);
900 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
902 return 0;
905 #if defined(__alpha__)
906 #define HOST_HZ 1024
907 #else
908 #define HOST_HZ 100
909 #endif
911 static inline abi_long host_to_target_clock_t(long ticks)
913 #if HOST_HZ == TARGET_HZ
914 return ticks;
915 #else
916 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
917 #endif
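A worked example of the conversion with illustrative figures: on an Alpha host HOST_HZ is 1024, so 2048 host ticks reported to a 100 Hz target become (2048 * 100) / 1024 = 200 target ticks; when host and target share the same HZ the value passes through unchanged.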
920 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
921 const struct rusage *rusage)
923 struct target_rusage *target_rusage;
925 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
926 return -TARGET_EFAULT;
927 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
928 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
929 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
930 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
931 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
932 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
933 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
934 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
935 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
936 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
937 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
938 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
939 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
940 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
941 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
942 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
943 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
944 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
945 unlock_user_struct(target_rusage, target_addr, 1);
947 return 0;
950 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
952 abi_ulong target_rlim_swap;
953 rlim_t result;
955 target_rlim_swap = tswapal(target_rlim);
956 if (target_rlim_swap == TARGET_RLIM_INFINITY)
957 return RLIM_INFINITY;
959 result = target_rlim_swap;
960 if (target_rlim_swap != (rlim_t)result)
961 return RLIM_INFINITY;
963 return result;
966 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
968 abi_ulong target_rlim_swap;
969 abi_ulong result;
971 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
972 target_rlim_swap = TARGET_RLIM_INFINITY;
973 else
974 target_rlim_swap = rlim;
975 result = tswapal(target_rlim_swap);
977 return result;
980 static inline int target_to_host_resource(int code)
982 switch (code) {
983 case TARGET_RLIMIT_AS:
984 return RLIMIT_AS;
985 case TARGET_RLIMIT_CORE:
986 return RLIMIT_CORE;
987 case TARGET_RLIMIT_CPU:
988 return RLIMIT_CPU;
989 case TARGET_RLIMIT_DATA:
990 return RLIMIT_DATA;
991 case TARGET_RLIMIT_FSIZE:
992 return RLIMIT_FSIZE;
993 case TARGET_RLIMIT_LOCKS:
994 return RLIMIT_LOCKS;
995 case TARGET_RLIMIT_MEMLOCK:
996 return RLIMIT_MEMLOCK;
997 case TARGET_RLIMIT_MSGQUEUE:
998 return RLIMIT_MSGQUEUE;
999 case TARGET_RLIMIT_NICE:
1000 return RLIMIT_NICE;
1001 case TARGET_RLIMIT_NOFILE:
1002 return RLIMIT_NOFILE;
1003 case TARGET_RLIMIT_NPROC:
1004 return RLIMIT_NPROC;
1005 case TARGET_RLIMIT_RSS:
1006 return RLIMIT_RSS;
1007 case TARGET_RLIMIT_RTPRIO:
1008 return RLIMIT_RTPRIO;
1009 case TARGET_RLIMIT_SIGPENDING:
1010 return RLIMIT_SIGPENDING;
1011 case TARGET_RLIMIT_STACK:
1012 return RLIMIT_STACK;
1013 default:
1014 return code;
1018 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1019 abi_ulong target_tv_addr)
1021 struct target_timeval *target_tv;
1023 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1024 return -TARGET_EFAULT;
1026 __get_user(tv->tv_sec, &target_tv->tv_sec);
1027 __get_user(tv->tv_usec, &target_tv->tv_usec);
1029 unlock_user_struct(target_tv, target_tv_addr, 0);
1031 return 0;
1034 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1035 const struct timeval *tv)
1037 struct target_timeval *target_tv;
1039 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1040 return -TARGET_EFAULT;
1042 __put_user(tv->tv_sec, &target_tv->tv_sec);
1043 __put_user(tv->tv_usec, &target_tv->tv_usec);
1045 unlock_user_struct(target_tv, target_tv_addr, 1);
1047 return 0;
1050 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1051 #include <mqueue.h>
1053 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1054 abi_ulong target_mq_attr_addr)
1056 struct target_mq_attr *target_mq_attr;
1058 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1059 target_mq_attr_addr, 1))
1060 return -TARGET_EFAULT;
1062 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1063 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1064 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1065 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1067 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1069 return 0;
1072 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1073 const struct mq_attr *attr)
1075 struct target_mq_attr *target_mq_attr;
1077 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1078 target_mq_attr_addr, 0))
1079 return -TARGET_EFAULT;
1081 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1082 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1083 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1084 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1086 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1088 return 0;
1090 #endif
1092 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1093 /* do_select() must return target values and target errnos. */
1094 static abi_long do_select(int n,
1095 abi_ulong rfd_addr, abi_ulong wfd_addr,
1096 abi_ulong efd_addr, abi_ulong target_tv_addr)
1098 fd_set rfds, wfds, efds;
1099 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1100 struct timeval tv, *tv_ptr;
1101 abi_long ret;
1103 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1104 if (ret) {
1105 return ret;
1107 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1108 if (ret) {
1109 return ret;
1111 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1112 if (ret) {
1113 return ret;
1116 if (target_tv_addr) {
1117 if (copy_from_user_timeval(&tv, target_tv_addr))
1118 return -TARGET_EFAULT;
1119 tv_ptr = &tv;
1120 } else {
1121 tv_ptr = NULL;
1124 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1126 if (!is_error(ret)) {
1127 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1128 return -TARGET_EFAULT;
1129 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1130 return -TARGET_EFAULT;
1131 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1132 return -TARGET_EFAULT;
1134 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1135 return -TARGET_EFAULT;
1138 return ret;
1140 #endif
1142 static abi_long do_pipe2(int host_pipe[], int flags)
1144 #ifdef CONFIG_PIPE2
1145 return pipe2(host_pipe, flags);
1146 #else
1147 return -ENOSYS;
1148 #endif
1151 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1152 int flags, int is_pipe2)
1154 int host_pipe[2];
1155 abi_long ret;
1156 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1158 if (is_error(ret))
1159 return get_errno(ret);
1161 /* Several targets have special calling conventions for the original
1162 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1163 if (!is_pipe2) {
1164 #if defined(TARGET_ALPHA)
1165 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1166 return host_pipe[0];
1167 #elif defined(TARGET_MIPS)
1168 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1169 return host_pipe[0];
1170 #elif defined(TARGET_SH4)
1171 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1172 return host_pipe[0];
1173 #endif
1176 if (put_user_s32(host_pipe[0], pipedes)
1177 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1178 return -TARGET_EFAULT;
1179 return get_errno(ret);
1182 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1183 abi_ulong target_addr,
1184 socklen_t len)
1186 struct target_ip_mreqn *target_smreqn;
1188 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1189 if (!target_smreqn)
1190 return -TARGET_EFAULT;
1191 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1192 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1193 if (len == sizeof(struct target_ip_mreqn))
1194 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1195 unlock_user(target_smreqn, target_addr, 0);
1197 return 0;
1200 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1201 abi_ulong target_addr,
1202 socklen_t len)
1204 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1205 sa_family_t sa_family;
1206 struct target_sockaddr *target_saddr;
1208 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1209 if (!target_saddr)
1210 return -TARGET_EFAULT;
1212 sa_family = tswap16(target_saddr->sa_family);
1214 /* Oops. The caller might send an incomplete sun_path; sun_path
1215 * must be terminated by \0 (see the manual page), but
1216 * unfortunately it is quite common to specify sockaddr_un
1217 * length as "strlen(x->sun_path)" while it should be
1218 * "strlen(...) + 1". We'll fix that here if needed.
1219 * The Linux kernel has a similar fixup.
1222 if (sa_family == AF_UNIX) {
1223 if (len < unix_maxlen && len > 0) {
1224 char *cp = (char*)target_saddr;
1226 if ( cp[len-1] && !cp[len] )
1227 len++;
1229 if (len > unix_maxlen)
1230 len = unix_maxlen;
1233 memcpy(addr, target_saddr, len);
1234 addr->sa_family = sa_family;
1235 unlock_user(target_saddr, target_addr, 0);
1237 return 0;
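For reference, a hypothetical host-side helper (not in the file) that computes the AF_UNIX address length the fixup above tries to recover, i.e. one that already counts the terminating NUL of sun_path:

#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static socklen_t sun_len_with_nul(const struct sockaddr_un *sun)
{
    /* offset of sun_path within the structure, plus the string and its NUL */
    return offsetof(struct sockaddr_un, sun_path)
           + strlen(sun->sun_path) + 1;
}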
1240 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1241 struct sockaddr *addr,
1242 socklen_t len)
1244 struct target_sockaddr *target_saddr;
1246 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1247 if (!target_saddr)
1248 return -TARGET_EFAULT;
1249 memcpy(target_saddr, addr, len);
1250 target_saddr->sa_family = tswap16(addr->sa_family);
1251 unlock_user(target_saddr, target_addr, len);
1253 return 0;
1256 /* ??? Should this also swap msgh->name? */
1257 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1258 struct target_msghdr *target_msgh)
1260 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1261 abi_long msg_controllen;
1262 abi_ulong target_cmsg_addr;
1263 struct target_cmsghdr *target_cmsg;
1264 socklen_t space = 0;
1266 msg_controllen = tswapal(target_msgh->msg_controllen);
1267 if (msg_controllen < sizeof (struct target_cmsghdr))
1268 goto the_end;
1269 target_cmsg_addr = tswapal(target_msgh->msg_control);
1270 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1271 if (!target_cmsg)
1272 return -TARGET_EFAULT;
1274 while (cmsg && target_cmsg) {
1275 void *data = CMSG_DATA(cmsg);
1276 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1278 int len = tswapal(target_cmsg->cmsg_len)
1279 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1281 space += CMSG_SPACE(len);
1282 if (space > msgh->msg_controllen) {
1283 space -= CMSG_SPACE(len);
1284 gemu_log("Host cmsg overflow\n");
1285 break;
1288 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1289 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1290 cmsg->cmsg_len = CMSG_LEN(len);
1292 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1293 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1294 memcpy(data, target_data, len);
1295 } else {
1296 int *fd = (int *)data;
1297 int *target_fd = (int *)target_data;
1298 int i, numfds = len / sizeof(int);
1300 for (i = 0; i < numfds; i++)
1301 fd[i] = tswap32(target_fd[i]);
1304 cmsg = CMSG_NXTHDR(msgh, cmsg);
1305 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1307 unlock_user(target_cmsg, target_cmsg_addr, 0);
1308 the_end:
1309 msgh->msg_controllen = space;
1310 return 0;
1313 /* ??? Should this also swap msgh->name? */
1314 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1315 struct msghdr *msgh)
1317 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1318 abi_long msg_controllen;
1319 abi_ulong target_cmsg_addr;
1320 struct target_cmsghdr *target_cmsg;
1321 socklen_t space = 0;
1323 msg_controllen = tswapal(target_msgh->msg_controllen);
1324 if (msg_controllen < sizeof (struct target_cmsghdr))
1325 goto the_end;
1326 target_cmsg_addr = tswapal(target_msgh->msg_control);
1327 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1328 if (!target_cmsg)
1329 return -TARGET_EFAULT;
1331 while (cmsg && target_cmsg) {
1332 void *data = CMSG_DATA(cmsg);
1333 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1335 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1337 space += TARGET_CMSG_SPACE(len);
1338 if (space > msg_controllen) {
1339 space -= TARGET_CMSG_SPACE(len);
1340 gemu_log("Target cmsg overflow\n");
1341 break;
1344 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1345 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1346 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1348 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1349 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1350 memcpy(target_data, data, len);
1351 } else {
1352 int *fd = (int *)data;
1353 int *target_fd = (int *)target_data;
1354 int i, numfds = len / sizeof(int);
1356 for (i = 0; i < numfds; i++)
1357 target_fd[i] = tswap32(fd[i]);
1360 cmsg = CMSG_NXTHDR(msgh, cmsg);
1361 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1363 unlock_user(target_cmsg, target_cmsg_addr, space);
1364 the_end:
1365 target_msgh->msg_controllen = tswapal(space);
1366 return 0;
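Illustrative only (hypothetical helper, not in the file): the standard host-side pattern for walking ancillary data and extracting file descriptors from an SCM_RIGHTS message, which is the one case the translation above handles specially by byte-swapping each descriptor.

#include <string.h>
#include <sys/socket.h>

static int first_passed_fd(struct msghdr *msgh)
{
    struct cmsghdr *cmsg;

    for (cmsg = CMSG_FIRSTHDR(msgh); cmsg; cmsg = CMSG_NXTHDR(msgh, cmsg)) {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int fd;
            memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));
            return fd;   /* first descriptor passed over the socket */
        }
    }
    return -1;           /* no SCM_RIGHTS message present */
}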
1369 /* do_setsockopt() Must return target values and target errnos. */
1370 static abi_long do_setsockopt(int sockfd, int level, int optname,
1371 abi_ulong optval_addr, socklen_t optlen)
1373 abi_long ret;
1374 int val;
1375 struct ip_mreqn *ip_mreq;
1376 struct ip_mreq_source *ip_mreq_source;
1378 switch(level) {
1379 case SOL_TCP:
1380 /* TCP options all take an 'int' value. */
1381 if (optlen < sizeof(uint32_t))
1382 return -TARGET_EINVAL;
1384 if (get_user_u32(val, optval_addr))
1385 return -TARGET_EFAULT;
1386 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1387 break;
1388 case SOL_IP:
1389 switch(optname) {
1390 case IP_TOS:
1391 case IP_TTL:
1392 case IP_HDRINCL:
1393 case IP_ROUTER_ALERT:
1394 case IP_RECVOPTS:
1395 case IP_RETOPTS:
1396 case IP_PKTINFO:
1397 case IP_MTU_DISCOVER:
1398 case IP_RECVERR:
1399 case IP_RECVTOS:
1400 #ifdef IP_FREEBIND
1401 case IP_FREEBIND:
1402 #endif
1403 case IP_MULTICAST_TTL:
1404 case IP_MULTICAST_LOOP:
1405 val = 0;
1406 if (optlen >= sizeof(uint32_t)) {
1407 if (get_user_u32(val, optval_addr))
1408 return -TARGET_EFAULT;
1409 } else if (optlen >= 1) {
1410 if (get_user_u8(val, optval_addr))
1411 return -TARGET_EFAULT;
1413 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1414 break;
1415 case IP_ADD_MEMBERSHIP:
1416 case IP_DROP_MEMBERSHIP:
1417 if (optlen < sizeof (struct target_ip_mreq) ||
1418 optlen > sizeof (struct target_ip_mreqn))
1419 return -TARGET_EINVAL;
1421 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1422 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1423 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1424 break;
1426 case IP_BLOCK_SOURCE:
1427 case IP_UNBLOCK_SOURCE:
1428 case IP_ADD_SOURCE_MEMBERSHIP:
1429 case IP_DROP_SOURCE_MEMBERSHIP:
1430 if (optlen != sizeof (struct target_ip_mreq_source))
1431 return -TARGET_EINVAL;
1433 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1434 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1435 unlock_user (ip_mreq_source, optval_addr, 0);
1436 break;
1438 default:
1439 goto unimplemented;
1441 break;
1442 case TARGET_SOL_SOCKET:
1443 switch (optname) {
1444 /* Options with 'int' argument. */
1445 case TARGET_SO_DEBUG:
1446 optname = SO_DEBUG;
1447 break;
1448 case TARGET_SO_REUSEADDR:
1449 optname = SO_REUSEADDR;
1450 break;
1451 case TARGET_SO_TYPE:
1452 optname = SO_TYPE;
1453 break;
1454 case TARGET_SO_ERROR:
1455 optname = SO_ERROR;
1456 break;
1457 case TARGET_SO_DONTROUTE:
1458 optname = SO_DONTROUTE;
1459 break;
1460 case TARGET_SO_BROADCAST:
1461 optname = SO_BROADCAST;
1462 break;
1463 case TARGET_SO_SNDBUF:
1464 optname = SO_SNDBUF;
1465 break;
1466 case TARGET_SO_RCVBUF:
1467 optname = SO_RCVBUF;
1468 break;
1469 case TARGET_SO_KEEPALIVE:
1470 optname = SO_KEEPALIVE;
1471 break;
1472 case TARGET_SO_OOBINLINE:
1473 optname = SO_OOBINLINE;
1474 break;
1475 case TARGET_SO_NO_CHECK:
1476 optname = SO_NO_CHECK;
1477 break;
1478 case TARGET_SO_PRIORITY:
1479 optname = SO_PRIORITY;
1480 break;
1481 #ifdef SO_BSDCOMPAT
1482 case TARGET_SO_BSDCOMPAT:
1483 optname = SO_BSDCOMPAT;
1484 break;
1485 #endif
1486 case TARGET_SO_PASSCRED:
1487 optname = SO_PASSCRED;
1488 break;
1489 case TARGET_SO_TIMESTAMP:
1490 optname = SO_TIMESTAMP;
1491 break;
1492 case TARGET_SO_RCVLOWAT:
1493 optname = SO_RCVLOWAT;
1494 break;
1495 case TARGET_SO_RCVTIMEO:
1496 optname = SO_RCVTIMEO;
1497 break;
1498 case TARGET_SO_SNDTIMEO:
1499 optname = SO_SNDTIMEO;
1500 break;
1501 break;
1502 default:
1503 goto unimplemented;
1505 if (optlen < sizeof(uint32_t))
1506 return -TARGET_EINVAL;
1508 if (get_user_u32(val, optval_addr))
1509 return -TARGET_EFAULT;
1510 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1511 break;
1512 default:
1513 unimplemented:
1514 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1515 ret = -TARGET_ENOPROTOOPT;
1517 return ret;
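A minimal host-side sketch (hypothetical, not part of the file) of what the TARGET_SOL_SOCKET branch above boils down to once the option name has been translated: a plain int read out of guest memory and handed to the host setsockopt().

#include <sys/socket.h>

static int set_reuseaddr(int sockfd)
{
    int one = 1;
    /* the guest's TARGET_SO_REUSEADDR has already been mapped to SO_REUSEADDR */
    return setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
}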
1520 /* do_getsockopt() Must return target values and target errnos. */
1521 static abi_long do_getsockopt(int sockfd, int level, int optname,
1522 abi_ulong optval_addr, abi_ulong optlen)
1524 abi_long ret;
1525 int len, val;
1526 socklen_t lv;
1528 switch(level) {
1529 case TARGET_SOL_SOCKET:
1530 level = SOL_SOCKET;
1531 switch (optname) {
1532 /* These don't just return a single integer */
1533 case TARGET_SO_LINGER:
1534 case TARGET_SO_RCVTIMEO:
1535 case TARGET_SO_SNDTIMEO:
1536 case TARGET_SO_PEERNAME:
1537 goto unimplemented;
1538 case TARGET_SO_PEERCRED: {
1539 struct ucred cr;
1540 socklen_t crlen;
1541 struct target_ucred *tcr;
1543 if (get_user_u32(len, optlen)) {
1544 return -TARGET_EFAULT;
1546 if (len < 0) {
1547 return -TARGET_EINVAL;
1550 crlen = sizeof(cr);
1551 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1552 &cr, &crlen));
1553 if (ret < 0) {
1554 return ret;
1556 if (len > crlen) {
1557 len = crlen;
1559 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1560 return -TARGET_EFAULT;
1562 __put_user(cr.pid, &tcr->pid);
1563 __put_user(cr.uid, &tcr->uid);
1564 __put_user(cr.gid, &tcr->gid);
1565 unlock_user_struct(tcr, optval_addr, 1);
1566 if (put_user_u32(len, optlen)) {
1567 return -TARGET_EFAULT;
1569 break;
1571 /* Options with 'int' argument. */
1572 case TARGET_SO_DEBUG:
1573 optname = SO_DEBUG;
1574 goto int_case;
1575 case TARGET_SO_REUSEADDR:
1576 optname = SO_REUSEADDR;
1577 goto int_case;
1578 case TARGET_SO_TYPE:
1579 optname = SO_TYPE;
1580 goto int_case;
1581 case TARGET_SO_ERROR:
1582 optname = SO_ERROR;
1583 goto int_case;
1584 case TARGET_SO_DONTROUTE:
1585 optname = SO_DONTROUTE;
1586 goto int_case;
1587 case TARGET_SO_BROADCAST:
1588 optname = SO_BROADCAST;
1589 goto int_case;
1590 case TARGET_SO_SNDBUF:
1591 optname = SO_SNDBUF;
1592 goto int_case;
1593 case TARGET_SO_RCVBUF:
1594 optname = SO_RCVBUF;
1595 goto int_case;
1596 case TARGET_SO_KEEPALIVE:
1597 optname = SO_KEEPALIVE;
1598 goto int_case;
1599 case TARGET_SO_OOBINLINE:
1600 optname = SO_OOBINLINE;
1601 goto int_case;
1602 case TARGET_SO_NO_CHECK:
1603 optname = SO_NO_CHECK;
1604 goto int_case;
1605 case TARGET_SO_PRIORITY:
1606 optname = SO_PRIORITY;
1607 goto int_case;
1608 #ifdef SO_BSDCOMPAT
1609 case TARGET_SO_BSDCOMPAT:
1610 optname = SO_BSDCOMPAT;
1611 goto int_case;
1612 #endif
1613 case TARGET_SO_PASSCRED:
1614 optname = SO_PASSCRED;
1615 goto int_case;
1616 case TARGET_SO_TIMESTAMP:
1617 optname = SO_TIMESTAMP;
1618 goto int_case;
1619 case TARGET_SO_RCVLOWAT:
1620 optname = SO_RCVLOWAT;
1621 goto int_case;
1622 default:
1623 goto int_case;
1625 break;
1626 case SOL_TCP:
1627 /* TCP options all take an 'int' value. */
1628 int_case:
1629 if (get_user_u32(len, optlen))
1630 return -TARGET_EFAULT;
1631 if (len < 0)
1632 return -TARGET_EINVAL;
1633 lv = sizeof(lv);
1634 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1635 if (ret < 0)
1636 return ret;
1637 if (len > lv)
1638 len = lv;
1639 if (len == 4) {
1640 if (put_user_u32(val, optval_addr))
1641 return -TARGET_EFAULT;
1642 } else {
1643 if (put_user_u8(val, optval_addr))
1644 return -TARGET_EFAULT;
1646 if (put_user_u32(len, optlen))
1647 return -TARGET_EFAULT;
1648 break;
1649 case SOL_IP:
1650 switch(optname) {
1651 case IP_TOS:
1652 case IP_TTL:
1653 case IP_HDRINCL:
1654 case IP_ROUTER_ALERT:
1655 case IP_RECVOPTS:
1656 case IP_RETOPTS:
1657 case IP_PKTINFO:
1658 case IP_MTU_DISCOVER:
1659 case IP_RECVERR:
1660 case IP_RECVTOS:
1661 #ifdef IP_FREEBIND
1662 case IP_FREEBIND:
1663 #endif
1664 case IP_MULTICAST_TTL:
1665 case IP_MULTICAST_LOOP:
1666 if (get_user_u32(len, optlen))
1667 return -TARGET_EFAULT;
1668 if (len < 0)
1669 return -TARGET_EINVAL;
1670 lv = sizeof(lv);
1671 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1672 if (ret < 0)
1673 return ret;
1674 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1675 len = 1;
1676 if (put_user_u32(len, optlen)
1677 || put_user_u8(val, optval_addr))
1678 return -TARGET_EFAULT;
1679 } else {
1680 if (len > sizeof(int))
1681 len = sizeof(int);
1682 if (put_user_u32(len, optlen)
1683 || put_user_u32(val, optval_addr))
1684 return -TARGET_EFAULT;
1686 break;
1687 default:
1688 ret = -TARGET_ENOPROTOOPT;
1689 break;
1691 break;
1692 default:
1693 unimplemented:
1694 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1695 level, optname);
1696 ret = -TARGET_EOPNOTSUPP;
1697 break;
1699 return ret;
1702 /* FIXME
1703 * lock_iovec()/unlock_iovec() have a return code of 0 for success, whereas
1704 * other lock functions have a return code of 0 for failure.
1706 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1707 int count, int copy)
1709 struct target_iovec *target_vec;
1710 abi_ulong base;
1711 int i;
1713 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1714 if (!target_vec)
1715 return -TARGET_EFAULT;
1716 for(i = 0;i < count; i++) {
1717 base = tswapal(target_vec[i].iov_base);
1718 vec[i].iov_len = tswapal(target_vec[i].iov_len);
1719 if (vec[i].iov_len != 0) {
1720 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1721 /* Don't check lock_user return value. We must call writev even
1722 if an element has an invalid base address. */
1723 } else {
1724 /* zero length pointer is ignored */
1725 vec[i].iov_base = NULL;
1728 unlock_user (target_vec, target_addr, 0);
1729 return 0;
1732 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1733 int count, int copy)
1735 struct target_iovec *target_vec;
1736 abi_ulong base;
1737 int i;
1739 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1740 if (!target_vec)
1741 return -TARGET_EFAULT;
1742 for(i = 0;i < count; i++) {
1743 if (target_vec[i].iov_base) {
1744 base = tswapal(target_vec[i].iov_base);
1745 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1748 unlock_user (target_vec, target_addr, 0);
1750 return 0;
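lock_iovec() builds the host struct iovec array that scatter/gather calls such as writev() and sendmsg() consume; an illustrative host-side write (hypothetical helper, not in the file):

#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>

static ssize_t write_two(int fd, const char *a, const char *b)
{
    struct iovec iov[2];

    iov[0].iov_base = (void *)a;
    iov[0].iov_len  = strlen(a);
    iov[1].iov_base = (void *)b;
    iov[1].iov_len  = strlen(b);
    return writev(fd, iov, 2);   /* writes a then b in one system call */
}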
1753 /* do_socket() Must return target values and target errnos. */
1754 static abi_long do_socket(int domain, int type, int protocol)
1756 #if defined(TARGET_MIPS)
1757 switch(type) {
1758 case TARGET_SOCK_DGRAM:
1759 type = SOCK_DGRAM;
1760 break;
1761 case TARGET_SOCK_STREAM:
1762 type = SOCK_STREAM;
1763 break;
1764 case TARGET_SOCK_RAW:
1765 type = SOCK_RAW;
1766 break;
1767 case TARGET_SOCK_RDM:
1768 type = SOCK_RDM;
1769 break;
1770 case TARGET_SOCK_SEQPACKET:
1771 type = SOCK_SEQPACKET;
1772 break;
1773 case TARGET_SOCK_PACKET:
1774 type = SOCK_PACKET;
1775 break;
1777 #endif
1778 if (domain == PF_NETLINK)
1779 return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1780 return get_errno(socket(domain, type, protocol));
1783 /* do_bind() Must return target values and target errnos. */
1784 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1785 socklen_t addrlen)
1787 void *addr;
1788 abi_long ret;
1790 if ((int)addrlen < 0) {
1791 return -TARGET_EINVAL;
1794 addr = alloca(addrlen+1);
1796 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1797 if (ret)
1798 return ret;
1800 return get_errno(bind(sockfd, addr, addrlen));
1803 /* do_connect() Must return target values and target errnos. */
1804 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1805 socklen_t addrlen)
1807 void *addr;
1808 abi_long ret;
1810 if ((int)addrlen < 0) {
1811 return -TARGET_EINVAL;
1814 addr = alloca(addrlen);
1816 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1817 if (ret)
1818 return ret;
1820 return get_errno(connect(sockfd, addr, addrlen));
1823 /* do_sendrecvmsg() Must return target values and target errnos. */
1824 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1825 int flags, int send)
1827 abi_long ret, len;
1828 struct target_msghdr *msgp;
1829 struct msghdr msg;
1830 int count;
1831 struct iovec *vec;
1832 abi_ulong target_vec;
1834 /* FIXME */
1835 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1836 msgp,
1837 target_msg,
1838 send ? 1 : 0))
1839 return -TARGET_EFAULT;
1840 if (msgp->msg_name) {
1841 msg.msg_namelen = tswap32(msgp->msg_namelen);
1842 msg.msg_name = alloca(msg.msg_namelen);
1843 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1844 msg.msg_namelen);
1845 if (ret) {
1846 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1847 return ret;
1849 } else {
1850 msg.msg_name = NULL;
1851 msg.msg_namelen = 0;
1853 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1854 msg.msg_control = alloca(msg.msg_controllen);
1855 msg.msg_flags = tswap32(msgp->msg_flags);
1857 count = tswapal(msgp->msg_iovlen);
1858 vec = alloca(count * sizeof(struct iovec));
1859 target_vec = tswapal(msgp->msg_iov);
1860 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1861 msg.msg_iovlen = count;
1862 msg.msg_iov = vec;
1864 if (send) {
1865 ret = target_to_host_cmsg(&msg, msgp);
1866 if (ret == 0)
1867 ret = get_errno(sendmsg(fd, &msg, flags));
1868 } else {
1869 ret = get_errno(recvmsg(fd, &msg, flags));
1870 if (!is_error(ret)) {
1871 len = ret;
1872 ret = host_to_target_cmsg(msgp, &msg);
1873 if (!is_error(ret))
1874 ret = len;
1877 unlock_iovec(vec, target_vec, count, !send);
1878 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1879 return ret;
1882 /* do_accept() Must return target values and target errnos. */
1883 static abi_long do_accept(int fd, abi_ulong target_addr,
1884 abi_ulong target_addrlen_addr)
1886 socklen_t addrlen;
1887 void *addr;
1888 abi_long ret;
1890 if (target_addr == 0)
1891 return get_errno(accept(fd, NULL, NULL));
1893 /* Linux returns EINVAL if the addrlen pointer is invalid */
1894 if (get_user_u32(addrlen, target_addrlen_addr))
1895 return -TARGET_EINVAL;
1897 if ((int)addrlen < 0) {
1898 return -TARGET_EINVAL;
1901 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1902 return -TARGET_EINVAL;
1904 addr = alloca(addrlen);
1906 ret = get_errno(accept(fd, addr, &addrlen));
1907 if (!is_error(ret)) {
1908 host_to_target_sockaddr(target_addr, addr, addrlen);
1909 if (put_user_u32(addrlen, target_addrlen_addr))
1910 ret = -TARGET_EFAULT;
1912 return ret;
1915 /* do_getpeername() Must return target values and target errnos. */
1916 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1917 abi_ulong target_addrlen_addr)
1919 socklen_t addrlen;
1920 void *addr;
1921 abi_long ret;
1923 if (get_user_u32(addrlen, target_addrlen_addr))
1924 return -TARGET_EFAULT;
1926 if ((int)addrlen < 0) {
1927 return -TARGET_EINVAL;
1930 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1931 return -TARGET_EFAULT;
1933 addr = alloca(addrlen);
1935 ret = get_errno(getpeername(fd, addr, &addrlen));
1936 if (!is_error(ret)) {
1937 host_to_target_sockaddr(target_addr, addr, addrlen);
1938 if (put_user_u32(addrlen, target_addrlen_addr))
1939 ret = -TARGET_EFAULT;
1941 return ret;
1944 /* do_getsockname() Must return target values and target errnos. */
1945 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1946 abi_ulong target_addrlen_addr)
1948 socklen_t addrlen;
1949 void *addr;
1950 abi_long ret;
1952 if (get_user_u32(addrlen, target_addrlen_addr))
1953 return -TARGET_EFAULT;
1955 if ((int)addrlen < 0) {
1956 return -TARGET_EINVAL;
1959 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1960 return -TARGET_EFAULT;
1962 addr = alloca(addrlen);
1964 ret = get_errno(getsockname(fd, addr, &addrlen));
1965 if (!is_error(ret)) {
1966 host_to_target_sockaddr(target_addr, addr, addrlen);
1967 if (put_user_u32(addrlen, target_addrlen_addr))
1968 ret = -TARGET_EFAULT;
1970 return ret;
1973 /* do_socketpair() Must return target values and target errnos. */
1974 static abi_long do_socketpair(int domain, int type, int protocol,
1975 abi_ulong target_tab_addr)
1977 int tab[2];
1978 abi_long ret;
1980 ret = get_errno(socketpair(domain, type, protocol, tab));
1981 if (!is_error(ret)) {
1982 if (put_user_s32(tab[0], target_tab_addr)
1983 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1984 ret = -TARGET_EFAULT;
1986 return ret;
1989 /* do_sendto() Must return target values and target errnos. */
1990 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1991 abi_ulong target_addr, socklen_t addrlen)
1993 void *addr;
1994 void *host_msg;
1995 abi_long ret;
1997 if ((int)addrlen < 0) {
1998 return -TARGET_EINVAL;
2001 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2002 if (!host_msg)
2003 return -TARGET_EFAULT;
2004 if (target_addr) {
2005 addr = alloca(addrlen);
2006 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2007 if (ret) {
2008 unlock_user(host_msg, msg, 0);
2009 return ret;
2011 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2012 } else {
2013 ret = get_errno(send(fd, host_msg, len, flags));
2015 unlock_user(host_msg, msg, 0);
2016 return ret;
2019 /* do_recvfrom() Must return target values and target errnos. */
2020 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2021 abi_ulong target_addr,
2022 abi_ulong target_addrlen)
2024 socklen_t addrlen;
2025 void *addr;
2026 void *host_msg;
2027 abi_long ret;
2029 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2030 if (!host_msg)
2031 return -TARGET_EFAULT;
2032 if (target_addr) {
2033 if (get_user_u32(addrlen, target_addrlen)) {
2034 ret = -TARGET_EFAULT;
2035 goto fail;
2037 if ((int)addrlen < 0) {
2038 ret = -TARGET_EINVAL;
2039 goto fail;
2041 addr = alloca(addrlen);
2042 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2043 } else {
2044 addr = NULL; /* To keep compiler quiet. */
2045 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2047 if (!is_error(ret)) {
2048 if (target_addr) {
2049 host_to_target_sockaddr(target_addr, addr, addrlen);
2050 if (put_user_u32(addrlen, target_addrlen)) {
2051 ret = -TARGET_EFAULT;
2052 goto fail;
2055 unlock_user(host_msg, msg, len);
2056 } else {
2057 fail:
2058 unlock_user(host_msg, msg, 0);
2060 return ret;
2063 #ifdef TARGET_NR_socketcall
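/* socketcall(2) multiplexes every socket operation through one syscall:
 * 'num' selects the operation and 'vptr' points to an array of abi_ulong
 * arguments in guest memory, unpacked below with get_user_ual() at
 * offsets that are multiples of sizeof(abi_ulong).
 *
 * Illustrative guest-side view (not part of this file): a guest issuing
 * socketcall() with the SOCKOP_socket opcode and an argument array of
 * { domain, type, protocol } lands in the SOCKOP_socket case below and is
 * forwarded to do_socket(domain, type, protocol). */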
2064 /* do_socketcall() Must return target values and target errnos. */
2065 static abi_long do_socketcall(int num, abi_ulong vptr)
2067 abi_long ret;
2068 const int n = sizeof(abi_ulong);
2070 switch(num) {
2071 case SOCKOP_socket:
2073 abi_ulong domain, type, protocol;
2075 if (get_user_ual(domain, vptr)
2076 || get_user_ual(type, vptr + n)
2077 || get_user_ual(protocol, vptr + 2 * n))
2078 return -TARGET_EFAULT;
2080 ret = do_socket(domain, type, protocol);
2082 break;
2083 case SOCKOP_bind:
2085 abi_ulong sockfd;
2086 abi_ulong target_addr;
2087 socklen_t addrlen;
2089 if (get_user_ual(sockfd, vptr)
2090 || get_user_ual(target_addr, vptr + n)
2091 || get_user_ual(addrlen, vptr + 2 * n))
2092 return -TARGET_EFAULT;
2094 ret = do_bind(sockfd, target_addr, addrlen);
2096 break;
2097 case SOCKOP_connect:
2099 abi_ulong sockfd;
2100 abi_ulong target_addr;
2101 socklen_t addrlen;
2103 if (get_user_ual(sockfd, vptr)
2104 || get_user_ual(target_addr, vptr + n)
2105 || get_user_ual(addrlen, vptr + 2 * n))
2106 return -TARGET_EFAULT;
2108 ret = do_connect(sockfd, target_addr, addrlen);
2110 break;
2111 case SOCKOP_listen:
2113 abi_ulong sockfd, backlog;
2115 if (get_user_ual(sockfd, vptr)
2116 || get_user_ual(backlog, vptr + n))
2117 return -TARGET_EFAULT;
2119 ret = get_errno(listen(sockfd, backlog));
2121 break;
2122 case SOCKOP_accept:
2124 abi_ulong sockfd;
2125 abi_ulong target_addr, target_addrlen;
2127 if (get_user_ual(sockfd, vptr)
2128 || get_user_ual(target_addr, vptr + n)
2129 || get_user_ual(target_addrlen, vptr + 2 * n))
2130 return -TARGET_EFAULT;
2132 ret = do_accept(sockfd, target_addr, target_addrlen);
2134 break;
2135 case SOCKOP_getsockname:
2137 abi_ulong sockfd;
2138 abi_ulong target_addr, target_addrlen;
2140 if (get_user_ual(sockfd, vptr)
2141 || get_user_ual(target_addr, vptr + n)
2142 || get_user_ual(target_addrlen, vptr + 2 * n))
2143 return -TARGET_EFAULT;
2145 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2147 break;
2148 case SOCKOP_getpeername:
2150 abi_ulong sockfd;
2151 abi_ulong target_addr, target_addrlen;
2153 if (get_user_ual(sockfd, vptr)
2154 || get_user_ual(target_addr, vptr + n)
2155 || get_user_ual(target_addrlen, vptr + 2 * n))
2156 return -TARGET_EFAULT;
2158 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2160 break;
2161 case SOCKOP_socketpair:
2163 abi_ulong domain, type, protocol;
2164 abi_ulong tab;
2166 if (get_user_ual(domain, vptr)
2167 || get_user_ual(type, vptr + n)
2168 || get_user_ual(protocol, vptr + 2 * n)
2169 || get_user_ual(tab, vptr + 3 * n))
2170 return -TARGET_EFAULT;
2172 ret = do_socketpair(domain, type, protocol, tab);
2174 break;
2175 case SOCKOP_send:
2177 abi_ulong sockfd;
2178 abi_ulong msg;
2179 size_t len;
2180 abi_ulong flags;
2182 if (get_user_ual(sockfd, vptr)
2183 || get_user_ual(msg, vptr + n)
2184 || get_user_ual(len, vptr + 2 * n)
2185 || get_user_ual(flags, vptr + 3 * n))
2186 return -TARGET_EFAULT;
2188 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2190 break;
2191 case SOCKOP_recv:
2193 abi_ulong sockfd;
2194 abi_ulong msg;
2195 size_t len;
2196 abi_ulong flags;
2198 if (get_user_ual(sockfd, vptr)
2199 || get_user_ual(msg, vptr + n)
2200 || get_user_ual(len, vptr + 2 * n)
2201 || get_user_ual(flags, vptr + 3 * n))
2202 return -TARGET_EFAULT;
2204 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2206 break;
2207 case SOCKOP_sendto:
2209 abi_ulong sockfd;
2210 abi_ulong msg;
2211 size_t len;
2212 abi_ulong flags;
2213 abi_ulong addr;
2214 socklen_t addrlen;
2216 if (get_user_ual(sockfd, vptr)
2217 || get_user_ual(msg, vptr + n)
2218 || get_user_ual(len, vptr + 2 * n)
2219 || get_user_ual(flags, vptr + 3 * n)
2220 || get_user_ual(addr, vptr + 4 * n)
2221 || get_user_ual(addrlen, vptr + 5 * n))
2222 return -TARGET_EFAULT;
2224 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2226 break;
2227 case SOCKOP_recvfrom:
2229 abi_ulong sockfd;
2230 abi_ulong msg;
2231 size_t len;
2232 abi_ulong flags;
2233 abi_ulong addr;
2234 socklen_t addrlen;
2236 if (get_user_ual(sockfd, vptr)
2237 || get_user_ual(msg, vptr + n)
2238 || get_user_ual(len, vptr + 2 * n)
2239 || get_user_ual(flags, vptr + 3 * n)
2240 || get_user_ual(addr, vptr + 4 * n)
2241 || get_user_ual(addrlen, vptr + 5 * n))
2242 return -TARGET_EFAULT;
2244 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2246 break;
2247 case SOCKOP_shutdown:
2249 abi_ulong sockfd, how;
2251 if (get_user_ual(sockfd, vptr)
2252 || get_user_ual(how, vptr + n))
2253 return -TARGET_EFAULT;
2255 ret = get_errno(shutdown(sockfd, how));
2257 break;
2258 case SOCKOP_sendmsg:
2259 case SOCKOP_recvmsg:
2261 abi_ulong fd;
2262 abi_ulong target_msg;
2263 abi_ulong flags;
2265 if (get_user_ual(fd, vptr)
2266 || get_user_ual(target_msg, vptr + n)
2267 || get_user_ual(flags, vptr + 2 * n))
2268 return -TARGET_EFAULT;
2270 ret = do_sendrecvmsg(fd, target_msg, flags,
2271 (num == SOCKOP_sendmsg));
2273 break;
2274 case SOCKOP_setsockopt:
2276 abi_ulong sockfd;
2277 abi_ulong level;
2278 abi_ulong optname;
2279 abi_ulong optval;
2280 socklen_t optlen;
2282 if (get_user_ual(sockfd, vptr)
2283 || get_user_ual(level, vptr + n)
2284 || get_user_ual(optname, vptr + 2 * n)
2285 || get_user_ual(optval, vptr + 3 * n)
2286 || get_user_ual(optlen, vptr + 4 * n))
2287 return -TARGET_EFAULT;
2289 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2291 break;
2292 case SOCKOP_getsockopt:
2294 abi_ulong sockfd;
2295 abi_ulong level;
2296 abi_ulong optname;
2297 abi_ulong optval;
2298 socklen_t optlen;
2300 if (get_user_ual(sockfd, vptr)
2301 || get_user_ual(level, vptr + n)
2302 || get_user_ual(optname, vptr + 2 * n)
2303 || get_user_ual(optval, vptr + 3 * n)
2304 || get_user_ual(optlen, vptr + 4 * n))
2305 return -TARGET_EFAULT;
2307 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2309 break;
2310 default:
2311 gemu_log("Unsupported socketcall: %d\n", num);
2312 ret = -TARGET_ENOSYS;
2313 break;
2315 return ret;
2317 #endif
2319 #define N_SHM_REGIONS 32
2321 static struct shm_region {
2322 abi_ulong start;
2323 abi_ulong size;
2324 } shm_regions[N_SHM_REGIONS];
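/* do_shmat() below records each attached segment's guest address and size
 * in shm_regions[] so that do_shmdt() can later clear the page flags for
 * the detached range; at most N_SHM_REGIONS segments are tracked at once. */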
2326 struct target_ipc_perm
2328 abi_long __key;
2329 abi_ulong uid;
2330 abi_ulong gid;
2331 abi_ulong cuid;
2332 abi_ulong cgid;
2333 unsigned short int mode;
2334 unsigned short int __pad1;
2335 unsigned short int __seq;
2336 unsigned short int __pad2;
2337 abi_ulong __unused1;
2338 abi_ulong __unused2;
2341 struct target_semid_ds
2343 struct target_ipc_perm sem_perm;
2344 abi_ulong sem_otime;
2345 abi_ulong __unused1;
2346 abi_ulong sem_ctime;
2347 abi_ulong __unused2;
2348 abi_ulong sem_nsems;
2349 abi_ulong __unused3;
2350 abi_ulong __unused4;
2353 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2354 abi_ulong target_addr)
2356 struct target_ipc_perm *target_ip;
2357 struct target_semid_ds *target_sd;
2359 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2360 return -TARGET_EFAULT;
2361 target_ip = &(target_sd->sem_perm);
2362 host_ip->__key = tswapal(target_ip->__key);
2363 host_ip->uid = tswapal(target_ip->uid);
2364 host_ip->gid = tswapal(target_ip->gid);
2365 host_ip->cuid = tswapal(target_ip->cuid);
2366 host_ip->cgid = tswapal(target_ip->cgid);
2367 host_ip->mode = tswap16(target_ip->mode);
2368 unlock_user_struct(target_sd, target_addr, 0);
2369 return 0;
2372 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2373 struct ipc_perm *host_ip)
2375 struct target_ipc_perm *target_ip;
2376 struct target_semid_ds *target_sd;
2378 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2379 return -TARGET_EFAULT;
2380 target_ip = &(target_sd->sem_perm);
2381 target_ip->__key = tswapal(host_ip->__key);
2382 target_ip->uid = tswapal(host_ip->uid);
2383 target_ip->gid = tswapal(host_ip->gid);
2384 target_ip->cuid = tswapal(host_ip->cuid);
2385 target_ip->cgid = tswapal(host_ip->cgid);
2386 target_ip->mode = tswap16(host_ip->mode);
2387 unlock_user_struct(target_sd, target_addr, 1);
2388 return 0;
2391 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2392 abi_ulong target_addr)
2394 struct target_semid_ds *target_sd;
2396 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2397 return -TARGET_EFAULT;
2398 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2399 return -TARGET_EFAULT;
2400 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2401 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2402 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2403 unlock_user_struct(target_sd, target_addr, 0);
2404 return 0;
2407 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2408 struct semid_ds *host_sd)
2410 struct target_semid_ds *target_sd;
2412 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2413 return -TARGET_EFAULT;
2414 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2415 return -TARGET_EFAULT;
2416 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2417 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2418 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2419 unlock_user_struct(target_sd, target_addr, 1);
2420 return 0;
2423 struct target_seminfo {
2424 int semmap;
2425 int semmni;
2426 int semmns;
2427 int semmnu;
2428 int semmsl;
2429 int semopm;
2430 int semume;
2431 int semusz;
2432 int semvmx;
2433 int semaem;
2436 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2437 struct seminfo *host_seminfo)
2439 struct target_seminfo *target_seminfo;
2440 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2441 return -TARGET_EFAULT;
2442 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2443 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2444 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2445 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2446 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2447 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2448 __put_user(host_seminfo->semume, &target_seminfo->semume);
2449 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2450 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2451 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2452 unlock_user_struct(target_seminfo, target_addr, 1);
2453 return 0;
2456 union semun {
2457 int val;
2458 struct semid_ds *buf;
2459 unsigned short *array;
2460 struct seminfo *__buf;
2463 union target_semun {
2464 int val;
2465 abi_ulong buf;
2466 abi_ulong array;
2467 abi_ulong __buf;
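/* The host 'union semun' holds real pointers, while 'union target_semun'
 * holds guest addresses (abi_ulong); do_semctl() below converts between
 * the two as required by each command (SETVAL/GETVAL, GETALL/SETALL,
 * IPC_STAT/IPC_SET, IPC_INFO/SEM_INFO, ...). */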
2470 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2471 abi_ulong target_addr)
2473 int nsems;
2474 unsigned short *array;
2475 union semun semun;
2476 struct semid_ds semid_ds;
2477 int i, ret;
2479 semun.buf = &semid_ds;
2481 ret = semctl(semid, 0, IPC_STAT, semun);
2482 if (ret == -1)
2483 return get_errno(ret);
2485 nsems = semid_ds.sem_nsems;
2487 *host_array = malloc(nsems*sizeof(unsigned short));
2488 array = lock_user(VERIFY_READ, target_addr,
2489 nsems*sizeof(unsigned short), 1);
2490 if (!array)
2491 return -TARGET_EFAULT;
2493 for(i=0; i<nsems; i++) {
2494 __get_user((*host_array)[i], &array[i]);
2496 unlock_user(array, target_addr, 0);
2498 return 0;
2501 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2502 unsigned short **host_array)
2504 int nsems;
2505 unsigned short *array;
2506 union semun semun;
2507 struct semid_ds semid_ds;
2508 int i, ret;
2510 semun.buf = &semid_ds;
2512 ret = semctl(semid, 0, IPC_STAT, semun);
2513 if (ret == -1)
2514 return get_errno(ret);
2516 nsems = semid_ds.sem_nsems;
2518 array = lock_user(VERIFY_WRITE, target_addr,
2519 nsems*sizeof(unsigned short), 0);
2520 if (!array)
2521 return -TARGET_EFAULT;
2523 for(i=0; i<nsems; i++) {
2524 __put_user((*host_array)[i], &array[i]);
2526 free(*host_array);
2527 unlock_user(array, target_addr, 1);
2529 return 0;
2532 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2533 union target_semun target_su)
2535 union semun arg;
2536 struct semid_ds dsarg;
2537 unsigned short *array = NULL;
2538 struct seminfo seminfo;
2539 abi_long ret = -TARGET_EINVAL;
2540 abi_long err;
2541 cmd &= 0xff;
2543 switch( cmd ) {
2544 case GETVAL:
2545 case SETVAL:
2546 arg.val = tswap32(target_su.val);
2547 ret = get_errno(semctl(semid, semnum, cmd, arg));
2548 target_su.val = tswap32(arg.val);
2549 break;
2550 case GETALL:
2551 case SETALL:
2552 err = target_to_host_semarray(semid, &array, target_su.array);
2553 if (err)
2554 return err;
2555 arg.array = array;
2556 ret = get_errno(semctl(semid, semnum, cmd, arg));
2557 err = host_to_target_semarray(semid, target_su.array, &array);
2558 if (err)
2559 return err;
2560 break;
2561 case IPC_STAT:
2562 case IPC_SET:
2563 case SEM_STAT:
2564 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2565 if (err)
2566 return err;
2567 arg.buf = &dsarg;
2568 ret = get_errno(semctl(semid, semnum, cmd, arg));
2569 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2570 if (err)
2571 return err;
2572 break;
2573 case IPC_INFO:
2574 case SEM_INFO:
2575 arg.__buf = &seminfo;
2576 ret = get_errno(semctl(semid, semnum, cmd, arg));
2577 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2578 if (err)
2579 return err;
2580 break;
2581 case IPC_RMID:
2582 case GETPID:
2583 case GETNCNT:
2584 case GETZCNT:
2585 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2586 break;
2589 return ret;
2592 struct target_sembuf {
2593 unsigned short sem_num;
2594 short sem_op;
2595 short sem_flg;
2598 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2599 abi_ulong target_addr,
2600 unsigned nsops)
2602 struct target_sembuf *target_sembuf;
2603 int i;
2605 target_sembuf = lock_user(VERIFY_READ, target_addr,
2606 nsops*sizeof(struct target_sembuf), 1);
2607 if (!target_sembuf)
2608 return -TARGET_EFAULT;
2610 for(i=0; i<nsops; i++) {
2611 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2612 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2613 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2616 unlock_user(target_sembuf, target_addr, 0);
2618 return 0;
2621 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2623 struct sembuf sops[nsops];
2625 if (target_to_host_sembuf(sops, ptr, nsops))
2626 return -TARGET_EFAULT;
2628 return semop(semid, sops, nsops);
2631 struct target_msqid_ds
2633 struct target_ipc_perm msg_perm;
2634 abi_ulong msg_stime;
2635 #if TARGET_ABI_BITS == 32
2636 abi_ulong __unused1;
2637 #endif
2638 abi_ulong msg_rtime;
2639 #if TARGET_ABI_BITS == 32
2640 abi_ulong __unused2;
2641 #endif
2642 abi_ulong msg_ctime;
2643 #if TARGET_ABI_BITS == 32
2644 abi_ulong __unused3;
2645 #endif
2646 abi_ulong __msg_cbytes;
2647 abi_ulong msg_qnum;
2648 abi_ulong msg_qbytes;
2649 abi_ulong msg_lspid;
2650 abi_ulong msg_lrpid;
2651 abi_ulong __unused4;
2652 abi_ulong __unused5;
2655 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2656 abi_ulong target_addr)
2658 struct target_msqid_ds *target_md;
2660 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2661 return -TARGET_EFAULT;
2662 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2663 return -TARGET_EFAULT;
2664 host_md->msg_stime = tswapal(target_md->msg_stime);
2665 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2666 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2667 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2668 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2669 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2670 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2671 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2672 unlock_user_struct(target_md, target_addr, 0);
2673 return 0;
2676 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2677 struct msqid_ds *host_md)
2679 struct target_msqid_ds *target_md;
2681 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2682 return -TARGET_EFAULT;
2683 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2684 return -TARGET_EFAULT;
2685 target_md->msg_stime = tswapal(host_md->msg_stime);
2686 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2687 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2688 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2689 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2690 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2691 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2692 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2693 unlock_user_struct(target_md, target_addr, 1);
2694 return 0;
2697 struct target_msginfo {
2698 int msgpool;
2699 int msgmap;
2700 int msgmax;
2701 int msgmnb;
2702 int msgmni;
2703 int msgssz;
2704 int msgtql;
2705 unsigned short int msgseg;
2708 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2709 struct msginfo *host_msginfo)
2711 struct target_msginfo *target_msginfo;
2712 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2713 return -TARGET_EFAULT;
2714 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2715 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2716 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2717 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2718 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2719 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2720 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2721 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2722 unlock_user_struct(target_msginfo, target_addr, 1);
2723 return 0;
2726 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2728 struct msqid_ds dsarg;
2729 struct msginfo msginfo;
2730 abi_long ret = -TARGET_EINVAL;
2732 cmd &= 0xff;
2734 switch (cmd) {
2735 case IPC_STAT:
2736 case IPC_SET:
2737 case MSG_STAT:
2738 if (target_to_host_msqid_ds(&dsarg,ptr))
2739 return -TARGET_EFAULT;
2740 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2741 if (host_to_target_msqid_ds(ptr,&dsarg))
2742 return -TARGET_EFAULT;
2743 break;
2744 case IPC_RMID:
2745 ret = get_errno(msgctl(msgid, cmd, NULL));
2746 break;
2747 case IPC_INFO:
2748 case MSG_INFO:
2749 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2750 if (host_to_target_msginfo(ptr, &msginfo))
2751 return -TARGET_EFAULT;
2752 break;
2755 return ret;
2758 struct target_msgbuf {
2759 abi_long mtype;
2760 char mtext[1];
2763 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2764 unsigned int msgsz, int msgflg)
2766 struct target_msgbuf *target_mb;
2767 struct msgbuf *host_mb;
2768 abi_long ret = 0;
2770 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2771 return -TARGET_EFAULT;
2772 host_mb = malloc(msgsz+sizeof(long));
2773 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2774 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2775 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2776 free(host_mb);
2777 unlock_user_struct(target_mb, msgp, 0);
2779 return ret;
2782 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2783 unsigned int msgsz, abi_long msgtyp,
2784 int msgflg)
2786 struct target_msgbuf *target_mb;
2787 char *target_mtext;
2788 struct msgbuf *host_mb;
2789 abi_long ret = 0;
2791 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2792 return -TARGET_EFAULT;
2794 host_mb = malloc(msgsz+sizeof(long));
2795 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
2797 if (ret > 0) {
2798 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2799 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2800 if (!target_mtext) {
2801 ret = -TARGET_EFAULT;
2802 goto end;
2804 memcpy(target_mb->mtext, host_mb->mtext, ret);
2805 unlock_user(target_mtext, target_mtext_addr, ret);
2808 target_mb->mtype = tswapal(host_mb->mtype);
2809 free(host_mb);
2811 end:
2812 if (target_mb)
2813 unlock_user_struct(target_mb, msgp, 1);
2814 return ret;
2817 struct target_shmid_ds
2819 struct target_ipc_perm shm_perm;
2820 abi_ulong shm_segsz;
2821 abi_ulong shm_atime;
2822 #if TARGET_ABI_BITS == 32
2823 abi_ulong __unused1;
2824 #endif
2825 abi_ulong shm_dtime;
2826 #if TARGET_ABI_BITS == 32
2827 abi_ulong __unused2;
2828 #endif
2829 abi_ulong shm_ctime;
2830 #if TARGET_ABI_BITS == 32
2831 abi_ulong __unused3;
2832 #endif
2833 int shm_cpid;
2834 int shm_lpid;
2835 abi_ulong shm_nattch;
2836 unsigned long int __unused4;
2837 unsigned long int __unused5;
2840 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2841 abi_ulong target_addr)
2843 struct target_shmid_ds *target_sd;
2845 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2846 return -TARGET_EFAULT;
2847 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2848 return -TARGET_EFAULT;
2849 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2850 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2851 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2852 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2853 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2854 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2855 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2856 unlock_user_struct(target_sd, target_addr, 0);
2857 return 0;
2860 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2861 struct shmid_ds *host_sd)
2863 struct target_shmid_ds *target_sd;
2865 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2866 return -TARGET_EFAULT;
2867 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2868 return -TARGET_EFAULT;
2869 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2870 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2871 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2872 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2873 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2874 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2875 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2876 unlock_user_struct(target_sd, target_addr, 1);
2877 return 0;
2880 struct target_shminfo {
2881 abi_ulong shmmax;
2882 abi_ulong shmmin;
2883 abi_ulong shmmni;
2884 abi_ulong shmseg;
2885 abi_ulong shmall;
2888 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2889 struct shminfo *host_shminfo)
2891 struct target_shminfo *target_shminfo;
2892 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2893 return -TARGET_EFAULT;
2894 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2895 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2896 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2897 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2898 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2899 unlock_user_struct(target_shminfo, target_addr, 1);
2900 return 0;
2903 struct target_shm_info {
2904 int used_ids;
2905 abi_ulong shm_tot;
2906 abi_ulong shm_rss;
2907 abi_ulong shm_swp;
2908 abi_ulong swap_attempts;
2909 abi_ulong swap_successes;
2912 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2913 struct shm_info *host_shm_info)
2915 struct target_shm_info *target_shm_info;
2916 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2917 return -TARGET_EFAULT;
2918 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2919 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2920 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2921 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2922 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2923 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2924 unlock_user_struct(target_shm_info, target_addr, 1);
2925 return 0;
2928 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2930 struct shmid_ds dsarg;
2931 struct shminfo shminfo;
2932 struct shm_info shm_info;
2933 abi_long ret = -TARGET_EINVAL;
2935 cmd &= 0xff;
2937 switch(cmd) {
2938 case IPC_STAT:
2939 case IPC_SET:
2940 case SHM_STAT:
2941 if (target_to_host_shmid_ds(&dsarg, buf))
2942 return -TARGET_EFAULT;
2943 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2944 if (host_to_target_shmid_ds(buf, &dsarg))
2945 return -TARGET_EFAULT;
2946 break;
2947 case IPC_INFO:
2948 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2949 if (host_to_target_shminfo(buf, &shminfo))
2950 return -TARGET_EFAULT;
2951 break;
2952 case SHM_INFO:
2953 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2954 if (host_to_target_shm_info(buf, &shm_info))
2955 return -TARGET_EFAULT;
2956 break;
2957 case IPC_RMID:
2958 case SHM_LOCK:
2959 case SHM_UNLOCK:
2960 ret = get_errno(shmctl(shmid, cmd, NULL));
2961 break;
2964 return ret;
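/* do_shmat(): when the guest passes shmaddr == 0, a free guest address
 * range is reserved with mmap_find_vma() and the segment is attached there
 * with SHM_REMAP so the host mapping lands exactly on the reserved area;
 * the resulting guest pages are then marked readable (and writable unless
 * SHM_RDONLY was requested). */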
2967 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2969 abi_long raddr;
2970 void *host_raddr;
2971 struct shmid_ds shm_info;
2972 int i,ret;
2974 /* find out the length of the shared memory segment */
2975 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2976 if (is_error(ret)) {
2977 /* can't get length, bail out */
2978 return ret;
2981 mmap_lock();
2983 if (shmaddr)
2984 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2985 else {
2986 abi_ulong mmap_start;
2988 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2990 if (mmap_start == -1) {
2991 errno = ENOMEM;
2992 host_raddr = (void *)-1;
2993 } else
2994 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2997 if (host_raddr == (void *)-1) {
2998 mmap_unlock();
2999 return get_errno((long)host_raddr);
3001 raddr=h2g((unsigned long)host_raddr);
3003 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3004 PAGE_VALID | PAGE_READ |
3005 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3007 for (i = 0; i < N_SHM_REGIONS; i++) {
3008 if (shm_regions[i].start == 0) {
3009 shm_regions[i].start = raddr;
3010 shm_regions[i].size = shm_info.shm_segsz;
3011 break;
3015 mmap_unlock();
3016 return raddr;
3020 static inline abi_long do_shmdt(abi_ulong shmaddr)
3022 int i;
3024 for (i = 0; i < N_SHM_REGIONS; ++i) {
3025 if (shm_regions[i].start == shmaddr) {
3026 shm_regions[i].start = 0;
3027 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3028 break;
3032 return get_errno(shmdt(g2h(shmaddr)));
3035 #ifdef TARGET_NR_ipc
3036 /* ??? This only works with linear mappings. */
3037 /* do_ipc() must return target values and target errnos. */
3038 static abi_long do_ipc(unsigned int call, int first,
3039 int second, int third,
3040 abi_long ptr, abi_long fifth)
3042 int version;
3043 abi_long ret = 0;
3045 version = call >> 16;
3046 call &= 0xffff;
3048 switch (call) {
3049 case IPCOP_semop:
3050 ret = do_semop(first, ptr, second);
3051 break;
3053 case IPCOP_semget:
3054 ret = get_errno(semget(first, second, third));
3055 break;
3057 case IPCOP_semctl:
3058 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3059 break;
3061 case IPCOP_msgget:
3062 ret = get_errno(msgget(first, second));
3063 break;
3065 case IPCOP_msgsnd:
3066 ret = do_msgsnd(first, ptr, second, third);
3067 break;
3069 case IPCOP_msgctl:
3070 ret = do_msgctl(first, second, ptr);
3071 break;
3073 case IPCOP_msgrcv:
3074 switch (version) {
3075 case 0:
3077 struct target_ipc_kludge {
3078 abi_long msgp;
3079 abi_long msgtyp;
3080 } *tmp;
3082 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3083 ret = -TARGET_EFAULT;
3084 break;
3087 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3089 unlock_user_struct(tmp, ptr, 0);
3090 break;
3092 default:
3093 ret = do_msgrcv(first, ptr, second, fifth, third);
3095 break;
3097 case IPCOP_shmat:
3098 switch (version) {
3099 default:
3101 abi_ulong raddr;
3102 raddr = do_shmat(first, ptr, second);
3103 if (is_error(raddr))
3104 return get_errno(raddr);
3105 if (put_user_ual(raddr, third))
3106 return -TARGET_EFAULT;
3107 break;
3109 case 1:
3110 ret = -TARGET_EINVAL;
3111 break;
3113 break;
3114 case IPCOP_shmdt:
3115 ret = do_shmdt(ptr);
3116 break;
3118 case IPCOP_shmget:
3119 /* IPC_* flag values are the same on all Linux platforms */
3120 ret = get_errno(shmget(first, second, third));
3121 break;
3123 /* IPC_* and SHM_* command values are the same on all Linux platforms */
3124 case IPCOP_shmctl:
3125 ret = do_shmctl(first, second, third);
3126 break;
3127 default:
3128 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3129 ret = -TARGET_ENOSYS;
3130 break;
3132 return ret;
3134 #endif
3136 /* kernel structure type definitions */
3138 #define STRUCT(name, ...) STRUCT_ ## name,
3139 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3140 enum {
3141 #include "syscall_types.h"
3143 #undef STRUCT
3144 #undef STRUCT_SPECIAL
3146 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3147 #define STRUCT_SPECIAL(name)
3148 #include "syscall_types.h"
3149 #undef STRUCT
3150 #undef STRUCT_SPECIAL
3152 typedef struct IOCTLEntry IOCTLEntry;
3154 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3155 int fd, abi_long cmd, abi_long arg);
3157 struct IOCTLEntry {
3158 unsigned int target_cmd;
3159 unsigned int host_cmd;
3160 const char *name;
3161 int access;
3162 do_ioctl_fn *do_ioctl;
3163 const argtype arg_type[5];
3166 #define IOC_R 0x0001
3167 #define IOC_W 0x0002
3168 #define IOC_RW (IOC_R | IOC_W)
3170 #define MAX_STRUCT_SIZE 4096
3172 #ifdef CONFIG_FIEMAP
3173 /* So fiemap access checks don't overflow on 32-bit systems.
3174 * This is very slightly smaller than the limit imposed by
3175 * the underlying kernel.
3177 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3178 / sizeof(struct fiemap_extent))
3180 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3181 int fd, abi_long cmd, abi_long arg)
3183 /* The parameter for this ioctl is a struct fiemap followed
3184 * by an array of struct fiemap_extent whose size is set
3185 * in fiemap->fm_extent_count. The array is filled in by the
3186 * ioctl.
3188 int target_size_in, target_size_out;
3189 struct fiemap *fm;
3190 const argtype *arg_type = ie->arg_type;
3191 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3192 void *argptr, *p;
3193 abi_long ret;
3194 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3195 uint32_t outbufsz;
3196 int free_fm = 0;
3198 assert(arg_type[0] == TYPE_PTR);
3199 assert(ie->access == IOC_RW);
3200 arg_type++;
3201 target_size_in = thunk_type_size(arg_type, 0);
3202 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3203 if (!argptr) {
3204 return -TARGET_EFAULT;
3206 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3207 unlock_user(argptr, arg, 0);
3208 fm = (struct fiemap *)buf_temp;
3209 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3210 return -TARGET_EINVAL;
3213 outbufsz = sizeof (*fm) +
3214 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3216 if (outbufsz > MAX_STRUCT_SIZE) {
3217 /* We can't fit all the extents into the fixed size buffer.
3218 * Allocate one that is large enough and use it instead.
3220 fm = malloc(outbufsz);
3221 if (!fm) {
3222 return -TARGET_ENOMEM;
3224 memcpy(fm, buf_temp, sizeof(struct fiemap));
3225 free_fm = 1;
3227 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3228 if (!is_error(ret)) {
3229 target_size_out = target_size_in;
3230 /* An extent_count of 0 means we were only counting the extents
3231 * so there are no structs to copy
3233 if (fm->fm_extent_count != 0) {
3234 target_size_out += fm->fm_mapped_extents * extent_size;
3236 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3237 if (!argptr) {
3238 ret = -TARGET_EFAULT;
3239 } else {
3240 /* Convert the struct fiemap */
3241 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3242 if (fm->fm_extent_count != 0) {
3243 p = argptr + target_size_in;
3244 /* ...and then all the struct fiemap_extents */
3245 for (i = 0; i < fm->fm_mapped_extents; i++) {
3246 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3247 THUNK_TARGET);
3248 p += extent_size;
3251 unlock_user(argptr, arg, target_size_out);
3254 if (free_fm) {
3255 free(fm);
3257 return ret;
3259 #endif
3261 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3262 int fd, abi_long cmd, abi_long arg)
3264 const argtype *arg_type = ie->arg_type;
3265 int target_size;
3266 void *argptr;
3267 int ret;
3268 struct ifconf *host_ifconf;
3269 uint32_t outbufsz;
3270 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3271 int target_ifreq_size;
3272 int nb_ifreq;
3273 int free_buf = 0;
3274 int i;
3275 int target_ifc_len;
3276 abi_long target_ifc_buf;
3277 int host_ifc_len;
3278 char *host_ifc_buf;
3280 assert(arg_type[0] == TYPE_PTR);
3281 assert(ie->access == IOC_RW);
3283 arg_type++;
3284 target_size = thunk_type_size(arg_type, 0);
3286 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3287 if (!argptr)
3288 return -TARGET_EFAULT;
3289 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3290 unlock_user(argptr, arg, 0);
3292 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3293 target_ifc_len = host_ifconf->ifc_len;
3294 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3296 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3297 nb_ifreq = target_ifc_len / target_ifreq_size;
3298 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3300 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3301 if (outbufsz > MAX_STRUCT_SIZE) {
3302 /* We can't fit all the ifreq entries into the fixed size buffer.
3303 * Allocate one that is large enough and use it instead.
3305 host_ifconf = malloc(outbufsz);
3306 if (!host_ifconf) {
3307 return -TARGET_ENOMEM;
3309 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3310 free_buf = 1;
3312 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3314 host_ifconf->ifc_len = host_ifc_len;
3315 host_ifconf->ifc_buf = host_ifc_buf;
3317 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3318 if (!is_error(ret)) {
3319 /* convert host ifc_len to target ifc_len */
3321 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3322 target_ifc_len = nb_ifreq * target_ifreq_size;
3323 host_ifconf->ifc_len = target_ifc_len;
3325 /* restore target ifc_buf */
3327 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3329 /* copy struct ifconf to target user */
3331 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3332 if (!argptr)
3333 return -TARGET_EFAULT;
3334 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3335 unlock_user(argptr, arg, target_size);
3337 /* copy ifreq[] to target user */
3339 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3340 for (i = 0; i < nb_ifreq ; i++) {
3341 thunk_convert(argptr + i * target_ifreq_size,
3342 host_ifc_buf + i * sizeof(struct ifreq),
3343 ifreq_arg_type, THUNK_TARGET);
3345 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3348 if (free_buf) {
3349 free(host_ifconf);
3352 return ret;
3355 static IOCTLEntry ioctl_entries[] = {
3356 #define IOCTL(cmd, access, ...) \
3357 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3358 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3359 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3360 #include "ioctls.h"
3361 { 0, 0, },
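/* Each IOCTL()/IOCTL_SPECIAL() line pulled in from "ioctls.h" expands to
 * one table entry mapping a target ioctl number to the host number, plus
 * an argument-type description used by the thunk converter in do_ioctl().
 * Illustrative expansion only, using a hypothetical command and structure
 * name that need not exist in ioctls.h:
 *   IOCTL(FOOIOC_BAR, IOC_RW, MK_PTR(MK_STRUCT(STRUCT_foo)))
 * becomes
 *   { TARGET_FOOIOC_BAR, FOOIOC_BAR, "FOOIOC_BAR", IOC_RW, 0,
 *     { MK_PTR(MK_STRUCT(STRUCT_foo)) } },
 */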
3364 /* ??? Implement proper locking for ioctls. */
3365 /* do_ioctl() Must return target values and target errnos. */
3366 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3368 const IOCTLEntry *ie;
3369 const argtype *arg_type;
3370 abi_long ret;
3371 uint8_t buf_temp[MAX_STRUCT_SIZE];
3372 int target_size;
3373 void *argptr;
3375 ie = ioctl_entries;
3376 for(;;) {
3377 if (ie->target_cmd == 0) {
3378 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3379 return -TARGET_ENOSYS;
3381 if (ie->target_cmd == cmd)
3382 break;
3383 ie++;
3385 arg_type = ie->arg_type;
3386 #if defined(DEBUG)
3387 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3388 #endif
3389 if (ie->do_ioctl) {
3390 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3393 switch(arg_type[0]) {
3394 case TYPE_NULL:
3395 /* no argument */
3396 ret = get_errno(ioctl(fd, ie->host_cmd));
3397 break;
3398 case TYPE_PTRVOID:
3399 case TYPE_INT:
3400 /* int argument */
3401 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3402 break;
3403 case TYPE_PTR:
3404 arg_type++;
3405 target_size = thunk_type_size(arg_type, 0);
3406 switch(ie->access) {
3407 case IOC_R:
3408 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3409 if (!is_error(ret)) {
3410 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3411 if (!argptr)
3412 return -TARGET_EFAULT;
3413 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3414 unlock_user(argptr, arg, target_size);
3416 break;
3417 case IOC_W:
3418 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3419 if (!argptr)
3420 return -TARGET_EFAULT;
3421 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3422 unlock_user(argptr, arg, 0);
3423 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3424 break;
3425 default:
3426 case IOC_RW:
3427 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3428 if (!argptr)
3429 return -TARGET_EFAULT;
3430 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3431 unlock_user(argptr, arg, 0);
3432 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3433 if (!is_error(ret)) {
3434 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3435 if (!argptr)
3436 return -TARGET_EFAULT;
3437 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3438 unlock_user(argptr, arg, target_size);
3440 break;
3442 break;
3443 default:
3444 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3445 (long)cmd, arg_type[0]);
3446 ret = -TARGET_ENOSYS;
3447 break;
3449 return ret;
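/* The bitmask_transtbl tables below drive target_to_host_bitmask() and
 * host_to_target_bitmask(): each row appears to be
 * { target mask, target value, host mask, host value }, translating one
 * flag (or one field such as CSIZE/CBAUD) between the two encodings. */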
3452 static const bitmask_transtbl iflag_tbl[] = {
3453 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3454 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3455 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3456 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3457 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3458 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3459 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3460 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3461 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3462 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3463 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3464 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3465 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3466 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3467 { 0, 0, 0, 0 }
3470 static const bitmask_transtbl oflag_tbl[] = {
3471 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3472 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3473 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3474 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3475 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3476 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3477 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3478 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3479 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3480 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3481 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3482 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3483 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3484 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3485 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3486 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3487 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3488 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3489 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3490 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3491 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3492 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3493 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3494 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3495 { 0, 0, 0, 0 }
3498 static const bitmask_transtbl cflag_tbl[] = {
3499 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3500 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3501 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3502 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3503 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3504 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3505 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3506 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3507 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3508 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3509 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3510 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3511 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3512 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3513 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3514 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3515 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3516 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3517 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3518 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3519 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3520 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3521 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3522 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3523 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3524 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3525 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3526 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3527 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3528 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3529 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3530 { 0, 0, 0, 0 }
3533 static const bitmask_transtbl lflag_tbl[] = {
3534 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3535 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3536 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3537 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3538 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3539 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3540 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3541 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3542 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3543 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3544 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3545 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3546 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3547 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3548 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3549 { 0, 0, 0, 0 }
3552 static void target_to_host_termios (void *dst, const void *src)
3554 struct host_termios *host = dst;
3555 const struct target_termios *target = src;
3557 host->c_iflag =
3558 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3559 host->c_oflag =
3560 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3561 host->c_cflag =
3562 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3563 host->c_lflag =
3564 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3565 host->c_line = target->c_line;
3567 memset(host->c_cc, 0, sizeof(host->c_cc));
3568 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3569 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3570 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3571 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3572 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3573 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3574 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3575 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3576 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3577 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3578 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3579 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3580 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3581 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3582 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3583 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3584 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3587 static void host_to_target_termios (void *dst, const void *src)
3589 struct target_termios *target = dst;
3590 const struct host_termios *host = src;
3592 target->c_iflag =
3593 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3594 target->c_oflag =
3595 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3596 target->c_cflag =
3597 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3598 target->c_lflag =
3599 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3600 target->c_line = host->c_line;
3602 memset(target->c_cc, 0, sizeof(target->c_cc));
3603 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3604 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3605 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3606 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3607 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3608 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3609 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3610 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3611 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3612 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3613 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3614 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3615 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3616 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3617 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3618 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3619 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3622 static const StructEntry struct_termios_def = {
3623 .convert = { host_to_target_termios, target_to_host_termios },
3624 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3625 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3628 static bitmask_transtbl mmap_flags_tbl[] = {
3629 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3630 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3631 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3632 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3633 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3634 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3635 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3636 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3637 { 0, 0, 0, 0 }
3640 #if defined(TARGET_I386)
3642 /* NOTE: there is really only one LDT, shared by all threads */
3643 static uint8_t *ldt_table;
3645 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3647 int size;
3648 void *p;
3650 if (!ldt_table)
3651 return 0;
3652 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3653 if (size > bytecount)
3654 size = bytecount;
3655 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3656 if (!p)
3657 return -TARGET_EFAULT;
3658 /* ??? Should this be byteswapped? */
3659 memcpy(p, ldt_table, size);
3660 unlock_user(p, ptr, size);
3661 return size;
3664 /* XXX: add locking support */
3665 static abi_long write_ldt(CPUX86State *env,
3666 abi_ulong ptr, unsigned long bytecount, int oldmode)
3668 struct target_modify_ldt_ldt_s ldt_info;
3669 struct target_modify_ldt_ldt_s *target_ldt_info;
3670 int seg_32bit, contents, read_exec_only, limit_in_pages;
3671 int seg_not_present, useable, lm;
3672 uint32_t *lp, entry_1, entry_2;
3674 if (bytecount != sizeof(ldt_info))
3675 return -TARGET_EINVAL;
3676 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3677 return -TARGET_EFAULT;
3678 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3679 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3680 ldt_info.limit = tswap32(target_ldt_info->limit);
3681 ldt_info.flags = tswap32(target_ldt_info->flags);
3682 unlock_user_struct(target_ldt_info, ptr, 0);
3684 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3685 return -TARGET_EINVAL;
3686 seg_32bit = ldt_info.flags & 1;
3687 contents = (ldt_info.flags >> 1) & 3;
3688 read_exec_only = (ldt_info.flags >> 3) & 1;
3689 limit_in_pages = (ldt_info.flags >> 4) & 1;
3690 seg_not_present = (ldt_info.flags >> 5) & 1;
3691 useable = (ldt_info.flags >> 6) & 1;
3692 #ifdef TARGET_ABI32
3693 lm = 0;
3694 #else
3695 lm = (ldt_info.flags >> 7) & 1;
3696 #endif
3697 if (contents == 3) {
3698 if (oldmode)
3699 return -TARGET_EINVAL;
3700 if (seg_not_present == 0)
3701 return -TARGET_EINVAL;
3703 /* allocate the LDT */
3704 if (!ldt_table) {
3705 env->ldt.base = target_mmap(0,
3706 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3707 PROT_READ|PROT_WRITE,
3708 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3709 if (env->ldt.base == -1)
3710 return -TARGET_ENOMEM;
3711 memset(g2h(env->ldt.base), 0,
3712 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3713 env->ldt.limit = 0xffff;
3714 ldt_table = g2h(env->ldt.base);
3717 /* NOTE: same code as Linux kernel */
3718 /* Allow LDTs to be cleared by the user. */
3719 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3720 if (oldmode ||
3721 (contents == 0 &&
3722 read_exec_only == 1 &&
3723 seg_32bit == 0 &&
3724 limit_in_pages == 0 &&
3725 seg_not_present == 1 &&
3726 useable == 0 )) {
3727 entry_1 = 0;
3728 entry_2 = 0;
3729 goto install;
3733 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3734 (ldt_info.limit & 0x0ffff);
3735 entry_2 = (ldt_info.base_addr & 0xff000000) |
3736 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3737 (ldt_info.limit & 0xf0000) |
3738 ((read_exec_only ^ 1) << 9) |
3739 (contents << 10) |
3740 ((seg_not_present ^ 1) << 15) |
3741 (seg_32bit << 22) |
3742 (limit_in_pages << 23) |
3743 (lm << 21) |
3744 0x7000;
3745 if (!oldmode)
3746 entry_2 |= (useable << 20);
3748 /* Install the new entry ... */
3749 install:
3750 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3751 lp[0] = tswap32(entry_1);
3752 lp[1] = tswap32(entry_2);
3753 return 0;
3756 /* specific and weird i386 syscalls */
3757 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3758 unsigned long bytecount)
3760 abi_long ret;
3762 switch (func) {
3763 case 0:
3764 ret = read_ldt(ptr, bytecount);
3765 break;
3766 case 1:
3767 ret = write_ldt(env, ptr, bytecount, 1);
3768 break;
3769 case 0x11:
3770 ret = write_ldt(env, ptr, bytecount, 0);
3771 break;
3772 default:
3773 ret = -TARGET_ENOSYS;
3774 break;
3776 return ret;
3779 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3780 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3782 uint64_t *gdt_table = g2h(env->gdt.base);
3783 struct target_modify_ldt_ldt_s ldt_info;
3784 struct target_modify_ldt_ldt_s *target_ldt_info;
3785 int seg_32bit, contents, read_exec_only, limit_in_pages;
3786 int seg_not_present, useable, lm;
3787 uint32_t *lp, entry_1, entry_2;
3788 int i;
3790 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3791 if (!target_ldt_info)
3792 return -TARGET_EFAULT;
3793 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3794 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3795 ldt_info.limit = tswap32(target_ldt_info->limit);
3796 ldt_info.flags = tswap32(target_ldt_info->flags);
3797 if (ldt_info.entry_number == -1) {
3798 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3799 if (gdt_table[i] == 0) {
3800 ldt_info.entry_number = i;
3801 target_ldt_info->entry_number = tswap32(i);
3802 break;
3806 unlock_user_struct(target_ldt_info, ptr, 1);
3808 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3809 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3810 return -TARGET_EINVAL;
3811 seg_32bit = ldt_info.flags & 1;
3812 contents = (ldt_info.flags >> 1) & 3;
3813 read_exec_only = (ldt_info.flags >> 3) & 1;
3814 limit_in_pages = (ldt_info.flags >> 4) & 1;
3815 seg_not_present = (ldt_info.flags >> 5) & 1;
3816 useable = (ldt_info.flags >> 6) & 1;
3817 #ifdef TARGET_ABI32
3818 lm = 0;
3819 #else
3820 lm = (ldt_info.flags >> 7) & 1;
3821 #endif
3823 if (contents == 3) {
3824 if (seg_not_present == 0)
3825 return -TARGET_EINVAL;
3828 /* NOTE: same code as Linux kernel */
3829 /* Allow LDTs to be cleared by the user. */
3830 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3831 if ((contents == 0 &&
3832 read_exec_only == 1 &&
3833 seg_32bit == 0 &&
3834 limit_in_pages == 0 &&
3835 seg_not_present == 1 &&
3836 useable == 0 )) {
3837 entry_1 = 0;
3838 entry_2 = 0;
3839 goto install;
3843 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3844 (ldt_info.limit & 0x0ffff);
3845 entry_2 = (ldt_info.base_addr & 0xff000000) |
3846 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3847 (ldt_info.limit & 0xf0000) |
3848 ((read_exec_only ^ 1) << 9) |
3849 (contents << 10) |
3850 ((seg_not_present ^ 1) << 15) |
3851 (seg_32bit << 22) |
3852 (limit_in_pages << 23) |
3853 (useable << 20) |
3854 (lm << 21) |
3855 0x7000;
3857 /* Install the new entry ... */
3858 install:
3859 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3860 lp[0] = tswap32(entry_1);
3861 lp[1] = tswap32(entry_2);
3862 return 0;
3865 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3867 struct target_modify_ldt_ldt_s *target_ldt_info;
3868 uint64_t *gdt_table = g2h(env->gdt.base);
3869 uint32_t base_addr, limit, flags;
3870 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3871 int seg_not_present, useable, lm;
3872 uint32_t *lp, entry_1, entry_2;
3874 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3875 if (!target_ldt_info)
3876 return -TARGET_EFAULT;
3877 idx = tswap32(target_ldt_info->entry_number);
3878 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3879 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3880 unlock_user_struct(target_ldt_info, ptr, 1);
3881 return -TARGET_EINVAL;
3883 lp = (uint32_t *)(gdt_table + idx);
3884 entry_1 = tswap32(lp[0]);
3885 entry_2 = tswap32(lp[1]);
3887 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3888 contents = (entry_2 >> 10) & 3;
3889 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3890 seg_32bit = (entry_2 >> 22) & 1;
3891 limit_in_pages = (entry_2 >> 23) & 1;
3892 useable = (entry_2 >> 20) & 1;
3893 #ifdef TARGET_ABI32
3894 lm = 0;
3895 #else
3896 lm = (entry_2 >> 21) & 1;
3897 #endif
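/* Reassemble the user_desc-style flags word from the descriptor bits; this
   is the inverse of the flag unpacking done when the entry was installed. */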
3898 flags = (seg_32bit << 0) | (contents << 1) |
3899 (read_exec_only << 3) | (limit_in_pages << 4) |
3900 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3901 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3902 base_addr = (entry_1 >> 16) |
3903 (entry_2 & 0xff000000) |
3904 ((entry_2 & 0xff) << 16);
3905 target_ldt_info->base_addr = tswapal(base_addr);
3906 target_ldt_info->limit = tswap32(limit);
3907 target_ldt_info->flags = tswap32(flags);
3908 unlock_user_struct(target_ldt_info, ptr, 1);
3909 return 0;
3911 #endif /* TARGET_I386 && TARGET_ABI32 */
3913 #ifndef TARGET_ABI32
3914 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3916 abi_long ret = 0;
3917 abi_ulong val;
3918 int idx;
3920 switch(code) {
3921 case TARGET_ARCH_SET_GS:
3922 case TARGET_ARCH_SET_FS:
3923 if (code == TARGET_ARCH_SET_GS)
3924 idx = R_GS;
3925 else
3926 idx = R_FS;
3927 cpu_x86_load_seg(env, idx, 0);
3928 env->segs[idx].base = addr;
3929 break;
3930 case TARGET_ARCH_GET_GS:
3931 case TARGET_ARCH_GET_FS:
3932 if (code == TARGET_ARCH_GET_GS)
3933 idx = R_GS;
3934 else
3935 idx = R_FS;
3936 val = env->segs[idx].base;
3937 if (put_user(val, addr, abi_ulong))
3938 ret = -TARGET_EFAULT;
3939 break;
3940 default:
3941 ret = -TARGET_EINVAL;
3942 break;
3944 return ret;
3946 #endif
3948 #endif /* defined(TARGET_I386) */
3950 #define NEW_STACK_SIZE 0x40000
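/* Host stack size (0x40000 = 256 KiB) used for the threads and clone
   children created on behalf of the guest by do_fork() below. */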
3952 #if defined(CONFIG_USE_NPTL)
3954 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3955 typedef struct {
3956 CPUState *env;
3957 pthread_mutex_t mutex;
3958 pthread_cond_t cond;
3959 pthread_t thread;
3960 uint32_t tid;
3961 abi_ulong child_tidptr;
3962 abi_ulong parent_tidptr;
3963 sigset_t sigmask;
3964 } new_thread_info;
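/* Everything the parent hands to a newly created guest thread.  The parent
   holds info.mutex while waiting on info.cond; the child records its tid,
   publishes it through the *_tidptr addresses and signals the condition
   before entering cpu_loop(). */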
3966 static void *clone_func(void *arg)
3968 new_thread_info *info = arg;
3969 CPUState *env;
3970 TaskState *ts;
3972 env = info->env;
3973 thread_env = env;
3974 ts = (TaskState *)thread_env->opaque;
3975 info->tid = gettid();
3976 env->host_tid = info->tid;
3977 task_settid(ts);
3978 if (info->child_tidptr)
3979 put_user_u32(info->tid, info->child_tidptr);
3980 if (info->parent_tidptr)
3981 put_user_u32(info->tid, info->parent_tidptr);
3982 /* Enable signals. */
3983 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3984 /* Signal to the parent that we're ready. */
3985 pthread_mutex_lock(&info->mutex);
3986 pthread_cond_broadcast(&info->cond);
3987 pthread_mutex_unlock(&info->mutex);
3988 /* Wait until the parent has finished initializing the TLS state. */
3989 pthread_mutex_lock(&clone_lock);
3990 pthread_mutex_unlock(&clone_lock);
3991 cpu_loop(env);
3992 /* never exits */
3993 return NULL;
3995 #else
3997 static int clone_func(void *arg)
3999 CPUState *env = arg;
4000 cpu_loop(env);
4001 /* never exits */
4002 return 0;
4004 #endif
4006 /* do_fork() must return host values and target errnos (unlike most
4007 do_*() functions). */
4008 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
4009 abi_ulong parent_tidptr, target_ulong newtls,
4010 abi_ulong child_tidptr)
4012 int ret;
4013 TaskState *ts;
4014 CPUState *new_env;
4015 #if defined(CONFIG_USE_NPTL)
4016 unsigned int nptl_flags;
4017 sigset_t sigmask;
4018 #else
4019 uint8_t *new_stack;
4020 #endif
4022 /* Emulate vfork() with fork() */
4023 if (flags & CLONE_VFORK)
4024 flags &= ~(CLONE_VFORK | CLONE_VM);
4026 if (flags & CLONE_VM) {
4027 TaskState *parent_ts = (TaskState *)env->opaque;
4028 #if defined(CONFIG_USE_NPTL)
4029 new_thread_info info;
4030 pthread_attr_t attr;
4031 #endif
4032 ts = g_malloc0(sizeof(TaskState));
4033 init_task_state(ts);
4034 /* we create a new CPU instance. */
4035 new_env = cpu_copy(env);
4036 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4037 cpu_reset(new_env);
4038 #endif
4039 /* Init regs that differ from the parent. */
4040 cpu_clone_regs(new_env, newsp);
4041 new_env->opaque = ts;
4042 ts->bprm = parent_ts->bprm;
4043 ts->info = parent_ts->info;
4044 #if defined(CONFIG_USE_NPTL)
4045 nptl_flags = flags;
4046 flags &= ~CLONE_NPTL_FLAGS2;
4048 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4049 ts->child_tidptr = child_tidptr;
4052 if (nptl_flags & CLONE_SETTLS)
4053 cpu_set_tls (new_env, newtls);
4055 /* Grab a mutex so that thread setup appears atomic. */
4056 pthread_mutex_lock(&clone_lock);
4058 memset(&info, 0, sizeof(info));
4059 pthread_mutex_init(&info.mutex, NULL);
4060 pthread_mutex_lock(&info.mutex);
4061 pthread_cond_init(&info.cond, NULL);
4062 info.env = new_env;
4063 if (nptl_flags & CLONE_CHILD_SETTID)
4064 info.child_tidptr = child_tidptr;
4065 if (nptl_flags & CLONE_PARENT_SETTID)
4066 info.parent_tidptr = parent_tidptr;
4068 ret = pthread_attr_init(&attr);
4069 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4070 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4071 /* It is not safe to deliver signals until the child has finished
4072 initializing, so temporarily block all signals. */
4073 sigfillset(&sigmask);
4074 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4076 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4077 /* TODO: Free new CPU state if thread creation failed. */
4079 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4080 pthread_attr_destroy(&attr);
4081 if (ret == 0) {
4082 /* Wait for the child to initialize. */
4083 pthread_cond_wait(&info.cond, &info.mutex);
4084 ret = info.tid;
4085 if (flags & CLONE_PARENT_SETTID)
4086 put_user_u32(ret, parent_tidptr);
4087 } else {
4088 ret = -1;
4090 pthread_mutex_unlock(&info.mutex);
4091 pthread_cond_destroy(&info.cond);
4092 pthread_mutex_destroy(&info.mutex);
4093 pthread_mutex_unlock(&clone_lock);
4094 #else
4095 if (flags & CLONE_NPTL_FLAGS2)
4096 return -EINVAL;
4097 /* This is probably going to die very quickly, but do it anyway. */
4098 new_stack = g_malloc0 (NEW_STACK_SIZE);
4099 #ifdef __ia64__
4100 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4101 #else
4102 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4103 #endif
4104 #endif
4105 } else {
4106 /* if no CLONE_VM, we consider it a fork */
4107 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4108 return -EINVAL;
4109 fork_start();
4110 ret = fork();
4111 if (ret == 0) {
4112 /* Child Process. */
4113 cpu_clone_regs(env, newsp);
4114 fork_end(1);
4115 #if defined(CONFIG_USE_NPTL)
4116 /* There is a race condition here. The parent process could
4117 theoretically read the TID in the child process before the child
4118 tid is set. Avoiding this would require using either ptrace
4119 (not implemented) or having *_tidptr point at a shared memory
4120 mapping. We can't repeat the spinlock hack used above because
4121 the child process gets its own copy of the lock. */
4122 if (flags & CLONE_CHILD_SETTID)
4123 put_user_u32(gettid(), child_tidptr);
4124 if (flags & CLONE_PARENT_SETTID)
4125 put_user_u32(gettid(), parent_tidptr);
4126 ts = (TaskState *)env->opaque;
4127 if (flags & CLONE_SETTLS)
4128 cpu_set_tls (env, newtls);
4129 if (flags & CLONE_CHILD_CLEARTID)
4130 ts->child_tidptr = child_tidptr;
4131 #endif
4132 } else {
4133 fork_end(0);
4136 return ret;
4139 /* warning: doesn't handle Linux-specific flags... */
4140 static int target_to_host_fcntl_cmd(int cmd)
4142 switch(cmd) {
4143 case TARGET_F_DUPFD:
4144 case TARGET_F_GETFD:
4145 case TARGET_F_SETFD:
4146 case TARGET_F_GETFL:
4147 case TARGET_F_SETFL:
4148 return cmd;
4149 case TARGET_F_GETLK:
4150 return F_GETLK;
4151 case TARGET_F_SETLK:
4152 return F_SETLK;
4153 case TARGET_F_SETLKW:
4154 return F_SETLKW;
4155 case TARGET_F_GETOWN:
4156 return F_GETOWN;
4157 case TARGET_F_SETOWN:
4158 return F_SETOWN;
4159 case TARGET_F_GETSIG:
4160 return F_GETSIG;
4161 case TARGET_F_SETSIG:
4162 return F_SETSIG;
4163 #if TARGET_ABI_BITS == 32
4164 case TARGET_F_GETLK64:
4165 return F_GETLK64;
4166 case TARGET_F_SETLK64:
4167 return F_SETLK64;
4168 case TARGET_F_SETLKW64:
4169 return F_SETLKW64;
4170 #endif
4171 case TARGET_F_SETLEASE:
4172 return F_SETLEASE;
4173 case TARGET_F_GETLEASE:
4174 return F_GETLEASE;
4175 #ifdef F_DUPFD_CLOEXEC
4176 case TARGET_F_DUPFD_CLOEXEC:
4177 return F_DUPFD_CLOEXEC;
4178 #endif
4179 case TARGET_F_NOTIFY:
4180 return F_NOTIFY;
4181 default:
4182 return -TARGET_EINVAL;
4184 return -TARGET_EINVAL;
4187 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4189 struct flock fl;
4190 struct target_flock *target_fl;
4191 struct flock64 fl64;
4192 struct target_flock64 *target_fl64;
4193 abi_long ret;
4194 int host_cmd = target_to_host_fcntl_cmd(cmd);
4196 if (host_cmd == -TARGET_EINVAL)
4197 return host_cmd;
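/* struct flock layouts differ between target and host, so the lock
   arguments are converted field by field in both directions. */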
4199 switch(cmd) {
4200 case TARGET_F_GETLK:
4201 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4202 return -TARGET_EFAULT;
4203 fl.l_type = tswap16(target_fl->l_type);
4204 fl.l_whence = tswap16(target_fl->l_whence);
4205 fl.l_start = tswapal(target_fl->l_start);
4206 fl.l_len = tswapal(target_fl->l_len);
4207 fl.l_pid = tswap32(target_fl->l_pid);
4208 unlock_user_struct(target_fl, arg, 0);
4209 ret = get_errno(fcntl(fd, host_cmd, &fl));
4210 if (ret == 0) {
4211 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4212 return -TARGET_EFAULT;
4213 target_fl->l_type = tswap16(fl.l_type);
4214 target_fl->l_whence = tswap16(fl.l_whence);
4215 target_fl->l_start = tswapal(fl.l_start);
4216 target_fl->l_len = tswapal(fl.l_len);
4217 target_fl->l_pid = tswap32(fl.l_pid);
4218 unlock_user_struct(target_fl, arg, 1);
4220 break;
4222 case TARGET_F_SETLK:
4223 case TARGET_F_SETLKW:
4224 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4225 return -TARGET_EFAULT;
4226 fl.l_type = tswap16(target_fl->l_type);
4227 fl.l_whence = tswap16(target_fl->l_whence);
4228 fl.l_start = tswapal(target_fl->l_start);
4229 fl.l_len = tswapal(target_fl->l_len);
4230 fl.l_pid = tswap32(target_fl->l_pid);
4231 unlock_user_struct(target_fl, arg, 0);
4232 ret = get_errno(fcntl(fd, host_cmd, &fl));
4233 break;
4235 case TARGET_F_GETLK64:
4236 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4237 return -TARGET_EFAULT;
4238 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4239 fl64.l_whence = tswap16(target_fl64->l_whence);
4240 fl64.l_start = tswap64(target_fl64->l_start);
4241 fl64.l_len = tswap64(target_fl64->l_len);
4242 fl64.l_pid = tswap32(target_fl64->l_pid);
4243 unlock_user_struct(target_fl64, arg, 0);
4244 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4245 if (ret == 0) {
4246 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4247 return -TARGET_EFAULT;
4248 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4249 target_fl64->l_whence = tswap16(fl64.l_whence);
4250 target_fl64->l_start = tswap64(fl64.l_start);
4251 target_fl64->l_len = tswap64(fl64.l_len);
4252 target_fl64->l_pid = tswap32(fl64.l_pid);
4253 unlock_user_struct(target_fl64, arg, 1);
4255 break;
4256 case TARGET_F_SETLK64:
4257 case TARGET_F_SETLKW64:
4258 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4259 return -TARGET_EFAULT;
4260 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4261 fl64.l_whence = tswap16(target_fl64->l_whence);
4262 fl64.l_start = tswap64(target_fl64->l_start);
4263 fl64.l_len = tswap64(target_fl64->l_len);
4264 fl64.l_pid = tswap32(target_fl64->l_pid);
4265 unlock_user_struct(target_fl64, arg, 0);
4266 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4267 break;
4269 case TARGET_F_GETFL:
4270 ret = get_errno(fcntl(fd, host_cmd, arg));
4271 if (ret >= 0) {
4272 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4274 break;
4276 case TARGET_F_SETFL:
4277 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4278 break;
4280 case TARGET_F_SETOWN:
4281 case TARGET_F_GETOWN:
4282 case TARGET_F_SETSIG:
4283 case TARGET_F_GETSIG:
4284 case TARGET_F_SETLEASE:
4285 case TARGET_F_GETLEASE:
4286 ret = get_errno(fcntl(fd, host_cmd, arg));
4287 break;
4289 default:
4290 ret = get_errno(fcntl(fd, cmd, arg));
4291 break;
4293 return ret;
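/* Helpers for the legacy 16-bit uid/gid ABI: values above 65535 are clamped
   to 65534 (the traditional overflow id), and a 16-bit -1 is widened so that
   "leave unchanged" arguments keep their meaning. */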
4296 #ifdef USE_UID16
4298 static inline int high2lowuid(int uid)
4300 if (uid > 65535)
4301 return 65534;
4302 else
4303 return uid;
4306 static inline int high2lowgid(int gid)
4308 if (gid > 65535)
4309 return 65534;
4310 else
4311 return gid;
4314 static inline int low2highuid(int uid)
4316 if ((int16_t)uid == -1)
4317 return -1;
4318 else
4319 return uid;
4322 static inline int low2highgid(int gid)
4324 if ((int16_t)gid == -1)
4325 return -1;
4326 else
4327 return gid;
4329 static inline int tswapid(int id)
4331 return tswap16(id);
4333 #else /* !USE_UID16 */
4334 static inline int high2lowuid(int uid)
4336 return uid;
4338 static inline int high2lowgid(int gid)
4340 return gid;
4342 static inline int low2highuid(int uid)
4344 return uid;
4346 static inline int low2highgid(int gid)
4348 return gid;
4350 static inline int tswapid(int id)
4352 return tswap32(id);
4354 #endif /* USE_UID16 */
4356 void syscall_init(void)
4358 IOCTLEntry *ie;
4359 const argtype *arg_type;
4360 int size;
4361 int i;
4363 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4364 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4365 #include "syscall_types.h"
4366 #undef STRUCT
4367 #undef STRUCT_SPECIAL
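/* The STRUCT() expansions above register every structure layout listed in
   syscall_types.h with the thunk machinery, which converts those structures
   between target and host representations (mainly for ioctl arguments). */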
4369 /* We patch the ioctl size if necessary. We rely on the fact that
4370 no ioctl has all bits set to '1' in the size field. */
4371 ie = ioctl_entries;
4372 while (ie->target_cmd != 0) {
4373 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4374 TARGET_IOC_SIZEMASK) {
4375 arg_type = ie->arg_type;
4376 if (arg_type[0] != TYPE_PTR) {
4377 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4378 ie->target_cmd);
4379 exit(1);
4381 arg_type++;
4382 size = thunk_type_size(arg_type, 0);
4383 ie->target_cmd = (ie->target_cmd &
4384 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4385 (size << TARGET_IOC_SIZESHIFT);
4388 /* Build target_to_host_errno_table[] table from
4389 * host_to_target_errno_table[]. */
4390 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4391 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4393 /* automatic consistency check if same arch */
4394 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4395 (defined(__x86_64__) && defined(TARGET_X86_64))
4396 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4397 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4398 ie->name, ie->target_cmd, ie->host_cmd);
4400 #endif
4401 ie++;
4405 #if TARGET_ABI_BITS == 32
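/* On 32-bit ABIs a 64-bit file offset is passed as two consecutive registers;
   reassemble it in the order dictated by the target's endianness. */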
4406 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4408 #ifdef TARGET_WORDS_BIGENDIAN
4409 return ((uint64_t)word0 << 32) | word1;
4410 #else
4411 return ((uint64_t)word1 << 32) | word0;
4412 #endif
4414 #else /* TARGET_ABI_BITS == 32 */
4415 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4417 return word0;
4419 #endif /* TARGET_ABI_BITS != 32 */
4421 #ifdef TARGET_NR_truncate64
4422 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4423 abi_long arg2,
4424 abi_long arg3,
4425 abi_long arg4)
4427 if (regpairs_aligned(cpu_env)) {
4428 arg2 = arg3;
4429 arg3 = arg4;
4431 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4433 #endif
4435 #ifdef TARGET_NR_ftruncate64
4436 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4437 abi_long arg2,
4438 abi_long arg3,
4439 abi_long arg4)
4441 if (regpairs_aligned(cpu_env)) {
4442 arg2 = arg3;
4443 arg3 = arg4;
4445 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4447 #endif
4449 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4450 abi_ulong target_addr)
4452 struct target_timespec *target_ts;
4454 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4455 return -TARGET_EFAULT;
4456 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4457 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4458 unlock_user_struct(target_ts, target_addr, 0);
4459 return 0;
4462 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4463 struct timespec *host_ts)
4465 struct target_timespec *target_ts;
4467 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4468 return -TARGET_EFAULT;
4469 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4470 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4471 unlock_user_struct(target_ts, target_addr, 1);
4472 return 0;
4475 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4476 static inline abi_long host_to_target_stat64(void *cpu_env,
4477 abi_ulong target_addr,
4478 struct stat *host_st)
4480 #ifdef TARGET_ARM
4481 if (((CPUARMState *)cpu_env)->eabi) {
4482 struct target_eabi_stat64 *target_st;
4484 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4485 return -TARGET_EFAULT;
4486 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4487 __put_user(host_st->st_dev, &target_st->st_dev);
4488 __put_user(host_st->st_ino, &target_st->st_ino);
4489 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4490 __put_user(host_st->st_ino, &target_st->__st_ino);
4491 #endif
4492 __put_user(host_st->st_mode, &target_st->st_mode);
4493 __put_user(host_st->st_nlink, &target_st->st_nlink);
4494 __put_user(host_st->st_uid, &target_st->st_uid);
4495 __put_user(host_st->st_gid, &target_st->st_gid);
4496 __put_user(host_st->st_rdev, &target_st->st_rdev);
4497 __put_user(host_st->st_size, &target_st->st_size);
4498 __put_user(host_st->st_blksize, &target_st->st_blksize);
4499 __put_user(host_st->st_blocks, &target_st->st_blocks);
4500 __put_user(host_st->st_atime, &target_st->target_st_atime);
4501 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4502 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4503 unlock_user_struct(target_st, target_addr, 1);
4504 } else
4505 #endif
4507 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4508 struct target_stat *target_st;
4509 #else
4510 struct target_stat64 *target_st;
4511 #endif
4513 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4514 return -TARGET_EFAULT;
4515 memset(target_st, 0, sizeof(*target_st));
4516 __put_user(host_st->st_dev, &target_st->st_dev);
4517 __put_user(host_st->st_ino, &target_st->st_ino);
4518 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4519 __put_user(host_st->st_ino, &target_st->__st_ino);
4520 #endif
4521 __put_user(host_st->st_mode, &target_st->st_mode);
4522 __put_user(host_st->st_nlink, &target_st->st_nlink);
4523 __put_user(host_st->st_uid, &target_st->st_uid);
4524 __put_user(host_st->st_gid, &target_st->st_gid);
4525 __put_user(host_st->st_rdev, &target_st->st_rdev);
4526 /* XXX: better use of kernel struct */
4527 __put_user(host_st->st_size, &target_st->st_size);
4528 __put_user(host_st->st_blksize, &target_st->st_blksize);
4529 __put_user(host_st->st_blocks, &target_st->st_blocks);
4530 __put_user(host_st->st_atime, &target_st->target_st_atime);
4531 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4532 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4533 unlock_user_struct(target_st, target_addr, 1);
4536 return 0;
4538 #endif
4540 #if defined(CONFIG_USE_NPTL)
4541 /* ??? Using host futex calls even when target atomic operations
4542 are not really atomic probably breaks things. However, implementing
4543 futexes locally would make futexes shared between multiple processes
4544 tricky; then again, they're probably useless anyway because guest
4545 atomic operations won't work either. */
4546 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4547 target_ulong uaddr2, int val3)
4549 struct timespec ts, *pts;
4550 int base_op;
4552 /* ??? We assume FUTEX_* constants are the same on both host
4553 and target. */
4554 #ifdef FUTEX_CMD_MASK
4555 base_op = op & FUTEX_CMD_MASK;
4556 #else
4557 base_op = op;
4558 #endif
4559 switch (base_op) {
4560 case FUTEX_WAIT:
4561 if (timeout) {
4562 pts = &ts;
4563 target_to_host_timespec(pts, timeout);
4564 } else {
4565 pts = NULL;
4567 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4568 pts, NULL, 0));
4569 case FUTEX_WAKE:
4570 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4571 case FUTEX_FD:
4572 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4573 case FUTEX_REQUEUE:
4574 case FUTEX_CMP_REQUEUE:
4575 case FUTEX_WAKE_OP:
4576 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4577 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4578 But the prototype takes a `struct timespec *'; insert casts
4579 to satisfy the compiler. We do not need to tswap TIMEOUT
4580 since it's not compared to guest memory. */
4581 pts = (struct timespec *)(uintptr_t) timeout;
4582 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4583 g2h(uaddr2),
4584 (base_op == FUTEX_CMP_REQUEUE
4585 ? tswap32(val3)
4586 : val3)));
4587 default:
4588 return -TARGET_ENOSYS;
4591 #endif
4593 /* Map host to target signal numbers for the wait family of syscalls.
4594 Assume all other status bits are the same. */
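/* For example, a child killed by a signal carries the signal number in the
   low 7 bits of the status word, while a stopped child reports WSTOPSIG() in
   bits 8-15; only the signal number itself needs translating. */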
4595 static int host_to_target_waitstatus(int status)
4597 if (WIFSIGNALED(status)) {
4598 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4600 if (WIFSTOPPED(status)) {
4601 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4602 | (status & 0xff);
4604 return status;
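/* Parse the kernel release string (qemu_uname_release if set, otherwise the
   host's uname) into one packed integer, e.g. "2.6.32" -> 0x020620, matching
   the kernel's KERNEL_VERSION() encoding.  The result is cached. */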
4607 int get_osversion(void)
4609 static int osversion;
4610 struct new_utsname buf;
4611 const char *s;
4612 int i, n, tmp;
4613 if (osversion)
4614 return osversion;
4615 if (qemu_uname_release && *qemu_uname_release) {
4616 s = qemu_uname_release;
4617 } else {
4618 if (sys_uname(&buf))
4619 return 0;
4620 s = buf.release;
4622 tmp = 0;
4623 for (i = 0; i < 3; i++) {
4624 n = 0;
4625 while (*s >= '0' && *s <= '9') {
4626 n *= 10;
4627 n += *s - '0';
4628 s++;
4630 tmp = (tmp << 8) + n;
4631 if (*s == '.')
4632 s++;
4634 osversion = tmp;
4635 return osversion;
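/* Fake /proc/self/maps for the guest: only the guest stack mapping is
   reported, which is presumably enough for code that just wants to find its
   own stack bounds. */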
4639 static int open_self_maps(void *cpu_env, int fd)
4641 TaskState *ts = ((CPUState *)cpu_env)->opaque;
4643 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4644 (unsigned long long)ts->info->stack_limit,
4645 (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1))
4646 & TARGET_PAGE_MASK,
4647 (unsigned long long)ts->stack_base);
4649 return 0;
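/* Fake /proc/self/stat: emit 44 space-separated fields that are all zero
   except field 28 (startstack), which is taken from the guest image info. */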
4652 static int open_self_stat(void *cpu_env, int fd)
4654 TaskState *ts = ((CPUState *)cpu_env)->opaque;
4655 abi_ulong start_stack = ts->info->start_stack;
4656 int i;
4658 for (i = 0; i < 44; i++) {
4659 char buf[128];
4660 int len;
4661 uint64_t val = 0;
4663 if (i == 27) {
4664 /* stack bottom */
4665 val = start_stack;
4667 snprintf(buf, sizeof(buf), "%"PRId64 "%c", val, i == 43 ? '\n' : ' ');
4668 len = strlen(buf);
4669 if (write(fd, buf, len) != len) {
4670 return -1;
4674 return 0;
4677 static int open_self_auxv(void *cpu_env, int fd)
4679 TaskState *ts = ((CPUState *)cpu_env)->opaque;
4680 abi_ulong auxv = ts->info->saved_auxv;
4681 abi_ulong len = ts->info->auxv_len;
4682 char *ptr;
4685 * The auxiliary vector is stored on the target process's stack.
4686 * Read the whole auxv vector and copy it to the file.
4688 ptr = lock_user(VERIFY_READ, auxv, len, 0);
4689 if (ptr != NULL) {
4690 while (len > 0) {
4691 ssize_t r;
4692 r = write(fd, ptr, len);
4693 if (r <= 0) {
4694 break;
4696 len -= r;
4697 ptr += r;
4699 lseek(fd, 0, SEEK_SET);
4700 unlock_user(ptr, auxv, len);
4703 return 0;
4706 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
4708 struct fake_open {
4709 const char *filename;
4710 int (*fill)(void *cpu_env, int fd);
4712 const struct fake_open *fake_open;
4713 static const struct fake_open fakes[] = {
4714 { "/proc/self/maps", open_self_maps },
4715 { "/proc/self/stat", open_self_stat },
4716 { "/proc/self/auxv", open_self_auxv },
4717 { NULL, NULL }
4720 for (fake_open = fakes; fake_open->filename; fake_open++) {
4721 if (!strncmp(pathname, fake_open->filename,
4722 strlen(fake_open->filename))) {
4723 break;
4727 if (fake_open->filename) {
4728 const char *tmpdir;
4729 char filename[PATH_MAX];
4730 int fd, r;
4732 /* create a temporary file to hold the fake /proc contents */
4733 tmpdir = getenv("TMPDIR");
4734 if (!tmpdir)
4735 tmpdir = "/tmp";
4736 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
4737 fd = mkstemp(filename);
4738 if (fd < 0) {
4739 return fd;
4741 unlink(filename);
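/* The name was unlinked above, so the temporary file is reclaimed
   automatically as soon as the descriptor is closed. */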
4743 if ((r = fake_open->fill(cpu_env, fd))) {
4744 close(fd);
4745 return r;
4747 lseek(fd, 0, SEEK_SET);
4749 return fd;
4752 return get_errno(open(path(pathname), flags, mode));
4755 /* do_syscall() should always have a single exit point at the end so
4756 that actions, such as logging of syscall results, can be performed.
4757 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4758 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4759 abi_long arg2, abi_long arg3, abi_long arg4,
4760 abi_long arg5, abi_long arg6, abi_long arg7,
4761 abi_long arg8)
4763 abi_long ret;
4764 struct stat st;
4765 struct statfs stfs;
4766 void *p;
4768 #ifdef DEBUG
4769 gemu_log("syscall %d", num);
4770 #endif
4771 if(do_strace)
4772 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4774 switch(num) {
4775 case TARGET_NR_exit:
4776 #ifdef CONFIG_USE_NPTL
4777 /* In old applications this may be used to implement _exit(2).
4778 However, in threaded applications it is used for thread termination,
4779 and _exit_group is used for application termination.
4780 Do thread termination if we have more than one thread. */
4781 /* FIXME: This probably breaks if a signal arrives. We should probably
4782 be disabling signals. */
4783 if (first_cpu->next_cpu) {
4784 TaskState *ts;
4785 CPUState **lastp;
4786 CPUState *p;
4788 cpu_list_lock();
4789 lastp = &first_cpu;
4790 p = first_cpu;
4791 while (p && p != (CPUState *)cpu_env) {
4792 lastp = &p->next_cpu;
4793 p = p->next_cpu;
4795 /* If we didn't find the CPU for this thread then something is
4796 horribly wrong. */
4797 if (!p)
4798 abort();
4799 /* Remove the CPU from the list. */
4800 *lastp = p->next_cpu;
4801 cpu_list_unlock();
4802 ts = ((CPUState *)cpu_env)->opaque;
4803 if (ts->child_tidptr) {
4804 put_user_u32(0, ts->child_tidptr);
4805 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4806 NULL, NULL, 0);
4808 thread_env = NULL;
4809 g_free(cpu_env);
4810 g_free(ts);
4811 pthread_exit(NULL);
4813 #endif
4814 #ifdef TARGET_GPROF
4815 _mcleanup();
4816 #endif
4817 gdb_exit(cpu_env, arg1);
4818 _exit(arg1);
4819 ret = 0; /* avoid warning */
4820 break;
4821 case TARGET_NR_read:
4822 if (arg3 == 0)
4823 ret = 0;
4824 else {
4825 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4826 goto efault;
4827 ret = get_errno(read(arg1, p, arg3));
4828 unlock_user(p, arg2, ret);
4830 break;
4831 case TARGET_NR_write:
4832 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4833 goto efault;
4834 ret = get_errno(write(arg1, p, arg3));
4835 unlock_user(p, arg2, 0);
4836 break;
4837 case TARGET_NR_open:
4838 if (!(p = lock_user_string(arg1)))
4839 goto efault;
4840 ret = get_errno(do_open(cpu_env, p,
4841 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4842 arg3));
4843 unlock_user(p, arg1, 0);
4844 break;
4845 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4846 case TARGET_NR_openat:
4847 if (!(p = lock_user_string(arg2)))
4848 goto efault;
4849 ret = get_errno(sys_openat(arg1,
4850 path(p),
4851 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4852 arg4));
4853 unlock_user(p, arg2, 0);
4854 break;
4855 #endif
4856 case TARGET_NR_close:
4857 ret = get_errno(close(arg1));
4858 break;
4859 case TARGET_NR_brk:
4860 ret = do_brk(arg1);
4861 break;
4862 case TARGET_NR_fork:
4863 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4864 break;
4865 #ifdef TARGET_NR_waitpid
4866 case TARGET_NR_waitpid:
4868 int status;
4869 ret = get_errno(waitpid(arg1, &status, arg3));
4870 if (!is_error(ret) && arg2 && ret
4871 && put_user_s32(host_to_target_waitstatus(status), arg2))
4872 goto efault;
4874 break;
4875 #endif
4876 #ifdef TARGET_NR_waitid
4877 case TARGET_NR_waitid:
4879 siginfo_t info;
4880 info.si_pid = 0;
4881 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4882 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4883 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4884 goto efault;
4885 host_to_target_siginfo(p, &info);
4886 unlock_user(p, arg3, sizeof(target_siginfo_t));
4889 break;
4890 #endif
4891 #ifdef TARGET_NR_creat /* not on alpha */
4892 case TARGET_NR_creat:
4893 if (!(p = lock_user_string(arg1)))
4894 goto efault;
4895 ret = get_errno(creat(p, arg2));
4896 unlock_user(p, arg1, 0);
4897 break;
4898 #endif
4899 case TARGET_NR_link:
4901 void * p2;
4902 p = lock_user_string(arg1);
4903 p2 = lock_user_string(arg2);
4904 if (!p || !p2)
4905 ret = -TARGET_EFAULT;
4906 else
4907 ret = get_errno(link(p, p2));
4908 unlock_user(p2, arg2, 0);
4909 unlock_user(p, arg1, 0);
4911 break;
4912 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4913 case TARGET_NR_linkat:
4915 void * p2 = NULL;
4916 if (!arg2 || !arg4)
4917 goto efault;
4918 p = lock_user_string(arg2);
4919 p2 = lock_user_string(arg4);
4920 if (!p || !p2)
4921 ret = -TARGET_EFAULT;
4922 else
4923 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4924 unlock_user(p, arg2, 0);
4925 unlock_user(p2, arg4, 0);
4927 break;
4928 #endif
4929 case TARGET_NR_unlink:
4930 if (!(p = lock_user_string(arg1)))
4931 goto efault;
4932 ret = get_errno(unlink(p));
4933 unlock_user(p, arg1, 0);
4934 break;
4935 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4936 case TARGET_NR_unlinkat:
4937 if (!(p = lock_user_string(arg2)))
4938 goto efault;
4939 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4940 unlock_user(p, arg2, 0);
4941 break;
4942 #endif
4943 case TARGET_NR_execve:
4945 char **argp, **envp;
4946 int argc, envc;
4947 abi_ulong gp;
4948 abi_ulong guest_argp;
4949 abi_ulong guest_envp;
4950 abi_ulong addr;
4951 char **q;
4953 argc = 0;
4954 guest_argp = arg2;
4955 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4956 if (get_user_ual(addr, gp))
4957 goto efault;
4958 if (!addr)
4959 break;
4960 argc++;
4962 envc = 0;
4963 guest_envp = arg3;
4964 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4965 if (get_user_ual(addr, gp))
4966 goto efault;
4967 if (!addr)
4968 break;
4969 envc++;
4972 argp = alloca((argc + 1) * sizeof(void *));
4973 envp = alloca((envc + 1) * sizeof(void *));
4975 for (gp = guest_argp, q = argp; gp;
4976 gp += sizeof(abi_ulong), q++) {
4977 if (get_user_ual(addr, gp))
4978 goto execve_efault;
4979 if (!addr)
4980 break;
4981 if (!(*q = lock_user_string(addr)))
4982 goto execve_efault;
4984 *q = NULL;
4986 for (gp = guest_envp, q = envp; gp;
4987 gp += sizeof(abi_ulong), q++) {
4988 if (get_user_ual(addr, gp))
4989 goto execve_efault;
4990 if (!addr)
4991 break;
4992 if (!(*q = lock_user_string(addr)))
4993 goto execve_efault;
4995 *q = NULL;
4997 if (!(p = lock_user_string(arg1)))
4998 goto execve_efault;
4999 ret = get_errno(execve(p, argp, envp));
5000 unlock_user(p, arg1, 0);
5002 goto execve_end;
5004 execve_efault:
5005 ret = -TARGET_EFAULT;
5007 execve_end:
5008 for (gp = guest_argp, q = argp; *q;
5009 gp += sizeof(abi_ulong), q++) {
5010 if (get_user_ual(addr, gp)
5011 || !addr)
5012 break;
5013 unlock_user(*q, addr, 0);
5015 for (gp = guest_envp, q = envp; *q;
5016 gp += sizeof(abi_ulong), q++) {
5017 if (get_user_ual(addr, gp)
5018 || !addr)
5019 break;
5020 unlock_user(*q, addr, 0);
5023 break;
5024 case TARGET_NR_chdir:
5025 if (!(p = lock_user_string(arg1)))
5026 goto efault;
5027 ret = get_errno(chdir(p));
5028 unlock_user(p, arg1, 0);
5029 break;
5030 #ifdef TARGET_NR_time
5031 case TARGET_NR_time:
5033 time_t host_time;
5034 ret = get_errno(time(&host_time));
5035 if (!is_error(ret)
5036 && arg1
5037 && put_user_sal(host_time, arg1))
5038 goto efault;
5040 break;
5041 #endif
5042 case TARGET_NR_mknod:
5043 if (!(p = lock_user_string(arg1)))
5044 goto efault;
5045 ret = get_errno(mknod(p, arg2, arg3));
5046 unlock_user(p, arg1, 0);
5047 break;
5048 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5049 case TARGET_NR_mknodat:
5050 if (!(p = lock_user_string(arg2)))
5051 goto efault;
5052 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5053 unlock_user(p, arg2, 0);
5054 break;
5055 #endif
5056 case TARGET_NR_chmod:
5057 if (!(p = lock_user_string(arg1)))
5058 goto efault;
5059 ret = get_errno(chmod(p, arg2));
5060 unlock_user(p, arg1, 0);
5061 break;
5062 #ifdef TARGET_NR_break
5063 case TARGET_NR_break:
5064 goto unimplemented;
5065 #endif
5066 #ifdef TARGET_NR_oldstat
5067 case TARGET_NR_oldstat:
5068 goto unimplemented;
5069 #endif
5070 case TARGET_NR_lseek:
5071 ret = get_errno(lseek(arg1, arg2, arg3));
5072 break;
5073 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5074 /* Alpha specific */
5075 case TARGET_NR_getxpid:
5076 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5077 ret = get_errno(getpid());
5078 break;
5079 #endif
5080 #ifdef TARGET_NR_getpid
5081 case TARGET_NR_getpid:
5082 ret = get_errno(getpid());
5083 break;
5084 #endif
5085 case TARGET_NR_mount:
5087 /* need to look at the data field */
5088 void *p2, *p3;
5089 p = lock_user_string(arg1);
5090 p2 = lock_user_string(arg2);
5091 p3 = lock_user_string(arg3);
5092 if (!p || !p2 || !p3)
5093 ret = -TARGET_EFAULT;
5094 else {
5095 /* FIXME - arg5 should be locked, but it isn't clear how to
5096 * do that since it's not guaranteed to be a NULL-terminated
5097 * string.
5099 if (!arg5)
5100 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5101 else
5102 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5104 unlock_user(p, arg1, 0);
5105 unlock_user(p2, arg2, 0);
5106 unlock_user(p3, arg3, 0);
5107 break;
5109 #ifdef TARGET_NR_umount
5110 case TARGET_NR_umount:
5111 if (!(p = lock_user_string(arg1)))
5112 goto efault;
5113 ret = get_errno(umount(p));
5114 unlock_user(p, arg1, 0);
5115 break;
5116 #endif
5117 #ifdef TARGET_NR_stime /* not on alpha */
5118 case TARGET_NR_stime:
5120 time_t host_time;
5121 if (get_user_sal(host_time, arg1))
5122 goto efault;
5123 ret = get_errno(stime(&host_time));
5125 break;
5126 #endif
5127 case TARGET_NR_ptrace:
5128 goto unimplemented;
5129 #ifdef TARGET_NR_alarm /* not on alpha */
5130 case TARGET_NR_alarm:
5131 ret = alarm(arg1);
5132 break;
5133 #endif
5134 #ifdef TARGET_NR_oldfstat
5135 case TARGET_NR_oldfstat:
5136 goto unimplemented;
5137 #endif
5138 #ifdef TARGET_NR_pause /* not on alpha */
5139 case TARGET_NR_pause:
5140 ret = get_errno(pause());
5141 break;
5142 #endif
5143 #ifdef TARGET_NR_utime
5144 case TARGET_NR_utime:
5146 struct utimbuf tbuf, *host_tbuf;
5147 struct target_utimbuf *target_tbuf;
5148 if (arg2) {
5149 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5150 goto efault;
5151 tbuf.actime = tswapal(target_tbuf->actime);
5152 tbuf.modtime = tswapal(target_tbuf->modtime);
5153 unlock_user_struct(target_tbuf, arg2, 0);
5154 host_tbuf = &tbuf;
5155 } else {
5156 host_tbuf = NULL;
5158 if (!(p = lock_user_string(arg1)))
5159 goto efault;
5160 ret = get_errno(utime(p, host_tbuf));
5161 unlock_user(p, arg1, 0);
5163 break;
5164 #endif
5165 case TARGET_NR_utimes:
5167 struct timeval *tvp, tv[2];
5168 if (arg2) {
5169 if (copy_from_user_timeval(&tv[0], arg2)
5170 || copy_from_user_timeval(&tv[1],
5171 arg2 + sizeof(struct target_timeval)))
5172 goto efault;
5173 tvp = tv;
5174 } else {
5175 tvp = NULL;
5177 if (!(p = lock_user_string(arg1)))
5178 goto efault;
5179 ret = get_errno(utimes(p, tvp));
5180 unlock_user(p, arg1, 0);
5182 break;
5183 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5184 case TARGET_NR_futimesat:
5186 struct timeval *tvp, tv[2];
5187 if (arg3) {
5188 if (copy_from_user_timeval(&tv[0], arg3)
5189 || copy_from_user_timeval(&tv[1],
5190 arg3 + sizeof(struct target_timeval)))
5191 goto efault;
5192 tvp = tv;
5193 } else {
5194 tvp = NULL;
5196 if (!(p = lock_user_string(arg2)))
5197 goto efault;
5198 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5199 unlock_user(p, arg2, 0);
5201 break;
5202 #endif
5203 #ifdef TARGET_NR_stty
5204 case TARGET_NR_stty:
5205 goto unimplemented;
5206 #endif
5207 #ifdef TARGET_NR_gtty
5208 case TARGET_NR_gtty:
5209 goto unimplemented;
5210 #endif
5211 case TARGET_NR_access:
5212 if (!(p = lock_user_string(arg1)))
5213 goto efault;
5214 ret = get_errno(access(path(p), arg2));
5215 unlock_user(p, arg1, 0);
5216 break;
5217 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5218 case TARGET_NR_faccessat:
5219 if (!(p = lock_user_string(arg2)))
5220 goto efault;
5221 ret = get_errno(sys_faccessat(arg1, p, arg3));
5222 unlock_user(p, arg2, 0);
5223 break;
5224 #endif
5225 #ifdef TARGET_NR_nice /* not on alpha */
5226 case TARGET_NR_nice:
5227 ret = get_errno(nice(arg1));
5228 break;
5229 #endif
5230 #ifdef TARGET_NR_ftime
5231 case TARGET_NR_ftime:
5232 goto unimplemented;
5233 #endif
5234 case TARGET_NR_sync:
5235 sync();
5236 ret = 0;
5237 break;
5238 case TARGET_NR_kill:
5239 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5240 break;
5241 case TARGET_NR_rename:
5243 void *p2;
5244 p = lock_user_string(arg1);
5245 p2 = lock_user_string(arg2);
5246 if (!p || !p2)
5247 ret = -TARGET_EFAULT;
5248 else
5249 ret = get_errno(rename(p, p2));
5250 unlock_user(p2, arg2, 0);
5251 unlock_user(p, arg1, 0);
5253 break;
5254 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5255 case TARGET_NR_renameat:
5257 void *p2;
5258 p = lock_user_string(arg2);
5259 p2 = lock_user_string(arg4);
5260 if (!p || !p2)
5261 ret = -TARGET_EFAULT;
5262 else
5263 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5264 unlock_user(p2, arg4, 0);
5265 unlock_user(p, arg2, 0);
5267 break;
5268 #endif
5269 case TARGET_NR_mkdir:
5270 if (!(p = lock_user_string(arg1)))
5271 goto efault;
5272 ret = get_errno(mkdir(p, arg2));
5273 unlock_user(p, arg1, 0);
5274 break;
5275 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5276 case TARGET_NR_mkdirat:
5277 if (!(p = lock_user_string(arg2)))
5278 goto efault;
5279 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5280 unlock_user(p, arg2, 0);
5281 break;
5282 #endif
5283 case TARGET_NR_rmdir:
5284 if (!(p = lock_user_string(arg1)))
5285 goto efault;
5286 ret = get_errno(rmdir(p));
5287 unlock_user(p, arg1, 0);
5288 break;
5289 case TARGET_NR_dup:
5290 ret = get_errno(dup(arg1));
5291 break;
5292 case TARGET_NR_pipe:
5293 ret = do_pipe(cpu_env, arg1, 0, 0);
5294 break;
5295 #ifdef TARGET_NR_pipe2
5296 case TARGET_NR_pipe2:
5297 ret = do_pipe(cpu_env, arg1, arg2, 1);
5298 break;
5299 #endif
5300 case TARGET_NR_times:
5302 struct target_tms *tmsp;
5303 struct tms tms;
5304 ret = get_errno(times(&tms));
5305 if (arg1) {
5306 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5307 if (!tmsp)
5308 goto efault;
5309 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5310 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5311 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5312 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5314 if (!is_error(ret))
5315 ret = host_to_target_clock_t(ret);
5317 break;
5318 #ifdef TARGET_NR_prof
5319 case TARGET_NR_prof:
5320 goto unimplemented;
5321 #endif
5322 #ifdef TARGET_NR_signal
5323 case TARGET_NR_signal:
5324 goto unimplemented;
5325 #endif
5326 case TARGET_NR_acct:
5327 if (arg1 == 0) {
5328 ret = get_errno(acct(NULL));
5329 } else {
5330 if (!(p = lock_user_string(arg1)))
5331 goto efault;
5332 ret = get_errno(acct(path(p)));
5333 unlock_user(p, arg1, 0);
5335 break;
5336 #ifdef TARGET_NR_umount2 /* not on alpha */
5337 case TARGET_NR_umount2:
5338 if (!(p = lock_user_string(arg1)))
5339 goto efault;
5340 ret = get_errno(umount2(p, arg2));
5341 unlock_user(p, arg1, 0);
5342 break;
5343 #endif
5344 #ifdef TARGET_NR_lock
5345 case TARGET_NR_lock:
5346 goto unimplemented;
5347 #endif
5348 case TARGET_NR_ioctl:
5349 ret = do_ioctl(arg1, arg2, arg3);
5350 break;
5351 case TARGET_NR_fcntl:
5352 ret = do_fcntl(arg1, arg2, arg3);
5353 break;
5354 #ifdef TARGET_NR_mpx
5355 case TARGET_NR_mpx:
5356 goto unimplemented;
5357 #endif
5358 case TARGET_NR_setpgid:
5359 ret = get_errno(setpgid(arg1, arg2));
5360 break;
5361 #ifdef TARGET_NR_ulimit
5362 case TARGET_NR_ulimit:
5363 goto unimplemented;
5364 #endif
5365 #ifdef TARGET_NR_oldolduname
5366 case TARGET_NR_oldolduname:
5367 goto unimplemented;
5368 #endif
5369 case TARGET_NR_umask:
5370 ret = get_errno(umask(arg1));
5371 break;
5372 case TARGET_NR_chroot:
5373 if (!(p = lock_user_string(arg1)))
5374 goto efault;
5375 ret = get_errno(chroot(p));
5376 unlock_user(p, arg1, 0);
5377 break;
5378 case TARGET_NR_ustat:
5379 goto unimplemented;
5380 case TARGET_NR_dup2:
5381 ret = get_errno(dup2(arg1, arg2));
5382 break;
5383 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5384 case TARGET_NR_dup3:
5385 ret = get_errno(dup3(arg1, arg2, arg3));
5386 break;
5387 #endif
5388 #ifdef TARGET_NR_getppid /* not on alpha */
5389 case TARGET_NR_getppid:
5390 ret = get_errno(getppid());
5391 break;
5392 #endif
5393 case TARGET_NR_getpgrp:
5394 ret = get_errno(getpgrp());
5395 break;
5396 case TARGET_NR_setsid:
5397 ret = get_errno(setsid());
5398 break;
5399 #ifdef TARGET_NR_sigaction
5400 case TARGET_NR_sigaction:
5402 #if defined(TARGET_ALPHA)
5403 struct target_sigaction act, oact, *pact = 0;
5404 struct target_old_sigaction *old_act;
5405 if (arg2) {
5406 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5407 goto efault;
5408 act._sa_handler = old_act->_sa_handler;
5409 target_siginitset(&act.sa_mask, old_act->sa_mask);
5410 act.sa_flags = old_act->sa_flags;
5411 act.sa_restorer = 0;
5412 unlock_user_struct(old_act, arg2, 0);
5413 pact = &act;
5415 ret = get_errno(do_sigaction(arg1, pact, &oact));
5416 if (!is_error(ret) && arg3) {
5417 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5418 goto efault;
5419 old_act->_sa_handler = oact._sa_handler;
5420 old_act->sa_mask = oact.sa_mask.sig[0];
5421 old_act->sa_flags = oact.sa_flags;
5422 unlock_user_struct(old_act, arg3, 1);
5424 #elif defined(TARGET_MIPS)
5425 struct target_sigaction act, oact, *pact, *old_act;
5427 if (arg2) {
5428 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5429 goto efault;
5430 act._sa_handler = old_act->_sa_handler;
5431 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5432 act.sa_flags = old_act->sa_flags;
5433 unlock_user_struct(old_act, arg2, 0);
5434 pact = &act;
5435 } else {
5436 pact = NULL;
5439 ret = get_errno(do_sigaction(arg1, pact, &oact));
5441 if (!is_error(ret) && arg3) {
5442 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5443 goto efault;
5444 old_act->_sa_handler = oact._sa_handler;
5445 old_act->sa_flags = oact.sa_flags;
5446 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5447 old_act->sa_mask.sig[1] = 0;
5448 old_act->sa_mask.sig[2] = 0;
5449 old_act->sa_mask.sig[3] = 0;
5450 unlock_user_struct(old_act, arg3, 1);
5452 #else
5453 struct target_old_sigaction *old_act;
5454 struct target_sigaction act, oact, *pact;
5455 if (arg2) {
5456 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5457 goto efault;
5458 act._sa_handler = old_act->_sa_handler;
5459 target_siginitset(&act.sa_mask, old_act->sa_mask);
5460 act.sa_flags = old_act->sa_flags;
5461 act.sa_restorer = old_act->sa_restorer;
5462 unlock_user_struct(old_act, arg2, 0);
5463 pact = &act;
5464 } else {
5465 pact = NULL;
5467 ret = get_errno(do_sigaction(arg1, pact, &oact));
5468 if (!is_error(ret) && arg3) {
5469 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5470 goto efault;
5471 old_act->_sa_handler = oact._sa_handler;
5472 old_act->sa_mask = oact.sa_mask.sig[0];
5473 old_act->sa_flags = oact.sa_flags;
5474 old_act->sa_restorer = oact.sa_restorer;
5475 unlock_user_struct(old_act, arg3, 1);
5477 #endif
5479 break;
5480 #endif
5481 case TARGET_NR_rt_sigaction:
5483 #if defined(TARGET_ALPHA)
5484 struct target_sigaction act, oact, *pact = 0;
5485 struct target_rt_sigaction *rt_act;
5486 /* ??? arg4 == sizeof(sigset_t). */
5487 if (arg2) {
5488 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5489 goto efault;
5490 act._sa_handler = rt_act->_sa_handler;
5491 act.sa_mask = rt_act->sa_mask;
5492 act.sa_flags = rt_act->sa_flags;
5493 act.sa_restorer = arg5;
5494 unlock_user_struct(rt_act, arg2, 0);
5495 pact = &act;
5497 ret = get_errno(do_sigaction(arg1, pact, &oact));
5498 if (!is_error(ret) && arg3) {
5499 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5500 goto efault;
5501 rt_act->_sa_handler = oact._sa_handler;
5502 rt_act->sa_mask = oact.sa_mask;
5503 rt_act->sa_flags = oact.sa_flags;
5504 unlock_user_struct(rt_act, arg3, 1);
5506 #else
5507 struct target_sigaction *act;
5508 struct target_sigaction *oact;
5510 if (arg2) {
5511 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5512 goto efault;
5513 } else
5514 act = NULL;
5515 if (arg3) {
5516 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5517 ret = -TARGET_EFAULT;
5518 goto rt_sigaction_fail;
5520 } else
5521 oact = NULL;
5522 ret = get_errno(do_sigaction(arg1, act, oact));
5523 rt_sigaction_fail:
5524 if (act)
5525 unlock_user_struct(act, arg2, 0);
5526 if (oact)
5527 unlock_user_struct(oact, arg3, 1);
5528 #endif
5530 break;
5531 #ifdef TARGET_NR_sgetmask /* not on alpha */
5532 case TARGET_NR_sgetmask:
5534 sigset_t cur_set;
5535 abi_ulong target_set;
5536 sigprocmask(0, NULL, &cur_set);
5537 host_to_target_old_sigset(&target_set, &cur_set);
5538 ret = target_set;
5540 break;
5541 #endif
5542 #ifdef TARGET_NR_ssetmask /* not on alpha */
5543 case TARGET_NR_ssetmask:
5545 sigset_t set, oset, cur_set;
5546 abi_ulong target_set = arg1;
5547 sigprocmask(0, NULL, &cur_set);
5548 target_to_host_old_sigset(&set, &target_set);
5549 sigorset(&set, &set, &cur_set);
5550 sigprocmask(SIG_SETMASK, &set, &oset);
5551 host_to_target_old_sigset(&target_set, &oset);
5552 ret = target_set;
5554 break;
5555 #endif
5556 #ifdef TARGET_NR_sigprocmask
5557 case TARGET_NR_sigprocmask:
5559 #if defined(TARGET_ALPHA)
5560 sigset_t set, oldset;
5561 abi_ulong mask;
5562 int how;
5564 switch (arg1) {
5565 case TARGET_SIG_BLOCK:
5566 how = SIG_BLOCK;
5567 break;
5568 case TARGET_SIG_UNBLOCK:
5569 how = SIG_UNBLOCK;
5570 break;
5571 case TARGET_SIG_SETMASK:
5572 how = SIG_SETMASK;
5573 break;
5574 default:
5575 ret = -TARGET_EINVAL;
5576 goto fail;
5578 mask = arg2;
5579 target_to_host_old_sigset(&set, &mask);
5581 ret = get_errno(sigprocmask(how, &set, &oldset));
5583 if (!is_error(ret)) {
5584 host_to_target_old_sigset(&mask, &oldset);
5585 ret = mask;
5586 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5588 #else
5589 sigset_t set, oldset, *set_ptr;
5590 int how;
5592 if (arg2) {
5593 switch (arg1) {
5594 case TARGET_SIG_BLOCK:
5595 how = SIG_BLOCK;
5596 break;
5597 case TARGET_SIG_UNBLOCK:
5598 how = SIG_UNBLOCK;
5599 break;
5600 case TARGET_SIG_SETMASK:
5601 how = SIG_SETMASK;
5602 break;
5603 default:
5604 ret = -TARGET_EINVAL;
5605 goto fail;
5607 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5608 goto efault;
5609 target_to_host_old_sigset(&set, p);
5610 unlock_user(p, arg2, 0);
5611 set_ptr = &set;
5612 } else {
5613 how = 0;
5614 set_ptr = NULL;
5616 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5617 if (!is_error(ret) && arg3) {
5618 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5619 goto efault;
5620 host_to_target_old_sigset(p, &oldset);
5621 unlock_user(p, arg3, sizeof(target_sigset_t));
5623 #endif
5625 break;
5626 #endif
5627 case TARGET_NR_rt_sigprocmask:
5629 int how = arg1;
5630 sigset_t set, oldset, *set_ptr;
5632 if (arg2) {
5633 switch(how) {
5634 case TARGET_SIG_BLOCK:
5635 how = SIG_BLOCK;
5636 break;
5637 case TARGET_SIG_UNBLOCK:
5638 how = SIG_UNBLOCK;
5639 break;
5640 case TARGET_SIG_SETMASK:
5641 how = SIG_SETMASK;
5642 break;
5643 default:
5644 ret = -TARGET_EINVAL;
5645 goto fail;
5647 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5648 goto efault;
5649 target_to_host_sigset(&set, p);
5650 unlock_user(p, arg2, 0);
5651 set_ptr = &set;
5652 } else {
5653 how = 0;
5654 set_ptr = NULL;
5656 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5657 if (!is_error(ret) && arg3) {
5658 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5659 goto efault;
5660 host_to_target_sigset(p, &oldset);
5661 unlock_user(p, arg3, sizeof(target_sigset_t));
5664 break;
5665 #ifdef TARGET_NR_sigpending
5666 case TARGET_NR_sigpending:
5668 sigset_t set;
5669 ret = get_errno(sigpending(&set));
5670 if (!is_error(ret)) {
5671 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5672 goto efault;
5673 host_to_target_old_sigset(p, &set);
5674 unlock_user(p, arg1, sizeof(target_sigset_t));
5677 break;
5678 #endif
5679 case TARGET_NR_rt_sigpending:
5681 sigset_t set;
5682 ret = get_errno(sigpending(&set));
5683 if (!is_error(ret)) {
5684 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5685 goto efault;
5686 host_to_target_sigset(p, &set);
5687 unlock_user(p, arg1, sizeof(target_sigset_t));
5690 break;
5691 #ifdef TARGET_NR_sigsuspend
5692 case TARGET_NR_sigsuspend:
5694 sigset_t set;
5695 #if defined(TARGET_ALPHA)
5696 abi_ulong mask = arg1;
5697 target_to_host_old_sigset(&set, &mask);
5698 #else
5699 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5700 goto efault;
5701 target_to_host_old_sigset(&set, p);
5702 unlock_user(p, arg1, 0);
5703 #endif
5704 ret = get_errno(sigsuspend(&set));
5706 break;
5707 #endif
5708 case TARGET_NR_rt_sigsuspend:
5710 sigset_t set;
5711 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5712 goto efault;
5713 target_to_host_sigset(&set, p);
5714 unlock_user(p, arg1, 0);
5715 ret = get_errno(sigsuspend(&set));
5717 break;
5718 case TARGET_NR_rt_sigtimedwait:
5720 sigset_t set;
5721 struct timespec uts, *puts;
5722 siginfo_t uinfo;
5724 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5725 goto efault;
5726 target_to_host_sigset(&set, p);
5727 unlock_user(p, arg1, 0);
5728 if (arg3) {
5729 puts = &uts;
5730 target_to_host_timespec(puts, arg3);
5731 } else {
5732 puts = NULL;
5734 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5735 if (!is_error(ret) && arg2) {
5736 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5737 goto efault;
5738 host_to_target_siginfo(p, &uinfo);
5739 unlock_user(p, arg2, sizeof(target_siginfo_t));
5742 break;
5743 case TARGET_NR_rt_sigqueueinfo:
5745 siginfo_t uinfo;
5746 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
5747 goto efault;
5748 target_to_host_siginfo(&uinfo, p);
5749 unlock_user(p, arg1, 0);
5750 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5752 break;
5753 #ifdef TARGET_NR_sigreturn
5754 case TARGET_NR_sigreturn:
5755 /* NOTE: ret is eax, so no transcoding must be done */
5756 ret = do_sigreturn(cpu_env);
5757 break;
5758 #endif
5759 case TARGET_NR_rt_sigreturn:
5760 /* NOTE: ret is eax, so no transcoding must be done */
5761 ret = do_rt_sigreturn(cpu_env);
5762 break;
5763 case TARGET_NR_sethostname:
5764 if (!(p = lock_user_string(arg1)))
5765 goto efault;
5766 ret = get_errno(sethostname(p, arg2));
5767 unlock_user(p, arg1, 0);
5768 break;
5769 case TARGET_NR_setrlimit:
5771 int resource = target_to_host_resource(arg1);
5772 struct target_rlimit *target_rlim;
5773 struct rlimit rlim;
5774 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5775 goto efault;
5776 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5777 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5778 unlock_user_struct(target_rlim, arg2, 0);
5779 ret = get_errno(setrlimit(resource, &rlim));
5781 break;
5782 case TARGET_NR_getrlimit:
5784 int resource = target_to_host_resource(arg1);
5785 struct target_rlimit *target_rlim;
5786 struct rlimit rlim;
5788 ret = get_errno(getrlimit(resource, &rlim));
5789 if (!is_error(ret)) {
5790 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5791 goto efault;
5792 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5793 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5794 unlock_user_struct(target_rlim, arg2, 1);
5797 break;
5798 case TARGET_NR_getrusage:
5800 struct rusage rusage;
5801 ret = get_errno(getrusage(arg1, &rusage));
5802 if (!is_error(ret)) {
5803 host_to_target_rusage(arg2, &rusage);
5806 break;
5807 case TARGET_NR_gettimeofday:
5809 struct timeval tv;
5810 ret = get_errno(gettimeofday(&tv, NULL));
5811 if (!is_error(ret)) {
5812 if (copy_to_user_timeval(arg1, &tv))
5813 goto efault;
5816 break;
5817 case TARGET_NR_settimeofday:
5819 struct timeval tv;
5820 if (copy_from_user_timeval(&tv, arg1))
5821 goto efault;
5822 ret = get_errno(settimeofday(&tv, NULL));
5824 break;
5825 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5826 case TARGET_NR_select:
5828 struct target_sel_arg_struct *sel;
5829 abi_ulong inp, outp, exp, tvp;
5830 long nsel;
5832 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5833 goto efault;
5834 nsel = tswapal(sel->n);
5835 inp = tswapal(sel->inp);
5836 outp = tswapal(sel->outp);
5837 exp = tswapal(sel->exp);
5838 tvp = tswapal(sel->tvp);
5839 unlock_user_struct(sel, arg1, 0);
5840 ret = do_select(nsel, inp, outp, exp, tvp);
5842 break;
5843 #endif
5844 #ifdef TARGET_NR_pselect6
5845 case TARGET_NR_pselect6:
5847 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
5848 fd_set rfds, wfds, efds;
5849 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
5850 struct timespec ts, *ts_ptr;
5853 * The 6th arg is actually two args smashed together,
5854 * so we cannot use the C library.
5856 sigset_t set;
5857 struct {
5858 sigset_t *set;
5859 size_t size;
5860 } sig, *sig_ptr;
5862 abi_ulong arg_sigset, arg_sigsize, *arg7;
5863 target_sigset_t *target_sigset;
5865 n = arg1;
5866 rfd_addr = arg2;
5867 wfd_addr = arg3;
5868 efd_addr = arg4;
5869 ts_addr = arg5;
5871 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
5872 if (ret) {
5873 goto fail;
5875 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
5876 if (ret) {
5877 goto fail;
5879 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
5880 if (ret) {
5881 goto fail;
5885 * This takes a timespec, and not a timeval, so we cannot
5886 * use the do_select() helper ...
5888 if (ts_addr) {
5889 if (target_to_host_timespec(&ts, ts_addr)) {
5890 goto efault;
5892 ts_ptr = &ts;
5893 } else {
5894 ts_ptr = NULL;
5897 /* Extract the two packed args for the sigset */
5898 if (arg6) {
5899 sig_ptr = &sig;
5900 sig.size = _NSIG / 8;
5902 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
5903 if (!arg7) {
5904 goto efault;
5906 arg_sigset = tswapal(arg7[0]);
5907 arg_sigsize = tswapal(arg7[1]);
5908 unlock_user(arg7, arg6, 0);
5910 if (arg_sigset) {
5911 sig.set = &set;
5912 if (arg_sigsize != sizeof(*target_sigset)) {
5913 /* Like the kernel, we enforce correct size sigsets */
5914 ret = -TARGET_EINVAL;
5915 goto fail;
5917 target_sigset = lock_user(VERIFY_READ, arg_sigset,
5918 sizeof(*target_sigset), 1);
5919 if (!target_sigset) {
5920 goto efault;
5922 target_to_host_sigset(&set, target_sigset);
5923 unlock_user(target_sigset, arg_sigset, 0);
5924 } else {
5925 sig.set = NULL;
5927 } else {
5928 sig_ptr = NULL;
5931 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
5932 ts_ptr, sig_ptr));
5934 if (!is_error(ret)) {
5935 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
5936 goto efault;
5937 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
5938 goto efault;
5939 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
5940 goto efault;
5942 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
5943 goto efault;
5946 break;
5947 #endif
5948 case TARGET_NR_symlink:
5950 void *p2;
5951 p = lock_user_string(arg1);
5952 p2 = lock_user_string(arg2);
5953 if (!p || !p2)
5954 ret = -TARGET_EFAULT;
5955 else
5956 ret = get_errno(symlink(p, p2));
5957 unlock_user(p2, arg2, 0);
5958 unlock_user(p, arg1, 0);
5960 break;
5961 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5962 case TARGET_NR_symlinkat:
5964 void *p2;
5965 p = lock_user_string(arg1);
5966 p2 = lock_user_string(arg3);
5967 if (!p || !p2)
5968 ret = -TARGET_EFAULT;
5969 else
5970 ret = get_errno(sys_symlinkat(p, arg2, p2));
5971 unlock_user(p2, arg3, 0);
5972 unlock_user(p, arg1, 0);
5974 break;
5975 #endif
5976 #ifdef TARGET_NR_oldlstat
5977 case TARGET_NR_oldlstat:
5978 goto unimplemented;
5979 #endif
5980 case TARGET_NR_readlink:
5982 void *p2, *temp;
5983 p = lock_user_string(arg1);
5984 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5985 if (!p || !p2)
5986 ret = -TARGET_EFAULT;
5987 else {
5988 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5989 char real[PATH_MAX];
5990 temp = realpath(exec_path, real);
5991 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
5992 snprintf((char *)p2, arg3, "%s", real);
5994 else
5995 ret = get_errno(readlink(path(p), p2, arg3));
5997 unlock_user(p2, arg2, ret);
5998 unlock_user(p, arg1, 0);
6000 break;
6001 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6002 case TARGET_NR_readlinkat:
6004 void *p2;
6005 p = lock_user_string(arg2);
6006 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6007 if (!p || !p2)
6008 ret = -TARGET_EFAULT;
6009 else
6010 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6011 unlock_user(p2, arg3, ret);
6012 unlock_user(p, arg2, 0);
6014 break;
6015 #endif
6016 #ifdef TARGET_NR_uselib
6017 case TARGET_NR_uselib:
6018 goto unimplemented;
6019 #endif
6020 #ifdef TARGET_NR_swapon
6021 case TARGET_NR_swapon:
6022 if (!(p = lock_user_string(arg1)))
6023 goto efault;
6024 ret = get_errno(swapon(p, arg2));
6025 unlock_user(p, arg1, 0);
6026 break;
6027 #endif
6028 case TARGET_NR_reboot:
6029 if (!(p = lock_user_string(arg4)))
6030 goto efault;
6031 ret = reboot(arg1, arg2, arg3, p);
6032 unlock_user(p, arg4, 0);
6033 break;
6034 #ifdef TARGET_NR_readdir
6035 case TARGET_NR_readdir:
6036 goto unimplemented;
6037 #endif
6038 #ifdef TARGET_NR_mmap
6039 case TARGET_NR_mmap:
6040 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6041 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6042 || defined(TARGET_S390X)
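/*
 * On these targets the old-style mmap() passes a single guest pointer to an
 * array of six abi_ulong arguments rather than passing them in registers,
 * so the block is locked and unpacked before calling target_mmap().
 */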
6044 abi_ulong *v;
6045 abi_ulong v1, v2, v3, v4, v5, v6;
6046 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6047 goto efault;
6048 v1 = tswapal(v[0]);
6049 v2 = tswapal(v[1]);
6050 v3 = tswapal(v[2]);
6051 v4 = tswapal(v[3]);
6052 v5 = tswapal(v[4]);
6053 v6 = tswapal(v[5]);
6054 unlock_user(v, arg1, 0);
6055 ret = get_errno(target_mmap(v1, v2, v3,
6056 target_to_host_bitmask(v4, mmap_flags_tbl),
6057 v5, v6));
6059 #else
6060 ret = get_errno(target_mmap(arg1, arg2, arg3,
6061 target_to_host_bitmask(arg4, mmap_flags_tbl),
6062 arg5,
6063 arg6));
6064 #endif
6065 break;
6066 #endif
6067 #ifdef TARGET_NR_mmap2
6068 case TARGET_NR_mmap2:
6069 #ifndef MMAP_SHIFT
6070 #define MMAP_SHIFT 12
6071 #endif
6072 ret = get_errno(target_mmap(arg1, arg2, arg3,
6073 target_to_host_bitmask(arg4, mmap_flags_tbl),
6074 arg5,
6075 arg6 << MMAP_SHIFT));
6076 break;
6077 #endif
6078 case TARGET_NR_munmap:
6079 ret = get_errno(target_munmap(arg1, arg2));
6080 break;
6081 case TARGET_NR_mprotect:
6083 TaskState *ts = ((CPUState *)cpu_env)->opaque;
6084 /* Special hack to detect libc making the stack executable. */
6085 if ((arg3 & PROT_GROWSDOWN)
6086 && arg1 >= ts->info->stack_limit
6087 && arg1 <= ts->info->start_stack) {
6088 arg3 &= ~PROT_GROWSDOWN;
6089 arg2 = arg2 + arg1 - ts->info->stack_limit;
6090 arg1 = ts->info->stack_limit;
6093 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6094 break;
6095 #ifdef TARGET_NR_mremap
6096 case TARGET_NR_mremap:
6097 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6098 break;
6099 #endif
6100 /* ??? msync/mlock/munlock are broken for softmmu. */
6101 #ifdef TARGET_NR_msync
6102 case TARGET_NR_msync:
6103 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6104 break;
6105 #endif
6106 #ifdef TARGET_NR_mlock
6107 case TARGET_NR_mlock:
6108 ret = get_errno(mlock(g2h(arg1), arg2));
6109 break;
6110 #endif
6111 #ifdef TARGET_NR_munlock
6112 case TARGET_NR_munlock:
6113 ret = get_errno(munlock(g2h(arg1), arg2));
6114 break;
6115 #endif
6116 #ifdef TARGET_NR_mlockall
6117 case TARGET_NR_mlockall:
6118 ret = get_errno(mlockall(arg1));
6119 break;
6120 #endif
6121 #ifdef TARGET_NR_munlockall
6122 case TARGET_NR_munlockall:
6123 ret = get_errno(munlockall());
6124 break;
6125 #endif
6126 case TARGET_NR_truncate:
6127 if (!(p = lock_user_string(arg1)))
6128 goto efault;
6129 ret = get_errno(truncate(p, arg2));
6130 unlock_user(p, arg1, 0);
6131 break;
6132 case TARGET_NR_ftruncate:
6133 ret = get_errno(ftruncate(arg1, arg2));
6134 break;
6135 case TARGET_NR_fchmod:
6136 ret = get_errno(fchmod(arg1, arg2));
6137 break;
6138 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6139 case TARGET_NR_fchmodat:
6140 if (!(p = lock_user_string(arg2)))
6141 goto efault;
6142 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6143 unlock_user(p, arg2, 0);
6144 break;
6145 #endif
6146 case TARGET_NR_getpriority:
6147 /* libc does special remapping of the return value of
6148 * sys_getpriority() so it's just easiest to call
6149 * sys_getpriority() directly rather than through libc. */
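/*
 * Note: the raw syscall encodes the result as an unsigned value
 * (conventionally 20 - nice, i.e. 1..40); glibc converts it back to the
 * -20..19 nice range, which is exactly the remapping being avoided here.
 */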
6150 ret = get_errno(sys_getpriority(arg1, arg2));
6151 break;
6152 case TARGET_NR_setpriority:
6153 ret = get_errno(setpriority(arg1, arg2, arg3));
6154 break;
6155 #ifdef TARGET_NR_profil
6156 case TARGET_NR_profil:
6157 goto unimplemented;
6158 #endif
6159 case TARGET_NR_statfs:
6160 if (!(p = lock_user_string(arg1)))
6161 goto efault;
6162 ret = get_errno(statfs(path(p), &stfs));
6163 unlock_user(p, arg1, 0);
6164 convert_statfs:
6165 if (!is_error(ret)) {
6166 struct target_statfs *target_stfs;
6168 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6169 goto efault;
6170 __put_user(stfs.f_type, &target_stfs->f_type);
6171 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6172 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6173 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6174 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6175 __put_user(stfs.f_files, &target_stfs->f_files);
6176 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6177 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6178 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6179 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6180 unlock_user_struct(target_stfs, arg2, 1);
6182 break;
6183 case TARGET_NR_fstatfs:
6184 ret = get_errno(fstatfs(arg1, &stfs));
6185 goto convert_statfs;
6186 #ifdef TARGET_NR_statfs64
6187 case TARGET_NR_statfs64:
6188 if (!(p = lock_user_string(arg1)))
6189 goto efault;
6190 ret = get_errno(statfs(path(p), &stfs));
6191 unlock_user(p, arg1, 0);
6192 convert_statfs64:
6193 if (!is_error(ret)) {
6194 struct target_statfs64 *target_stfs;
6196 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6197 goto efault;
6198 __put_user(stfs.f_type, &target_stfs->f_type);
6199 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6200 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6201 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6202 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6203 __put_user(stfs.f_files, &target_stfs->f_files);
6204 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6205 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6206 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6207 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6208 unlock_user_struct(target_stfs, arg3, 1);
6210 break;
6211 case TARGET_NR_fstatfs64:
6212 ret = get_errno(fstatfs(arg1, &stfs));
6213 goto convert_statfs64;
6214 #endif
6215 #ifdef TARGET_NR_ioperm
6216 case TARGET_NR_ioperm:
6217 goto unimplemented;
6218 #endif
6219 #ifdef TARGET_NR_socketcall
6220 case TARGET_NR_socketcall:
6221 ret = do_socketcall(arg1, arg2);
6222 break;
6223 #endif
6224 #ifdef TARGET_NR_accept
6225 case TARGET_NR_accept:
6226 ret = do_accept(arg1, arg2, arg3);
6227 break;
6228 #endif
6229 #ifdef TARGET_NR_bind
6230 case TARGET_NR_bind:
6231 ret = do_bind(arg1, arg2, arg3);
6232 break;
6233 #endif
6234 #ifdef TARGET_NR_connect
6235 case TARGET_NR_connect:
6236 ret = do_connect(arg1, arg2, arg3);
6237 break;
6238 #endif
6239 #ifdef TARGET_NR_getpeername
6240 case TARGET_NR_getpeername:
6241 ret = do_getpeername(arg1, arg2, arg3);
6242 break;
6243 #endif
6244 #ifdef TARGET_NR_getsockname
6245 case TARGET_NR_getsockname:
6246 ret = do_getsockname(arg1, arg2, arg3);
6247 break;
6248 #endif
6249 #ifdef TARGET_NR_getsockopt
6250 case TARGET_NR_getsockopt:
6251 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6252 break;
6253 #endif
6254 #ifdef TARGET_NR_listen
6255 case TARGET_NR_listen:
6256 ret = get_errno(listen(arg1, arg2));
6257 break;
6258 #endif
6259 #ifdef TARGET_NR_recv
6260 case TARGET_NR_recv:
6261 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6262 break;
6263 #endif
6264 #ifdef TARGET_NR_recvfrom
6265 case TARGET_NR_recvfrom:
6266 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6267 break;
6268 #endif
6269 #ifdef TARGET_NR_recvmsg
6270 case TARGET_NR_recvmsg:
6271 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6272 break;
6273 #endif
6274 #ifdef TARGET_NR_send
6275 case TARGET_NR_send:
6276 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6277 break;
6278 #endif
6279 #ifdef TARGET_NR_sendmsg
6280 case TARGET_NR_sendmsg:
6281 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6282 break;
6283 #endif
6284 #ifdef TARGET_NR_sendto
6285 case TARGET_NR_sendto:
6286 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6287 break;
6288 #endif
6289 #ifdef TARGET_NR_shutdown
6290 case TARGET_NR_shutdown:
6291 ret = get_errno(shutdown(arg1, arg2));
6292 break;
6293 #endif
6294 #ifdef TARGET_NR_socket
6295 case TARGET_NR_socket:
6296 ret = do_socket(arg1, arg2, arg3);
6297 break;
6298 #endif
6299 #ifdef TARGET_NR_socketpair
6300 case TARGET_NR_socketpair:
6301 ret = do_socketpair(arg1, arg2, arg3, arg4);
6302 break;
6303 #endif
6304 #ifdef TARGET_NR_setsockopt
6305 case TARGET_NR_setsockopt:
6306 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6307 break;
6308 #endif
6310 case TARGET_NR_syslog:
6311 if (!(p = lock_user_string(arg2)))
6312 goto efault;
6313 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6314 unlock_user(p, arg2, 0);
6315 break;
6317 case TARGET_NR_setitimer:
6319 struct itimerval value, ovalue, *pvalue;
6321 if (arg2) {
6322 pvalue = &value;
6323 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6324 || copy_from_user_timeval(&pvalue->it_value,
6325 arg2 + sizeof(struct target_timeval)))
6326 goto efault;
6327 } else {
6328 pvalue = NULL;
6330 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6331 if (!is_error(ret) && arg3) {
6332 if (copy_to_user_timeval(arg3,
6333 &ovalue.it_interval)
6334 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6335 &ovalue.it_value))
6336 goto efault;
6339 break;
6340 case TARGET_NR_getitimer:
6342 struct itimerval value;
6344 ret = get_errno(getitimer(arg1, &value));
6345 if (!is_error(ret) && arg2) {
6346 if (copy_to_user_timeval(arg2,
6347 &value.it_interval)
6348 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6349 &value.it_value))
6350 goto efault;
6353 break;
6354 case TARGET_NR_stat:
6355 if (!(p = lock_user_string(arg1)))
6356 goto efault;
6357 ret = get_errno(stat(path(p), &st));
6358 unlock_user(p, arg1, 0);
6359 goto do_stat;
6360 case TARGET_NR_lstat:
6361 if (!(p = lock_user_string(arg1)))
6362 goto efault;
6363 ret = get_errno(lstat(path(p), &st));
6364 unlock_user(p, arg1, 0);
6365 goto do_stat;
6366 case TARGET_NR_fstat:
6368 ret = get_errno(fstat(arg1, &st));
6369 do_stat:
6370 if (!is_error(ret)) {
6371 struct target_stat *target_st;
6373 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6374 goto efault;
6375 memset(target_st, 0, sizeof(*target_st));
6376 __put_user(st.st_dev, &target_st->st_dev);
6377 __put_user(st.st_ino, &target_st->st_ino);
6378 __put_user(st.st_mode, &target_st->st_mode);
6379 __put_user(st.st_uid, &target_st->st_uid);
6380 __put_user(st.st_gid, &target_st->st_gid);
6381 __put_user(st.st_nlink, &target_st->st_nlink);
6382 __put_user(st.st_rdev, &target_st->st_rdev);
6383 __put_user(st.st_size, &target_st->st_size);
6384 __put_user(st.st_blksize, &target_st->st_blksize);
6385 __put_user(st.st_blocks, &target_st->st_blocks);
6386 __put_user(st.st_atime, &target_st->target_st_atime);
6387 __put_user(st.st_mtime, &target_st->target_st_mtime);
6388 __put_user(st.st_ctime, &target_st->target_st_ctime);
6389 unlock_user_struct(target_st, arg2, 1);
6392 break;
6393 #ifdef TARGET_NR_olduname
6394 case TARGET_NR_olduname:
6395 goto unimplemented;
6396 #endif
6397 #ifdef TARGET_NR_iopl
6398 case TARGET_NR_iopl:
6399 goto unimplemented;
6400 #endif
6401 case TARGET_NR_vhangup:
6402 ret = get_errno(vhangup());
6403 break;
6404 #ifdef TARGET_NR_idle
6405 case TARGET_NR_idle:
6406 goto unimplemented;
6407 #endif
6408 #ifdef TARGET_NR_syscall
6409 case TARGET_NR_syscall:
6410 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6411 arg6, arg7, arg8, 0);
6412 break;
6413 #endif
6414 case TARGET_NR_wait4:
6416 int status;
6417 abi_long status_ptr = arg2;
6418 struct rusage rusage, *rusage_ptr;
6419 abi_ulong target_rusage = arg4;
6420 if (target_rusage)
6421 rusage_ptr = &rusage;
6422 else
6423 rusage_ptr = NULL;
6424 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6425 if (!is_error(ret)) {
6426 if (status_ptr && ret) {
6427 status = host_to_target_waitstatus(status);
6428 if (put_user_s32(status, status_ptr))
6429 goto efault;
6431 if (target_rusage)
6432 host_to_target_rusage(target_rusage, &rusage);
6435 break;
6436 #ifdef TARGET_NR_swapoff
6437 case TARGET_NR_swapoff:
6438 if (!(p = lock_user_string(arg1)))
6439 goto efault;
6440 ret = get_errno(swapoff(p));
6441 unlock_user(p, arg1, 0);
6442 break;
6443 #endif
6444 case TARGET_NR_sysinfo:
6446 struct target_sysinfo *target_value;
6447 struct sysinfo value;
6448 ret = get_errno(sysinfo(&value));
6449 if (!is_error(ret) && arg1)
6451 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6452 goto efault;
6453 __put_user(value.uptime, &target_value->uptime);
6454 __put_user(value.loads[0], &target_value->loads[0]);
6455 __put_user(value.loads[1], &target_value->loads[1]);
6456 __put_user(value.loads[2], &target_value->loads[2]);
6457 __put_user(value.totalram, &target_value->totalram);
6458 __put_user(value.freeram, &target_value->freeram);
6459 __put_user(value.sharedram, &target_value->sharedram);
6460 __put_user(value.bufferram, &target_value->bufferram);
6461 __put_user(value.totalswap, &target_value->totalswap);
6462 __put_user(value.freeswap, &target_value->freeswap);
6463 __put_user(value.procs, &target_value->procs);
6464 __put_user(value.totalhigh, &target_value->totalhigh);
6465 __put_user(value.freehigh, &target_value->freehigh);
6466 __put_user(value.mem_unit, &target_value->mem_unit);
6467 unlock_user_struct(target_value, arg1, 1);
6470 break;
6471 #ifdef TARGET_NR_ipc
6472 case TARGET_NR_ipc:
6473 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6474 break;
6475 #endif
6476 #ifdef TARGET_NR_semget
6477 case TARGET_NR_semget:
6478 ret = get_errno(semget(arg1, arg2, arg3));
6479 break;
6480 #endif
6481 #ifdef TARGET_NR_semop
6482 case TARGET_NR_semop:
6483 ret = get_errno(do_semop(arg1, arg2, arg3));
6484 break;
6485 #endif
6486 #ifdef TARGET_NR_semctl
6487 case TARGET_NR_semctl:
6488 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6489 break;
6490 #endif
6491 #ifdef TARGET_NR_msgctl
6492 case TARGET_NR_msgctl:
6493 ret = do_msgctl(arg1, arg2, arg3);
6494 break;
6495 #endif
6496 #ifdef TARGET_NR_msgget
6497 case TARGET_NR_msgget:
6498 ret = get_errno(msgget(arg1, arg2));
6499 break;
6500 #endif
6501 #ifdef TARGET_NR_msgrcv
6502 case TARGET_NR_msgrcv:
6503 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6504 break;
6505 #endif
6506 #ifdef TARGET_NR_msgsnd
6507 case TARGET_NR_msgsnd:
6508 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6509 break;
6510 #endif
6511 #ifdef TARGET_NR_shmget
6512 case TARGET_NR_shmget:
6513 ret = get_errno(shmget(arg1, arg2, arg3));
6514 break;
6515 #endif
6516 #ifdef TARGET_NR_shmctl
6517 case TARGET_NR_shmctl:
6518 ret = do_shmctl(arg1, arg2, arg3);
6519 break;
6520 #endif
6521 #ifdef TARGET_NR_shmat
6522 case TARGET_NR_shmat:
6523 ret = do_shmat(arg1, arg2, arg3);
6524 break;
6525 #endif
6526 #ifdef TARGET_NR_shmdt
6527 case TARGET_NR_shmdt:
6528 ret = do_shmdt(arg1);
6529 break;
6530 #endif
6531 case TARGET_NR_fsync:
6532 ret = get_errno(fsync(arg1));
6533 break;
6534 case TARGET_NR_clone:
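/*
 * The kernel clone() argument order differs between architectures
 * (flags, child stack, parent tid, tls and child tid are permuted),
 * so normalise the registers before handing them to do_fork().
 */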
6535 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6536 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6537 #elif defined(TARGET_CRIS)
6538 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6539 #elif defined(TARGET_S390X)
6540 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6541 #else
6542 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6543 #endif
6544 break;
6545 #ifdef __NR_exit_group
6546 /* new thread calls */
6547 case TARGET_NR_exit_group:
6548 #ifdef TARGET_GPROF
6549 _mcleanup();
6550 #endif
6551 gdb_exit(cpu_env, arg1);
6552 ret = get_errno(exit_group(arg1));
6553 break;
6554 #endif
6555 case TARGET_NR_setdomainname:
6556 if (!(p = lock_user_string(arg1)))
6557 goto efault;
6558 ret = get_errno(setdomainname(p, arg2));
6559 unlock_user(p, arg1, 0);
6560 break;
6561 case TARGET_NR_uname:
6562 /* no need to transcode because we use the linux syscall */
6564 struct new_utsname * buf;
6566 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6567 goto efault;
6568 ret = get_errno(sys_uname(buf));
6569 if (!is_error(ret)) {
6570 /* Overwrite the native machine name with whatever is being
6571 emulated. */
6572 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6573 /* Allow the user to override the reported release. */
6574 if (qemu_uname_release && *qemu_uname_release)
6575 strcpy (buf->release, qemu_uname_release);
6577 unlock_user_struct(buf, arg1, 1);
6579 break;
6580 #ifdef TARGET_I386
6581 case TARGET_NR_modify_ldt:
6582 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6583 break;
6584 #if !defined(TARGET_X86_64)
6585 case TARGET_NR_vm86old:
6586 goto unimplemented;
6587 case TARGET_NR_vm86:
6588 ret = do_vm86(cpu_env, arg1, arg2);
6589 break;
6590 #endif
6591 #endif
6592 case TARGET_NR_adjtimex:
6593 goto unimplemented;
6594 #ifdef TARGET_NR_create_module
6595 case TARGET_NR_create_module:
6596 #endif
6597 case TARGET_NR_init_module:
6598 case TARGET_NR_delete_module:
6599 #ifdef TARGET_NR_get_kernel_syms
6600 case TARGET_NR_get_kernel_syms:
6601 #endif
6602 goto unimplemented;
6603 case TARGET_NR_quotactl:
6604 goto unimplemented;
6605 case TARGET_NR_getpgid:
6606 ret = get_errno(getpgid(arg1));
6607 break;
6608 case TARGET_NR_fchdir:
6609 ret = get_errno(fchdir(arg1));
6610 break;
6611 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6612 case TARGET_NR_bdflush:
6613 goto unimplemented;
6614 #endif
6615 #ifdef TARGET_NR_sysfs
6616 case TARGET_NR_sysfs:
6617 goto unimplemented;
6618 #endif
6619 case TARGET_NR_personality:
6620 ret = get_errno(personality(arg1));
6621 break;
6622 #ifdef TARGET_NR_afs_syscall
6623 case TARGET_NR_afs_syscall:
6624 goto unimplemented;
6625 #endif
6626 #ifdef TARGET_NR__llseek /* Not on alpha */
6627 case TARGET_NR__llseek:
6629 int64_t res;
6630 #if !defined(__NR_llseek)
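/*
 * Hosts without an _llseek syscall (64-bit hosts) can seek directly:
 * rebuild the 64-bit offset from the two 32-bit halves and use lseek().
 */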
6631 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6632 if (res == -1) {
6633 ret = get_errno(res);
6634 } else {
6635 ret = 0;
6637 #else
6638 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6639 #endif
6640 if ((ret == 0) && put_user_s64(res, arg4)) {
6641 goto efault;
6644 break;
6645 #endif
6646 case TARGET_NR_getdents:
6647 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
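/*
 * When a 32-bit guest runs on a 64-bit host, the host linux_dirent carries
 * 64-bit d_ino/d_off fields, so each record returned by the host must be
 * repacked into the smaller target_dirent layout before being copied out.
 */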
6649 struct target_dirent *target_dirp;
6650 struct linux_dirent *dirp;
6651 abi_long count = arg3;
6653 dirp = malloc(count);
6654 if (!dirp) {
6655 ret = -TARGET_ENOMEM;
6656 goto fail;
6659 ret = get_errno(sys_getdents(arg1, dirp, count));
6660 if (!is_error(ret)) {
6661 struct linux_dirent *de;
6662 struct target_dirent *tde;
6663 int len = ret;
6664 int reclen, treclen;
6665 int count1, tnamelen;
6667 count1 = 0;
6668 de = dirp;
6669 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6670 goto efault;
6671 tde = target_dirp;
6672 while (len > 0) {
6673 reclen = de->d_reclen;
6674 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6675 tde->d_reclen = tswap16(treclen);
6676 tde->d_ino = tswapal(de->d_ino);
6677 tde->d_off = tswapal(de->d_off);
6678 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6679 if (tnamelen > 256)
6680 tnamelen = 256;
6681 /* XXX: may not be correct */
6682 pstrcpy(tde->d_name, tnamelen, de->d_name);
6683 de = (struct linux_dirent *)((char *)de + reclen);
6684 len -= reclen;
6685 tde = (struct target_dirent *)((char *)tde + treclen);
6686 count1 += treclen;
6688 ret = count1;
6689 unlock_user(target_dirp, arg2, ret);
6691 free(dirp);
6693 #else
6695 struct linux_dirent *dirp;
6696 abi_long count = arg3;
6698 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6699 goto efault;
6700 ret = get_errno(sys_getdents(arg1, dirp, count));
6701 if (!is_error(ret)) {
6702 struct linux_dirent *de;
6703 int len = ret;
6704 int reclen;
6705 de = dirp;
6706 while (len > 0) {
6707 reclen = de->d_reclen;
6708 if (reclen > len)
6709 break;
6710 de->d_reclen = tswap16(reclen);
6711 tswapls(&de->d_ino);
6712 tswapls(&de->d_off);
6713 de = (struct linux_dirent *)((char *)de + reclen);
6714 len -= reclen;
6717 unlock_user(dirp, arg2, ret);
6719 #endif
6720 break;
6721 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6722 case TARGET_NR_getdents64:
6724 struct linux_dirent64 *dirp;
6725 abi_long count = arg3;
6726 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6727 goto efault;
6728 ret = get_errno(sys_getdents64(arg1, dirp, count));
6729 if (!is_error(ret)) {
6730 struct linux_dirent64 *de;
6731 int len = ret;
6732 int reclen;
6733 de = dirp;
6734 while (len > 0) {
6735 reclen = de->d_reclen;
6736 if (reclen > len)
6737 break;
6738 de->d_reclen = tswap16(reclen);
6739 tswap64s((uint64_t *)&de->d_ino);
6740 tswap64s((uint64_t *)&de->d_off);
6741 de = (struct linux_dirent64 *)((char *)de + reclen);
6742 len -= reclen;
6745 unlock_user(dirp, arg2, ret);
6747 break;
6748 #endif /* TARGET_NR_getdents64 */
6749 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6750 #ifdef TARGET_S390X
6751 case TARGET_NR_select:
6752 #else
6753 case TARGET_NR__newselect:
6754 #endif
6755 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6756 break;
6757 #endif
6758 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6759 # ifdef TARGET_NR_poll
6760 case TARGET_NR_poll:
6761 # endif
6762 # ifdef TARGET_NR_ppoll
6763 case TARGET_NR_ppoll:
6764 # endif
6766 struct target_pollfd *target_pfd;
6767 unsigned int nfds = arg2;
6768 int timeout = arg3;
6769 struct pollfd *pfd;
6770 unsigned int i;
6772 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6773 if (!target_pfd)
6774 goto efault;
6776 pfd = alloca(sizeof(struct pollfd) * nfds);
6777 for(i = 0; i < nfds; i++) {
6778 pfd[i].fd = tswap32(target_pfd[i].fd);
6779 pfd[i].events = tswap16(target_pfd[i].events);
6782 # ifdef TARGET_NR_ppoll
6783 if (num == TARGET_NR_ppoll) {
6784 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6785 target_sigset_t *target_set;
6786 sigset_t _set, *set = &_set;
6788 if (arg3) {
6789 if (target_to_host_timespec(timeout_ts, arg3)) {
6790 unlock_user(target_pfd, arg1, 0);
6791 goto efault;
6793 } else {
6794 timeout_ts = NULL;
6797 if (arg4) {
6798 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6799 if (!target_set) {
6800 unlock_user(target_pfd, arg1, 0);
6801 goto efault;
6803 target_to_host_sigset(set, target_set);
6804 } else {
6805 set = NULL;
6808 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6810 if (!is_error(ret) && arg3) {
6811 host_to_target_timespec(arg3, timeout_ts);
6813 if (arg4) {
6814 unlock_user(target_set, arg4, 0);
6816 } else
6817 # endif
6818 ret = get_errno(poll(pfd, nfds, timeout));
6820 if (!is_error(ret)) {
6821 for(i = 0; i < nfds; i++) {
6822 target_pfd[i].revents = tswap16(pfd[i].revents);
6825 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6827 break;
6828 #endif
6829 case TARGET_NR_flock:
6830 /* NOTE: the flock constant seems to be the same for every
6831 Linux platform */
6832 ret = get_errno(flock(arg1, arg2));
6833 break;
6834 case TARGET_NR_readv:
6836 int count = arg3;
6837 struct iovec *vec;
6839 vec = alloca(count * sizeof(struct iovec));
6840 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6841 goto efault;
6842 ret = get_errno(readv(arg1, vec, count));
6843 unlock_iovec(vec, arg2, count, 1);
6845 break;
6846 case TARGET_NR_writev:
6848 int count = arg3;
6849 struct iovec *vec;
6851 vec = alloca(count * sizeof(struct iovec));
6852 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6853 goto efault;
6854 ret = get_errno(writev(arg1, vec, count));
6855 unlock_iovec(vec, arg2, count, 0);
6857 break;
6858 case TARGET_NR_getsid:
6859 ret = get_errno(getsid(arg1));
6860 break;
6861 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6862 case TARGET_NR_fdatasync:
6863 ret = get_errno(fdatasync(arg1));
6864 break;
6865 #endif
6866 case TARGET_NR__sysctl:
6867 /* We don't implement this, but ENOTDIR is always a safe
6868 return value. */
6869 ret = -TARGET_ENOTDIR;
6870 break;
6871 case TARGET_NR_sched_getaffinity:
6873 unsigned int mask_size;
6874 unsigned long *mask;
6877 /* sched_getaffinity needs multiples of ulong, so we need to take
6878 * care of mismatches between target ulong and host ulong sizes. */
6880 if (arg2 & (sizeof(abi_ulong) - 1)) {
6881 ret = -TARGET_EINVAL;
6882 break;
6884 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6886 mask = alloca(mask_size);
6887 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6889 if (!is_error(ret)) {
6890 if (copy_to_user(arg3, mask, ret)) {
6891 goto efault;
6895 break;
6896 case TARGET_NR_sched_setaffinity:
6898 unsigned int mask_size;
6899 unsigned long *mask;
6902 /* sched_setaffinity needs multiples of ulong, so we need to take
6903 * care of mismatches between target ulong and host ulong sizes. */
6905 if (arg2 & (sizeof(abi_ulong) - 1)) {
6906 ret = -TARGET_EINVAL;
6907 break;
6909 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6911 mask = alloca(mask_size);
6912 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6913 goto efault;
6915 memcpy(mask, p, arg2);
6916 unlock_user_struct(p, arg3, 0);
6918 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6920 break;
6921 case TARGET_NR_sched_setparam:
6923 struct sched_param *target_schp;
6924 struct sched_param schp;
6926 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6927 goto efault;
6928 schp.sched_priority = tswap32(target_schp->sched_priority);
6929 unlock_user_struct(target_schp, arg2, 0);
6930 ret = get_errno(sched_setparam(arg1, &schp));
6932 break;
6933 case TARGET_NR_sched_getparam:
6935 struct sched_param *target_schp;
6936 struct sched_param schp;
6937 ret = get_errno(sched_getparam(arg1, &schp));
6938 if (!is_error(ret)) {
6939 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6940 goto efault;
6941 target_schp->sched_priority = tswap32(schp.sched_priority);
6942 unlock_user_struct(target_schp, arg2, 1);
6945 break;
6946 case TARGET_NR_sched_setscheduler:
6948 struct sched_param *target_schp;
6949 struct sched_param schp;
6950 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6951 goto efault;
6952 schp.sched_priority = tswap32(target_schp->sched_priority);
6953 unlock_user_struct(target_schp, arg3, 0);
6954 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6956 break;
6957 case TARGET_NR_sched_getscheduler:
6958 ret = get_errno(sched_getscheduler(arg1));
6959 break;
6960 case TARGET_NR_sched_yield:
6961 ret = get_errno(sched_yield());
6962 break;
6963 case TARGET_NR_sched_get_priority_max:
6964 ret = get_errno(sched_get_priority_max(arg1));
6965 break;
6966 case TARGET_NR_sched_get_priority_min:
6967 ret = get_errno(sched_get_priority_min(arg1));
6968 break;
6969 case TARGET_NR_sched_rr_get_interval:
6971 struct timespec ts;
6972 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6973 if (!is_error(ret)) {
6974 host_to_target_timespec(arg2, &ts);
6977 break;
6978 case TARGET_NR_nanosleep:
6980 struct timespec req, rem;
6981 target_to_host_timespec(&req, arg1);
6982 ret = get_errno(nanosleep(&req, &rem));
6983 if (is_error(ret) && arg2) {
6984 host_to_target_timespec(arg2, &rem);
6987 break;
6988 #ifdef TARGET_NR_query_module
6989 case TARGET_NR_query_module:
6990 goto unimplemented;
6991 #endif
6992 #ifdef TARGET_NR_nfsservctl
6993 case TARGET_NR_nfsservctl:
6994 goto unimplemented;
6995 #endif
6996 case TARGET_NR_prctl:
6997 switch (arg1)
6999 case PR_GET_PDEATHSIG:
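/*
 * For PR_GET_PDEATHSIG the second argument is an output pointer, so the
 * value is fetched into a host int and then copied back to the guest.
 */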
7001 int deathsig;
7002 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7003 if (!is_error(ret) && arg2
7004 && put_user_ual(deathsig, arg2))
7005 goto efault;
7007 break;
7008 default:
7009 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7010 break;
7012 break;
7013 #ifdef TARGET_NR_arch_prctl
7014 case TARGET_NR_arch_prctl:
7015 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7016 ret = do_arch_prctl(cpu_env, arg1, arg2);
7017 break;
7018 #else
7019 goto unimplemented;
7020 #endif
7021 #endif
7022 #ifdef TARGET_NR_pread
7023 case TARGET_NR_pread:
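/*
 * Some 32-bit ABIs align 64-bit syscall arguments to even register pairs,
 * inserting a padding slot; in that case the file offset arrives in arg5
 * rather than arg4.
 */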
7024 if (regpairs_aligned(cpu_env))
7025 arg4 = arg5;
7026 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7027 goto efault;
7028 ret = get_errno(pread(arg1, p, arg3, arg4));
7029 unlock_user(p, arg2, ret);
7030 break;
7031 case TARGET_NR_pwrite:
7032 if (regpairs_aligned(cpu_env))
7033 arg4 = arg5;
7034 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7035 goto efault;
7036 ret = get_errno(pwrite(arg1, p, arg3, arg4));
7037 unlock_user(p, arg2, 0);
7038 break;
7039 #endif
7040 #ifdef TARGET_NR_pread64
7041 case TARGET_NR_pread64:
7042 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7043 goto efault;
7044 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7045 unlock_user(p, arg2, ret);
7046 break;
7047 case TARGET_NR_pwrite64:
7048 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7049 goto efault;
7050 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7051 unlock_user(p, arg2, 0);
7052 break;
7053 #endif
7054 case TARGET_NR_getcwd:
7055 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7056 goto efault;
7057 ret = get_errno(sys_getcwd1(p, arg2));
7058 unlock_user(p, arg1, ret);
7059 break;
7060 case TARGET_NR_capget:
7061 goto unimplemented;
7062 case TARGET_NR_capset:
7063 goto unimplemented;
7064 case TARGET_NR_sigaltstack:
7065 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7066 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7067 defined(TARGET_M68K) || defined(TARGET_S390X)
7068 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
7069 break;
7070 #else
7071 goto unimplemented;
7072 #endif
7073 case TARGET_NR_sendfile:
7074 goto unimplemented;
7075 #ifdef TARGET_NR_getpmsg
7076 case TARGET_NR_getpmsg:
7077 goto unimplemented;
7078 #endif
7079 #ifdef TARGET_NR_putpmsg
7080 case TARGET_NR_putpmsg:
7081 goto unimplemented;
7082 #endif
7083 #ifdef TARGET_NR_vfork
7084 case TARGET_NR_vfork:
7085 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7086 0, 0, 0, 0));
7087 break;
7088 #endif
7089 #ifdef TARGET_NR_ugetrlimit
7090 case TARGET_NR_ugetrlimit:
7092 struct rlimit rlim;
7093 int resource = target_to_host_resource(arg1);
7094 ret = get_errno(getrlimit(resource, &rlim));
7095 if (!is_error(ret)) {
7096 struct target_rlimit *target_rlim;
7097 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7098 goto efault;
7099 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7100 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7101 unlock_user_struct(target_rlim, arg2, 1);
7103 break;
7105 #endif
7106 #ifdef TARGET_NR_truncate64
7107 case TARGET_NR_truncate64:
7108 if (!(p = lock_user_string(arg1)))
7109 goto efault;
7110 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7111 unlock_user(p, arg1, 0);
7112 break;
7113 #endif
7114 #ifdef TARGET_NR_ftruncate64
7115 case TARGET_NR_ftruncate64:
7116 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7117 break;
7118 #endif
7119 #ifdef TARGET_NR_stat64
7120 case TARGET_NR_stat64:
7121 if (!(p = lock_user_string(arg1)))
7122 goto efault;
7123 ret = get_errno(stat(path(p), &st));
7124 unlock_user(p, arg1, 0);
7125 if (!is_error(ret))
7126 ret = host_to_target_stat64(cpu_env, arg2, &st);
7127 break;
7128 #endif
7129 #ifdef TARGET_NR_lstat64
7130 case TARGET_NR_lstat64:
7131 if (!(p = lock_user_string(arg1)))
7132 goto efault;
7133 ret = get_errno(lstat(path(p), &st));
7134 unlock_user(p, arg1, 0);
7135 if (!is_error(ret))
7136 ret = host_to_target_stat64(cpu_env, arg2, &st);
7137 break;
7138 #endif
7139 #ifdef TARGET_NR_fstat64
7140 case TARGET_NR_fstat64:
7141 ret = get_errno(fstat(arg1, &st));
7142 if (!is_error(ret))
7143 ret = host_to_target_stat64(cpu_env, arg2, &st);
7144 break;
7145 #endif
7146 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7147 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7148 #ifdef TARGET_NR_fstatat64
7149 case TARGET_NR_fstatat64:
7150 #endif
7151 #ifdef TARGET_NR_newfstatat
7152 case TARGET_NR_newfstatat:
7153 #endif
7154 if (!(p = lock_user_string(arg2)))
7155 goto efault;
7156 #ifdef __NR_fstatat64
7157 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7158 #else
7159 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7160 #endif
7161 if (!is_error(ret))
7162 ret = host_to_target_stat64(cpu_env, arg3, &st);
7163 break;
7164 #endif
7165 case TARGET_NR_lchown:
7166 if (!(p = lock_user_string(arg1)))
7167 goto efault;
7168 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7169 unlock_user(p, arg1, 0);
7170 break;
7171 #ifdef TARGET_NR_getuid
7172 case TARGET_NR_getuid:
7173 ret = get_errno(high2lowuid(getuid()));
7174 break;
7175 #endif
7176 #ifdef TARGET_NR_getgid
7177 case TARGET_NR_getgid:
7178 ret = get_errno(high2lowgid(getgid()));
7179 break;
7180 #endif
7181 #ifdef TARGET_NR_geteuid
7182 case TARGET_NR_geteuid:
7183 ret = get_errno(high2lowuid(geteuid()));
7184 break;
7185 #endif
7186 #ifdef TARGET_NR_getegid
7187 case TARGET_NR_getegid:
7188 ret = get_errno(high2lowgid(getegid()));
7189 break;
7190 #endif
7191 case TARGET_NR_setreuid:
7192 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7193 break;
7194 case TARGET_NR_setregid:
7195 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7196 break;
7197 case TARGET_NR_getgroups:
7199 int gidsetsize = arg1;
7200 target_id *target_grouplist;
7201 gid_t *grouplist;
7202 int i;
7204 grouplist = alloca(gidsetsize * sizeof(gid_t));
7205 ret = get_errno(getgroups(gidsetsize, grouplist));
7206 if (gidsetsize == 0)
7207 break;
7208 if (!is_error(ret)) {
7209 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7210 if (!target_grouplist)
7211 goto efault;
7212 for(i = 0;i < ret; i++)
7213 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7214 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7217 break;
7218 case TARGET_NR_setgroups:
7220 int gidsetsize = arg1;
7221 target_id *target_grouplist;
7222 gid_t *grouplist;
7223 int i;
7225 grouplist = alloca(gidsetsize * sizeof(gid_t));
7226 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7227 if (!target_grouplist) {
7228 ret = -TARGET_EFAULT;
7229 goto fail;
7231 for(i = 0;i < gidsetsize; i++)
7232 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7233 unlock_user(target_grouplist, arg2, 0);
7234 ret = get_errno(setgroups(gidsetsize, grouplist));
7236 break;
7237 case TARGET_NR_fchown:
7238 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7239 break;
7240 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7241 case TARGET_NR_fchownat:
7242 if (!(p = lock_user_string(arg2)))
7243 goto efault;
7244 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7245 unlock_user(p, arg2, 0);
7246 break;
7247 #endif
7248 #ifdef TARGET_NR_setresuid
7249 case TARGET_NR_setresuid:
7250 ret = get_errno(setresuid(low2highuid(arg1),
7251 low2highuid(arg2),
7252 low2highuid(arg3)));
7253 break;
7254 #endif
7255 #ifdef TARGET_NR_getresuid
7256 case TARGET_NR_getresuid:
7258 uid_t ruid, euid, suid;
7259 ret = get_errno(getresuid(&ruid, &euid, &suid));
7260 if (!is_error(ret)) {
7261 if (put_user_u16(high2lowuid(ruid), arg1)
7262 || put_user_u16(high2lowuid(euid), arg2)
7263 || put_user_u16(high2lowuid(suid), arg3))
7264 goto efault;
7267 break;
7268 #endif
7269 #ifdef TARGET_NR_getresgid
7270 case TARGET_NR_setresgid:
7271 ret = get_errno(setresgid(low2highgid(arg1),
7272 low2highgid(arg2),
7273 low2highgid(arg3)));
7274 break;
7275 #endif
7276 #ifdef TARGET_NR_getresgid
7277 case TARGET_NR_getresgid:
7279 gid_t rgid, egid, sgid;
7280 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7281 if (!is_error(ret)) {
7282 if (put_user_u16(high2lowgid(rgid), arg1)
7283 || put_user_u16(high2lowgid(egid), arg2)
7284 || put_user_u16(high2lowgid(sgid), arg3))
7285 goto efault;
7288 break;
7289 #endif
7290 case TARGET_NR_chown:
7291 if (!(p = lock_user_string(arg1)))
7292 goto efault;
7293 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7294 unlock_user(p, arg1, 0);
7295 break;
7296 case TARGET_NR_setuid:
7297 ret = get_errno(setuid(low2highuid(arg1)));
7298 break;
7299 case TARGET_NR_setgid:
7300 ret = get_errno(setgid(low2highgid(arg1)));
7301 break;
7302 case TARGET_NR_setfsuid:
7303 ret = get_errno(setfsuid(arg1));
7304 break;
7305 case TARGET_NR_setfsgid:
7306 ret = get_errno(setfsgid(arg1));
7307 break;
7309 #ifdef TARGET_NR_lchown32
7310 case TARGET_NR_lchown32:
7311 if (!(p = lock_user_string(arg1)))
7312 goto efault;
7313 ret = get_errno(lchown(p, arg2, arg3));
7314 unlock_user(p, arg1, 0);
7315 break;
7316 #endif
7317 #ifdef TARGET_NR_getuid32
7318 case TARGET_NR_getuid32:
7319 ret = get_errno(getuid());
7320 break;
7321 #endif
7323 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7324 /* Alpha specific */
7325 case TARGET_NR_getxuid:
7327 uid_t euid;
7328 euid = geteuid();
7329 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
7331 ret = get_errno(getuid());
7332 break;
7333 #endif
7334 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7335 /* Alpha specific */
7336 case TARGET_NR_getxgid:
7338 gid_t egid;
7339 egid = getegid();
7340 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
7342 ret = get_errno(getgid());
7343 break;
7344 #endif
7345 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7346 /* Alpha specific */
7347 case TARGET_NR_osf_getsysinfo:
7348 ret = -TARGET_EOPNOTSUPP;
7349 switch (arg1) {
7350 case TARGET_GSI_IEEE_FP_CONTROL:
7352 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7354 /* Copied from linux ieee_fpcr_to_swcr. */
7355 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7356 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7357 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7358 | SWCR_TRAP_ENABLE_DZE
7359 | SWCR_TRAP_ENABLE_OVF);
7360 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7361 | SWCR_TRAP_ENABLE_INE);
7362 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7363 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7365 if (put_user_u64 (swcr, arg2))
7366 goto efault;
7367 ret = 0;
7369 break;
7371 /* case GSI_IEEE_STATE_AT_SIGNAL:
7372 -- Not implemented in linux kernel.
7373 case GSI_UACPROC:
7374 -- Retrieves current unaligned access state; not much used.
7375 case GSI_PROC_TYPE:
7376 -- Retrieves implver information; surely not used.
7377 case GSI_GET_HWRPB:
7378 -- Grabs a copy of the HWRPB; surely not used. */
7381 break;
7382 #endif
7383 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7384 /* Alpha specific */
7385 case TARGET_NR_osf_setsysinfo:
7386 ret = -TARGET_EOPNOTSUPP;
7387 switch (arg1) {
7388 case TARGET_SSI_IEEE_FP_CONTROL:
7389 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7391 uint64_t swcr, fpcr, orig_fpcr;
7393 if (get_user_u64 (swcr, arg2))
7394 goto efault;
7395 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7396 fpcr = orig_fpcr & FPCR_DYN_MASK;
7398 /* Copied from linux ieee_swcr_to_fpcr. */
7399 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7400 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7401 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7402 | SWCR_TRAP_ENABLE_DZE
7403 | SWCR_TRAP_ENABLE_OVF)) << 48;
7404 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7405 | SWCR_TRAP_ENABLE_INE)) << 57;
7406 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7407 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7409 cpu_alpha_store_fpcr (cpu_env, fpcr);
7410 ret = 0;
7412 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7413 /* Old exceptions are not signaled. */
7414 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7416 /* If any exceptions set by this call, and are unmasked,
7417 send a signal. */
7418 /* ??? FIXME */
7421 break;
7423 /* case SSI_NVPAIRS:
7424 -- Used with SSIN_UACPROC to enable unaligned accesses.
7425 case SSI_IEEE_STATE_AT_SIGNAL:
7426 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7427 -- Not implemented in linux kernel */
7430 break;
7431 #endif
7432 #ifdef TARGET_NR_osf_sigprocmask
7433 /* Alpha specific. */
7434 case TARGET_NR_osf_sigprocmask:
7436 abi_ulong mask;
7437 int how;
7438 sigset_t set, oldset;
7440 switch(arg1) {
7441 case TARGET_SIG_BLOCK:
7442 how = SIG_BLOCK;
7443 break;
7444 case TARGET_SIG_UNBLOCK:
7445 how = SIG_UNBLOCK;
7446 break;
7447 case TARGET_SIG_SETMASK:
7448 how = SIG_SETMASK;
7449 break;
7450 default:
7451 ret = -TARGET_EINVAL;
7452 goto fail;
7454 mask = arg2;
7455 target_to_host_old_sigset(&set, &mask);
7456 sigprocmask(how, &set, &oldset);
7457 host_to_target_old_sigset(&mask, &oldset);
7458 ret = mask;
7460 break;
7461 #endif
7463 #ifdef TARGET_NR_getgid32
7464 case TARGET_NR_getgid32:
7465 ret = get_errno(getgid());
7466 break;
7467 #endif
7468 #ifdef TARGET_NR_geteuid32
7469 case TARGET_NR_geteuid32:
7470 ret = get_errno(geteuid());
7471 break;
7472 #endif
7473 #ifdef TARGET_NR_getegid32
7474 case TARGET_NR_getegid32:
7475 ret = get_errno(getegid());
7476 break;
7477 #endif
7478 #ifdef TARGET_NR_setreuid32
7479 case TARGET_NR_setreuid32:
7480 ret = get_errno(setreuid(arg1, arg2));
7481 break;
7482 #endif
7483 #ifdef TARGET_NR_setregid32
7484 case TARGET_NR_setregid32:
7485 ret = get_errno(setregid(arg1, arg2));
7486 break;
7487 #endif
7488 #ifdef TARGET_NR_getgroups32
7489 case TARGET_NR_getgroups32:
7491 int gidsetsize = arg1;
7492 uint32_t *target_grouplist;
7493 gid_t *grouplist;
7494 int i;
7496 grouplist = alloca(gidsetsize * sizeof(gid_t));
7497 ret = get_errno(getgroups(gidsetsize, grouplist));
7498 if (gidsetsize == 0)
7499 break;
7500 if (!is_error(ret)) {
7501 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7502 if (!target_grouplist) {
7503 ret = -TARGET_EFAULT;
7504 goto fail;
7506 for(i = 0;i < ret; i++)
7507 target_grouplist[i] = tswap32(grouplist[i]);
7508 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7511 break;
7512 #endif
7513 #ifdef TARGET_NR_setgroups32
7514 case TARGET_NR_setgroups32:
7516 int gidsetsize = arg1;
7517 uint32_t *target_grouplist;
7518 gid_t *grouplist;
7519 int i;
7521 grouplist = alloca(gidsetsize * sizeof(gid_t));
7522 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7523 if (!target_grouplist) {
7524 ret = -TARGET_EFAULT;
7525 goto fail;
7527 for(i = 0;i < gidsetsize; i++)
7528 grouplist[i] = tswap32(target_grouplist[i]);
7529 unlock_user(target_grouplist, arg2, 0);
7530 ret = get_errno(setgroups(gidsetsize, grouplist));
7532 break;
7533 #endif
7534 #ifdef TARGET_NR_fchown32
7535 case TARGET_NR_fchown32:
7536 ret = get_errno(fchown(arg1, arg2, arg3));
7537 break;
7538 #endif
7539 #ifdef TARGET_NR_setresuid32
7540 case TARGET_NR_setresuid32:
7541 ret = get_errno(setresuid(arg1, arg2, arg3));
7542 break;
7543 #endif
7544 #ifdef TARGET_NR_getresuid32
7545 case TARGET_NR_getresuid32:
7547 uid_t ruid, euid, suid;
7548 ret = get_errno(getresuid(&ruid, &euid, &suid));
7549 if (!is_error(ret)) {
7550 if (put_user_u32(ruid, arg1)
7551 || put_user_u32(euid, arg2)
7552 || put_user_u32(suid, arg3))
7553 goto efault;
7556 break;
7557 #endif
7558 #ifdef TARGET_NR_setresgid32
7559 case TARGET_NR_setresgid32:
7560 ret = get_errno(setresgid(arg1, arg2, arg3));
7561 break;
7562 #endif
7563 #ifdef TARGET_NR_getresgid32
7564 case TARGET_NR_getresgid32:
7566 gid_t rgid, egid, sgid;
7567 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7568 if (!is_error(ret)) {
7569 if (put_user_u32(rgid, arg1)
7570 || put_user_u32(egid, arg2)
7571 || put_user_u32(sgid, arg3))
7572 goto efault;
7575 break;
7576 #endif
7577 #ifdef TARGET_NR_chown32
7578 case TARGET_NR_chown32:
7579 if (!(p = lock_user_string(arg1)))
7580 goto efault;
7581 ret = get_errno(chown(p, arg2, arg3));
7582 unlock_user(p, arg1, 0);
7583 break;
7584 #endif
7585 #ifdef TARGET_NR_setuid32
7586 case TARGET_NR_setuid32:
7587 ret = get_errno(setuid(arg1));
7588 break;
7589 #endif
7590 #ifdef TARGET_NR_setgid32
7591 case TARGET_NR_setgid32:
7592 ret = get_errno(setgid(arg1));
7593 break;
7594 #endif
7595 #ifdef TARGET_NR_setfsuid32
7596 case TARGET_NR_setfsuid32:
7597 ret = get_errno(setfsuid(arg1));
7598 break;
7599 #endif
7600 #ifdef TARGET_NR_setfsgid32
7601 case TARGET_NR_setfsgid32:
7602 ret = get_errno(setfsgid(arg1));
7603 break;
7604 #endif
7606 case TARGET_NR_pivot_root:
7607 goto unimplemented;
7608 #ifdef TARGET_NR_mincore
7609 case TARGET_NR_mincore:
7611 void *a;
7612 ret = -TARGET_EFAULT;
7613 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7614 goto efault;
7615 if (!(p = lock_user_string(arg3)))
7616 goto mincore_fail;
7617 ret = get_errno(mincore(a, arg2, p));
7618 unlock_user(p, arg3, ret);
7619 mincore_fail:
7620 unlock_user(a, arg1, 0);
7622 break;
7623 #endif
7624 #ifdef TARGET_NR_arm_fadvise64_64
7625 case TARGET_NR_arm_fadvise64_64:
7628 /* arm_fadvise64_64 looks like fadvise64_64 but
7629 * with different argument order */
7631 abi_long temp;
7632 temp = arg3;
7633 arg3 = arg4;
7634 arg4 = temp;
7636 #endif
7637 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7638 #ifdef TARGET_NR_fadvise64_64
7639 case TARGET_NR_fadvise64_64:
7640 #endif
7641 #ifdef TARGET_NR_fadvise64
7642 case TARGET_NR_fadvise64:
7643 #endif
7644 #ifdef TARGET_S390X
7645 switch (arg4) {
7646 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7647 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7648 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7649 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7650 default: break;
7652 #endif
7653 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7654 break;
7655 #endif
7656 #ifdef TARGET_NR_madvise
7657 case TARGET_NR_madvise:
7658 /* A straight passthrough may not be safe because qemu sometimes
7659 turns private file-backed mappings into anonymous mappings.
7660 This will break MADV_DONTNEED.
7661 This is a hint, so ignoring and returning success is ok. */
7662 ret = get_errno(0);
7663 break;
7664 #endif
7665 #if TARGET_ABI_BITS == 32
7666 case TARGET_NR_fcntl64:
7668 int cmd;
7669 struct flock64 fl;
7670 struct target_flock64 *target_fl;
7671 #ifdef TARGET_ARM
7672 struct target_eabi_flock64 *target_efl;
7673 #endif
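/*
 * ARM EABI aligns 64-bit members to 8 bytes, so the EABI flock64 layout
 * differs from the old-ABI one; both variants are handled below when
 * converting to the host struct flock64.
 */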
7675 cmd = target_to_host_fcntl_cmd(arg2);
7676 if (cmd == -TARGET_EINVAL) {
7677 ret = cmd;
7678 break;
7681 switch(arg2) {
7682 case TARGET_F_GETLK64:
7683 #ifdef TARGET_ARM
7684 if (((CPUARMState *)cpu_env)->eabi) {
7685 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7686 goto efault;
7687 fl.l_type = tswap16(target_efl->l_type);
7688 fl.l_whence = tswap16(target_efl->l_whence);
7689 fl.l_start = tswap64(target_efl->l_start);
7690 fl.l_len = tswap64(target_efl->l_len);
7691 fl.l_pid = tswap32(target_efl->l_pid);
7692 unlock_user_struct(target_efl, arg3, 0);
7693 } else
7694 #endif
7696 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7697 goto efault;
7698 fl.l_type = tswap16(target_fl->l_type);
7699 fl.l_whence = tswap16(target_fl->l_whence);
7700 fl.l_start = tswap64(target_fl->l_start);
7701 fl.l_len = tswap64(target_fl->l_len);
7702 fl.l_pid = tswap32(target_fl->l_pid);
7703 unlock_user_struct(target_fl, arg3, 0);
7705 ret = get_errno(fcntl(arg1, cmd, &fl));
7706 if (ret == 0) {
7707 #ifdef TARGET_ARM
7708 if (((CPUARMState *)cpu_env)->eabi) {
7709 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7710 goto efault;
7711 target_efl->l_type = tswap16(fl.l_type);
7712 target_efl->l_whence = tswap16(fl.l_whence);
7713 target_efl->l_start = tswap64(fl.l_start);
7714 target_efl->l_len = tswap64(fl.l_len);
7715 target_efl->l_pid = tswap32(fl.l_pid);
7716 unlock_user_struct(target_efl, arg3, 1);
7717 } else
7718 #endif
7720 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7721 goto efault;
7722 target_fl->l_type = tswap16(fl.l_type);
7723 target_fl->l_whence = tswap16(fl.l_whence);
7724 target_fl->l_start = tswap64(fl.l_start);
7725 target_fl->l_len = tswap64(fl.l_len);
7726 target_fl->l_pid = tswap32(fl.l_pid);
7727 unlock_user_struct(target_fl, arg3, 1);
7730 break;
7732 case TARGET_F_SETLK64:
7733 case TARGET_F_SETLKW64:
7734 #ifdef TARGET_ARM
7735 if (((CPUARMState *)cpu_env)->eabi) {
7736 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7737 goto efault;
7738 fl.l_type = tswap16(target_efl->l_type);
7739 fl.l_whence = tswap16(target_efl->l_whence);
7740 fl.l_start = tswap64(target_efl->l_start);
7741 fl.l_len = tswap64(target_efl->l_len);
7742 fl.l_pid = tswap32(target_efl->l_pid);
7743 unlock_user_struct(target_efl, arg3, 0);
7744 } else
7745 #endif
7747 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7748 goto efault;
7749 fl.l_type = tswap16(target_fl->l_type);
7750 fl.l_whence = tswap16(target_fl->l_whence);
7751 fl.l_start = tswap64(target_fl->l_start);
7752 fl.l_len = tswap64(target_fl->l_len);
7753 fl.l_pid = tswap32(target_fl->l_pid);
7754 unlock_user_struct(target_fl, arg3, 0);
7756 ret = get_errno(fcntl(arg1, cmd, &fl));
7757 break;
7758 default:
7759 ret = do_fcntl(arg1, arg2, arg3);
7760 break;
7762 break;
7764 #endif
7765 #ifdef TARGET_NR_cacheflush
7766 case TARGET_NR_cacheflush:
7767 /* self-modifying code is handled automatically, so nothing needed */
7768 ret = 0;
7769 break;
7770 #endif
7771 #ifdef TARGET_NR_security
7772 case TARGET_NR_security:
7773 goto unimplemented;
7774 #endif
7775 #ifdef TARGET_NR_getpagesize
7776 case TARGET_NR_getpagesize:
7777 ret = TARGET_PAGE_SIZE;
7778 break;
7779 #endif
7780 case TARGET_NR_gettid:
7781 ret = get_errno(gettid());
7782 break;
7783 #ifdef TARGET_NR_readahead
7784 case TARGET_NR_readahead:
7785 #if TARGET_ABI_BITS == 32
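/* On 32-bit ABIs the 64-bit offset is split across two registers
   (low word in arg2, high word in arg3), possibly shifted up by one
   slot when the ABI aligns 64-bit register pairs. */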
7786 if (regpairs_aligned(cpu_env)) {
7787 arg2 = arg3;
7788 arg3 = arg4;
7789 arg4 = arg5;
7791 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7792 #else
7793 ret = get_errno(readahead(arg1, arg2, arg3));
7794 #endif
7795 break;
7796 #endif
7797 #ifdef CONFIG_ATTR
7798 #ifdef TARGET_NR_setxattr
7799 case TARGET_NR_listxattr:
7800 case TARGET_NR_llistxattr:
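/*
 * As with the host syscalls, a NULL/zero-length list buffer is a valid way
 * to query the required size, so the buffer is only locked when the guest
 * actually supplied one.
 */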
7802 void *p, *b = 0;
7803 if (arg2) {
7804 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
7805 if (!b) {
7806 ret = -TARGET_EFAULT;
7807 break;
7810 p = lock_user_string(arg1);
7811 if (p) {
7812 if (num == TARGET_NR_listxattr) {
7813 ret = get_errno(listxattr(p, b, arg3));
7814 } else {
7815 ret = get_errno(llistxattr(p, b, arg3));
7817 } else {
7818 ret = -TARGET_EFAULT;
7820 unlock_user(p, arg1, 0);
7821 unlock_user(b, arg2, arg3);
7822 break;
7824 case TARGET_NR_flistxattr:
7826 void *b = 0;
7827 if (arg2) {
7828 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
7829 if (!b) {
7830 ret = -TARGET_EFAULT;
7831 break;
7834 ret = get_errno(flistxattr(arg1, b, arg3));
7835 unlock_user(b, arg2, arg3);
7836 break;
7838 case TARGET_NR_setxattr:
7839 case TARGET_NR_lsetxattr:
7841 void *p, *n, *v = 0;
7842 if (arg3) {
7843 v = lock_user(VERIFY_READ, arg3, arg4, 1);
7844 if (!v) {
7845 ret = -TARGET_EFAULT;
7846 break;
7849 p = lock_user_string(arg1);
7850 n = lock_user_string(arg2);
7851 if (p && n) {
7852 if (num == TARGET_NR_setxattr) {
7853 ret = get_errno(setxattr(p, n, v, arg4, arg5));
7854 } else {
7855 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
7857 } else {
7858 ret = -TARGET_EFAULT;
7860 unlock_user(p, arg1, 0);
7861 unlock_user(n, arg2, 0);
7862 unlock_user(v, arg3, 0);
7864 break;
7865 case TARGET_NR_fsetxattr:
7867 void *n, *v = 0;
7868 if (arg3) {
7869 v = lock_user(VERIFY_READ, arg3, arg4, 1);
7870 if (!v) {
7871 ret = -TARGET_EFAULT;
7872 break;
7875 n = lock_user_string(arg2);
7876 if (n) {
7877 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
7878 } else {
7879 ret = -TARGET_EFAULT;
7881 unlock_user(n, arg2, 0);
7882 unlock_user(v, arg3, 0);
7884 break;
7885 case TARGET_NR_getxattr:
7886 case TARGET_NR_lgetxattr:
7888 void *p, *n, *v = 0;
7889 if (arg3) {
7890 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7891 if (!v) {
7892 ret = -TARGET_EFAULT;
7893 break;
7896 p = lock_user_string(arg1);
7897 n = lock_user_string(arg2);
7898 if (p && n) {
7899 if (num == TARGET_NR_getxattr) {
7900 ret = get_errno(getxattr(p, n, v, arg4));
7901 } else {
7902 ret = get_errno(lgetxattr(p, n, v, arg4));
7904 } else {
7905 ret = -TARGET_EFAULT;
7907 unlock_user(p, arg1, 0);
7908 unlock_user(n, arg2, 0);
7909 unlock_user(v, arg3, arg4);
7911 break;
7912 case TARGET_NR_fgetxattr:
7914 void *n, *v = 0;
7915 if (arg3) {
7916 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7917 if (!v) {
7918 ret = -TARGET_EFAULT;
7919 break;
7922 n = lock_user_string(arg2);
7923 if (n) {
7924 ret = get_errno(fgetxattr(arg1, n, v, arg4));
7925 } else {
7926 ret = -TARGET_EFAULT;
7928 unlock_user(n, arg2, 0);
7929 unlock_user(v, arg3, arg4);
7931 break;
7932 case TARGET_NR_removexattr:
7933 case TARGET_NR_lremovexattr:
7935 void *p, *n;
7936 p = lock_user_string(arg1);
7937 n = lock_user_string(arg2);
7938 if (p && n) {
7939 if (num == TARGET_NR_removexattr) {
7940 ret = get_errno(removexattr(p, n));
7941 } else {
7942 ret = get_errno(lremovexattr(p, n));
7944 } else {
7945 ret = -TARGET_EFAULT;
7947 unlock_user(p, arg1, 0);
7948 unlock_user(n, arg2, 0);
7950 break;
7951 case TARGET_NR_fremovexattr:
7953 void *n;
7954 n = lock_user_string(arg2);
7955 if (n) {
7956 ret = get_errno(fremovexattr(arg1, n));
7957 } else {
7958 ret = -TARGET_EFAULT;
7960 unlock_user(n, arg2, 0);
7962 break;
7963 #endif
7964 #endif /* CONFIG_ATTR */
7965 #ifdef TARGET_NR_set_thread_area
7966 case TARGET_NR_set_thread_area:
7967 #if defined(TARGET_MIPS)
7968 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7969 ret = 0;
7970 break;
7971 #elif defined(TARGET_CRIS)
7972 if (arg1 & 0xff)
7973 ret = -TARGET_EINVAL;
7974 else {
7975 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7976 ret = 0;
7978 break;
7979 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7980 ret = do_set_thread_area(cpu_env, arg1);
7981 break;
7982 #else
7983 goto unimplemented_nowarn;
7984 #endif
7985 #endif
7986 #ifdef TARGET_NR_get_thread_area
7987 case TARGET_NR_get_thread_area:
7988 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7989 ret = do_get_thread_area(cpu_env, arg1);
7990 #else
7991 goto unimplemented_nowarn;
7992 #endif
7993 #endif
7994 #ifdef TARGET_NR_getdomainname
7995 case TARGET_NR_getdomainname:
7996 goto unimplemented_nowarn;
7997 #endif
7999 #ifdef TARGET_NR_clock_gettime
8000 case TARGET_NR_clock_gettime:
8002 struct timespec ts;
8003 ret = get_errno(clock_gettime(arg1, &ts));
8004 if (!is_error(ret)) {
8005 host_to_target_timespec(arg2, &ts);
8007 break;
8009 #endif
8010 #ifdef TARGET_NR_clock_getres
8011 case TARGET_NR_clock_getres:
8013 struct timespec ts;
8014 ret = get_errno(clock_getres(arg1, &ts));
8015 if (!is_error(ret)) {
8016 host_to_target_timespec(arg2, &ts);
8018 break;
8020 #endif
8021 #ifdef TARGET_NR_clock_nanosleep
8022 case TARGET_NR_clock_nanosleep:
8024 struct timespec ts;
8025 target_to_host_timespec(&ts, arg3);
8026 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8027 if (arg4)
8028 host_to_target_timespec(arg4, &ts);
8029 break;
8031 #endif
8033 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8034 case TARGET_NR_set_tid_address:
8035 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8036 break;
8037 #endif
8039 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8040 case TARGET_NR_tkill:
8041 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8042 break;
8043 #endif
8045 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8046 case TARGET_NR_tgkill:
8047 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8048 target_to_host_signal(arg3)));
8049 break;
8050 #endif
8052 #ifdef TARGET_NR_set_robust_list
8053 case TARGET_NR_set_robust_list:
8054 goto unimplemented_nowarn;
8055 #endif
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
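    /* The POSIX message queue syscalls are emulated with the host's mq_*
     * API; mq_attr structures are converted between the target and host
     * layouts on the way in and out.
     */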
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr(&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        break;
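    /* mq_timedsend and mq_timedreceive fall back to the untimed mq_send and
     * mq_receive when no timeout pointer is supplied; otherwise the absolute
     * timeout is converted to a host timespec around the call.
     */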
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            }
            unlock_user(p, arg2, arg3);
        }
        break;
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            /* The receive buffer is written by the host call, so lock it
             * for writing.
             */
            p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;
    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /*     break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        break;
#endif
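    /* splice takes optional 64-bit in/out offsets, which are read from guest
     * memory into host loff_t values before the call; a null guest pointer
     * means "use the file position".
     */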
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                /* arg4 points at the output offset. */
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(vmsplice(arg1, vec, count, arg4));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
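    /* eventfd flags are passed straight through to the host; this assumes
     * the target's EFD_* flag values match the host's.
     */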
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
        break;
#endif
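    /* On 32-bit ABIs the two 64-bit offsets arrive split across register
     * pairs and are reassembled with target_offset64(); MIPS O32 aligns
     * 64-bit arguments to even register pairs, so its offsets start one
     * slot later.
     */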
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif
#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
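    /* epoll_wait and epoll_pwait share one implementation: events are
     * collected into a host-side array and then byte-swapped back into the
     * guest's target_epoll_event buffer.
     */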
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#endif
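    /* prlimit64 exchanges 64-bit rlimit values with the guest; both the new
     * and old limits are byte-swapped between target and host order.
     */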
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;