linux-user/syscall.c (qemu/ar7.git)
blob 471d0605f7a5b8ba2471a6a6678986d5a01194e0
/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <signal.h>
#include <sched.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
64 #include "qemu-common.h"
65 #ifdef TARGET_GPROF
66 #include <sys/gmon.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu-xattr.h"
76 #endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include "linux_loop.h"
#include "cpu-uname.h"

#include "qemu.h"
#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values. */
#define CLONE_NPTL_FLAGS2 0
#endif

//#define DEBUG

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
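
/*
 * How the table above is used, as a minimal sketch (assuming the
 * bitmask_transtbl helpers declared in qemu.h): each row pairs a guest
 * flag field/value with its host counterpart, and the open()/fcntl()
 * handlers translate a whole flag word in one call, roughly:
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *     int back_again = host_to_target_bitmask(host_flags, fcntl_flags_tbl);
 *
 * The all-zero row terminates the walk, which is why O_LARGEFILE is only
 * listed when it is non-zero on at least one side.
 */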
#define COPY_UTSNAME_FIELD(dest, src) \
    do { \
        /* __NEW_UTS_LEN doesn't include terminating null */ \
        (void) strncpy((dest), (src), __NEW_UTS_LEN); \
        (dest)[__NEW_UTS_LEN] = '\0'; \
    } while (0)

static int sys_uname(struct new_utsname *buf)
{
    struct utsname uts_buf;

    if (uname(&uts_buf) < 0)
        return (-1);

    /*
     * Just in case these have some differences, we
     * translate utsname to new_utsname (which is the
     * struct the Linux kernel uses).
     */

    memset(buf, 0, sizeof(*buf));
    COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
    COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
    COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
    COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
    COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
    COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
    return (0);
}
#undef COPY_UTSNAME_FIELD
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
    return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
    return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
    return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
    return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
    return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
    return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
    return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    /*
     * open(2) has extra parameter 'mode' when called with
     * flag O_CREAT.
     */
    if ((flags & O_CREAT) != 0) {
        return (openat(dirfd, pathname, flags, mode));
    }
    return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
    return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
    return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
    return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
    return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
};
static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}
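
/*
 * Worked example of the error path above (a sketch; the exact TARGET_
 * values depend on the guest architecture headers): a syscall wrapper
 * typically does
 *
 *     ret = get_errno(open(path, flags));
 *
 * If the host open() fails with errno == ENOSYS, get_errno() sees -1,
 * consults host_to_target_errno_table[ENOSYS] and returns -TARGET_ENOSYS,
 * which is what the guest libc expects to decode.  is_error() then treats
 * any return value in the range [-4096, -1] as an errno-carrying result,
 * mirroring the kernel/libc convention.
 */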
char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
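
/*
 * Bit-layout example for the two fd_set converters above (a sketch,
 * assuming a 32-bit guest, i.e. TARGET_ABI_BITS == 32): the guest fd_set is
 * an array of abi_ulong in guest byte order, so fd 35 lives in word index 1
 * (35 / 32) at bit 3 (35 % 32).  copy_from_user_fdset() converts each word
 * with __get_user() and rebuilds the host fd_set bit by bit;
 * copy_to_user_fdset() does the reverse with __put_user().
 */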
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
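
/*
 * Example of the tick conversion above: on an Alpha host (HOST_HZ == 1024)
 * emulating a 100 Hz target, 2048 host ticks become
 * 2048 * 100 / 1024 = 200 target ticks; when both sides use the same HZ
 * the value passes through unchanged.
 */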
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
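
/*
 * Clamping example for the two rlimit converters above (a sketch for a
 * 32-bit guest on a 64-bit host): a host limit of 8 GiB does not fit in an
 * abi_long, so host_to_target_rlim() reports TARGET_RLIM_INFINITY rather
 * than a truncated value; in the other direction, TARGET_RLIM_INFINITY
 * maps to the host RLIM_INFINITY and any guest value that round-trips
 * through rlim_t unchanged is passed through as-is.
 */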
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
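
/*
 * Calling-convention example for do_pipe() above: for the original pipe()
 * syscall, Alpha, MIPS and SH4 guests expect the read end as the normal
 * return value and the write end in a second result register (IR_A4,
 * gpr[3] and gregs[1] respectively), so no guest memory is written at all.
 * Every other target, and pipe2() everywhere, gets both descriptors stored
 * back through the pipedes pointer instead.
 */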
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * The Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
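
/*
 * Example of the sun_path fixup above: a guest that binds a unix socket
 * with addrlen = offsetof(struct sockaddr_un, sun_path) + strlen(path)
 * omits the terminating NUL; if the byte just past the supplied length is
 * already zero, the length is simply extended by one so the host kernel
 * sees a properly terminated path, and it is capped at
 * sizeof(struct sockaddr_un) in any case.
 */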
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
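
/*
 * Note on the control-message walk above: header alignment differs between
 * guest and host ABIs, so cmsg_len is never copied verbatim; the payload
 * length is recovered with TARGET_CMSG_ALIGN() on the guest side and
 * re-encoded with CMSG_LEN()/CMSG_SPACE() on the host side.  Only
 * SCM_RIGHTS payloads get real conversion (each file descriptor is
 * byte-swapped); any other ancillary data is copied as raw bytes with a
 * warning.
 */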
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
            (cmsg->cmsg_type == SCM_RIGHTS)) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
                   (cmsg->cmsg_type == SO_TIMESTAMP) &&
                   (len == sizeof(struct timeval))) {
            /* copy struct timeval to target */
            struct timeval *tv = (struct timeval *)data;
            struct target_timeval *target_tv =
                (struct target_timeval *)target_data;

            target_tv->tv_sec = tswapal(tv->tv_sec);
            target_tv->tv_usec = tswapal(tv->tv_usec);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
            break;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* FIXME
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapal(target_vec[i].iov_base);
        vec[i].iov_len = tswapal(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if an element has an invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}

static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapal(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
    }
    unlock_user (target_vec, target_addr, 0);

    return 0;
}
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
    return get_errno(socket(domain, type, protocol));
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg() Must return target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    /* FIXME */
    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            unlock_user_struct(msgp, target_msg, send ? 0 : 1);
            return ret;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapal(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
1972 /* do_getpeername() Must return target values and target errnos. */
1973 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1974 abi_ulong target_addrlen_addr)
1976 socklen_t addrlen;
1977 void *addr;
1978 abi_long ret;
1980 if (get_user_u32(addrlen, target_addrlen_addr))
1981 return -TARGET_EFAULT;
1983 if ((int)addrlen < 0) {
1984 return -TARGET_EINVAL;
1987 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1988 return -TARGET_EFAULT;
1990 addr = alloca(addrlen);
1992 ret = get_errno(getpeername(fd, addr, &addrlen));
1993 if (!is_error(ret)) {
1994 host_to_target_sockaddr(target_addr, addr, addrlen);
1995 if (put_user_u32(addrlen, target_addrlen_addr))
1996 ret = -TARGET_EFAULT;
1998 return ret;
2001 /* do_getsockname() Must return target values and target errnos. */
2002 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2003 abi_ulong target_addrlen_addr)
2005 socklen_t addrlen;
2006 void *addr;
2007 abi_long ret;
2009 if (get_user_u32(addrlen, target_addrlen_addr))
2010 return -TARGET_EFAULT;
2012 if ((int)addrlen < 0) {
2013 return -TARGET_EINVAL;
2016 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2017 return -TARGET_EFAULT;
2019 addr = alloca(addrlen);
2021 ret = get_errno(getsockname(fd, addr, &addrlen));
2022 if (!is_error(ret)) {
2023 host_to_target_sockaddr(target_addr, addr, addrlen);
2024 if (put_user_u32(addrlen, target_addrlen_addr))
2025 ret = -TARGET_EFAULT;
2027 return ret;
2030 /* do_socketpair() Must return target values and target errnos. */
2031 static abi_long do_socketpair(int domain, int type, int protocol,
2032 abi_ulong target_tab_addr)
2034 int tab[2];
2035 abi_long ret;
2037 ret = get_errno(socketpair(domain, type, protocol, tab));
2038 if (!is_error(ret)) {
2039 if (put_user_s32(tab[0], target_tab_addr)
2040 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2041 ret = -TARGET_EFAULT;
2043 return ret;
2046 /* do_sendto() Must return target values and target errnos. */
2047 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2048 abi_ulong target_addr, socklen_t addrlen)
2050 void *addr;
2051 void *host_msg;
2052 abi_long ret;
2054 if ((int)addrlen < 0) {
2055 return -TARGET_EINVAL;
2058 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2059 if (!host_msg)
2060 return -TARGET_EFAULT;
2061 if (target_addr) {
2062 addr = alloca(addrlen);
2063 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2064 if (ret) {
2065 unlock_user(host_msg, msg, 0);
2066 return ret;
2068 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2069 } else {
2070 ret = get_errno(send(fd, host_msg, len, flags));
2072 unlock_user(host_msg, msg, 0);
2073 return ret;
2076 /* do_recvfrom() Must return target values and target errnos. */
2077 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2078 abi_ulong target_addr,
2079 abi_ulong target_addrlen)
2081 socklen_t addrlen;
2082 void *addr;
2083 void *host_msg;
2084 abi_long ret;
2086 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2087 if (!host_msg)
2088 return -TARGET_EFAULT;
2089 if (target_addr) {
2090 if (get_user_u32(addrlen, target_addrlen)) {
2091 ret = -TARGET_EFAULT;
2092 goto fail;
2094 if ((int)addrlen < 0) {
2095 ret = -TARGET_EINVAL;
2096 goto fail;
2098 addr = alloca(addrlen);
2099 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2100 } else {
2101 addr = NULL; /* To keep compiler quiet. */
2102 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2104 if (!is_error(ret)) {
2105 if (target_addr) {
2106 host_to_target_sockaddr(target_addr, addr, addrlen);
2107 if (put_user_u32(addrlen, target_addrlen)) {
2108 ret = -TARGET_EFAULT;
2109 goto fail;
2112 unlock_user(host_msg, msg, len);
2113 } else {
2114 fail:
2115 unlock_user(host_msg, msg, 0);
2117 return ret;
2120 #ifdef TARGET_NR_socketcall
2121 /* do_socketcall() Must return target values and target errnos. */
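/* Sketch of the socketcall(2) convention decoded here: the guest passes
 * one pointer (vptr) to an array of abi_ulong arguments, so argument i
 * lives at vptr + i * sizeof(abi_ulong). A guest bind(fd, addr, addrlen),
 * for example, arrives as socketcall(SOCKOP_bind, args) with args[0]=fd,
 * args[1]=addr and args[2]=addrlen, which is why every case below reads
 * its operands with get_user_ual(x, vptr + i * n) before calling the
 * matching do_xxx() helper. */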
2122 static abi_long do_socketcall(int num, abi_ulong vptr)
2124 abi_long ret;
2125 const int n = sizeof(abi_ulong);
2127 switch(num) {
2128 case SOCKOP_socket:
2130 abi_ulong domain, type, protocol;
2132 if (get_user_ual(domain, vptr)
2133 || get_user_ual(type, vptr + n)
2134 || get_user_ual(protocol, vptr + 2 * n))
2135 return -TARGET_EFAULT;
2137 ret = do_socket(domain, type, protocol);
2139 break;
2140 case SOCKOP_bind:
2142 abi_ulong sockfd;
2143 abi_ulong target_addr;
2144 socklen_t addrlen;
2146 if (get_user_ual(sockfd, vptr)
2147 || get_user_ual(target_addr, vptr + n)
2148 || get_user_ual(addrlen, vptr + 2 * n))
2149 return -TARGET_EFAULT;
2151 ret = do_bind(sockfd, target_addr, addrlen);
2153 break;
2154 case SOCKOP_connect:
2156 abi_ulong sockfd;
2157 abi_ulong target_addr;
2158 socklen_t addrlen;
2160 if (get_user_ual(sockfd, vptr)
2161 || get_user_ual(target_addr, vptr + n)
2162 || get_user_ual(addrlen, vptr + 2 * n))
2163 return -TARGET_EFAULT;
2165 ret = do_connect(sockfd, target_addr, addrlen);
2167 break;
2168 case SOCKOP_listen:
2170 abi_ulong sockfd, backlog;
2172 if (get_user_ual(sockfd, vptr)
2173 || get_user_ual(backlog, vptr + n))
2174 return -TARGET_EFAULT;
2176 ret = get_errno(listen(sockfd, backlog));
2178 break;
2179 case SOCKOP_accept:
2181 abi_ulong sockfd;
2182 abi_ulong target_addr, target_addrlen;
2184 if (get_user_ual(sockfd, vptr)
2185 || get_user_ual(target_addr, vptr + n)
2186 || get_user_ual(target_addrlen, vptr + 2 * n))
2187 return -TARGET_EFAULT;
2189 ret = do_accept(sockfd, target_addr, target_addrlen);
2191 break;
2192 case SOCKOP_getsockname:
2194 abi_ulong sockfd;
2195 abi_ulong target_addr, target_addrlen;
2197 if (get_user_ual(sockfd, vptr)
2198 || get_user_ual(target_addr, vptr + n)
2199 || get_user_ual(target_addrlen, vptr + 2 * n))
2200 return -TARGET_EFAULT;
2202 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2204 break;
2205 case SOCKOP_getpeername:
2207 abi_ulong sockfd;
2208 abi_ulong target_addr, target_addrlen;
2210 if (get_user_ual(sockfd, vptr)
2211 || get_user_ual(target_addr, vptr + n)
2212 || get_user_ual(target_addrlen, vptr + 2 * n))
2213 return -TARGET_EFAULT;
2215 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2217 break;
2218 case SOCKOP_socketpair:
2220 abi_ulong domain, type, protocol;
2221 abi_ulong tab;
2223 if (get_user_ual(domain, vptr)
2224 || get_user_ual(type, vptr + n)
2225 || get_user_ual(protocol, vptr + 2 * n)
2226 || get_user_ual(tab, vptr + 3 * n))
2227 return -TARGET_EFAULT;
2229 ret = do_socketpair(domain, type, protocol, tab);
2231 break;
2232 case SOCKOP_send:
2234 abi_ulong sockfd;
2235 abi_ulong msg;
2236 size_t len;
2237 abi_ulong flags;
2239 if (get_user_ual(sockfd, vptr)
2240 || get_user_ual(msg, vptr + n)
2241 || get_user_ual(len, vptr + 2 * n)
2242 || get_user_ual(flags, vptr + 3 * n))
2243 return -TARGET_EFAULT;
2245 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2247 break;
2248 case SOCKOP_recv:
2250 abi_ulong sockfd;
2251 abi_ulong msg;
2252 size_t len;
2253 abi_ulong flags;
2255 if (get_user_ual(sockfd, vptr)
2256 || get_user_ual(msg, vptr + n)
2257 || get_user_ual(len, vptr + 2 * n)
2258 || get_user_ual(flags, vptr + 3 * n))
2259 return -TARGET_EFAULT;
2261 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2263 break;
2264 case SOCKOP_sendto:
2266 abi_ulong sockfd;
2267 abi_ulong msg;
2268 size_t len;
2269 abi_ulong flags;
2270 abi_ulong addr;
2271 socklen_t addrlen;
2273 if (get_user_ual(sockfd, vptr)
2274 || get_user_ual(msg, vptr + n)
2275 || get_user_ual(len, vptr + 2 * n)
2276 || get_user_ual(flags, vptr + 3 * n)
2277 || get_user_ual(addr, vptr + 4 * n)
2278 || get_user_ual(addrlen, vptr + 5 * n))
2279 return -TARGET_EFAULT;
2281 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2283 break;
2284 case SOCKOP_recvfrom:
2286 abi_ulong sockfd;
2287 abi_ulong msg;
2288 size_t len;
2289 abi_ulong flags;
2290 abi_ulong addr;
2291 socklen_t addrlen;
2293 if (get_user_ual(sockfd, vptr)
2294 || get_user_ual(msg, vptr + n)
2295 || get_user_ual(len, vptr + 2 * n)
2296 || get_user_ual(flags, vptr + 3 * n)
2297 || get_user_ual(addr, vptr + 4 * n)
2298 || get_user_ual(addrlen, vptr + 5 * n))
2299 return -TARGET_EFAULT;
2301 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2303 break;
2304 case SOCKOP_shutdown:
2306 abi_ulong sockfd, how;
2308 if (get_user_ual(sockfd, vptr)
2309 || get_user_ual(how, vptr + n))
2310 return -TARGET_EFAULT;
2312 ret = get_errno(shutdown(sockfd, how));
2314 break;
2315 case SOCKOP_sendmsg:
2316 case SOCKOP_recvmsg:
2318 abi_ulong fd;
2319 abi_ulong target_msg;
2320 abi_ulong flags;
2322 if (get_user_ual(fd, vptr)
2323 || get_user_ual(target_msg, vptr + n)
2324 || get_user_ual(flags, vptr + 2 * n))
2325 return -TARGET_EFAULT;
2327 ret = do_sendrecvmsg(fd, target_msg, flags,
2328 (num == SOCKOP_sendmsg));
2330 break;
2331 case SOCKOP_setsockopt:
2333 abi_ulong sockfd;
2334 abi_ulong level;
2335 abi_ulong optname;
2336 abi_ulong optval;
2337 socklen_t optlen;
2339 if (get_user_ual(sockfd, vptr)
2340 || get_user_ual(level, vptr + n)
2341 || get_user_ual(optname, vptr + 2 * n)
2342 || get_user_ual(optval, vptr + 3 * n)
2343 || get_user_ual(optlen, vptr + 4 * n))
2344 return -TARGET_EFAULT;
2346 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2348 break;
2349 case SOCKOP_getsockopt:
2351 abi_ulong sockfd;
2352 abi_ulong level;
2353 abi_ulong optname;
2354 abi_ulong optval;
2355 socklen_t optlen;
2357 if (get_user_ual(sockfd, vptr)
2358 || get_user_ual(level, vptr + n)
2359 || get_user_ual(optname, vptr + 2 * n)
2360 || get_user_ual(optval, vptr + 3 * n)
2361 || get_user_ual(optlen, vptr + 4 * n))
2362 return -TARGET_EFAULT;
2364 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2366 break;
2367 default:
2368 gemu_log("Unsupported socketcall: %d\n", num);
2369 ret = -TARGET_ENOSYS;
2370 break;
2372 return ret;
2374 #endif
2376 #define N_SHM_REGIONS 32
2378 static struct shm_region {
2379 abi_ulong start;
2380 abi_ulong size;
2381 } shm_regions[N_SHM_REGIONS];
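/* shm_regions[] remembers where each attached SysV shared memory segment
 * was mapped in the guest address space and how big it is, so that
 * do_shmdt() can later clear the page flags that do_shmat() set with
 * page_set_flags(). A slot with start == 0 is free. */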
2383 struct target_ipc_perm
2385 abi_long __key;
2386 abi_ulong uid;
2387 abi_ulong gid;
2388 abi_ulong cuid;
2389 abi_ulong cgid;
2390 unsigned short int mode;
2391 unsigned short int __pad1;
2392 unsigned short int __seq;
2393 unsigned short int __pad2;
2394 abi_ulong __unused1;
2395 abi_ulong __unused2;
2398 struct target_semid_ds
2400 struct target_ipc_perm sem_perm;
2401 abi_ulong sem_otime;
2402 abi_ulong __unused1;
2403 abi_ulong sem_ctime;
2404 abi_ulong __unused2;
2405 abi_ulong sem_nsems;
2406 abi_ulong __unused3;
2407 abi_ulong __unused4;
2410 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2411 abi_ulong target_addr)
2413 struct target_ipc_perm *target_ip;
2414 struct target_semid_ds *target_sd;
2416 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2417 return -TARGET_EFAULT;
2418 target_ip = &(target_sd->sem_perm);
2419 host_ip->__key = tswapal(target_ip->__key);
2420 host_ip->uid = tswapal(target_ip->uid);
2421 host_ip->gid = tswapal(target_ip->gid);
2422 host_ip->cuid = tswapal(target_ip->cuid);
2423 host_ip->cgid = tswapal(target_ip->cgid);
2424 host_ip->mode = tswap16(target_ip->mode);
2425 unlock_user_struct(target_sd, target_addr, 0);
2426 return 0;
2429 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2430 struct ipc_perm *host_ip)
2432 struct target_ipc_perm *target_ip;
2433 struct target_semid_ds *target_sd;
2435 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2436 return -TARGET_EFAULT;
2437 target_ip = &(target_sd->sem_perm);
2438 target_ip->__key = tswapal(host_ip->__key);
2439 target_ip->uid = tswapal(host_ip->uid);
2440 target_ip->gid = tswapal(host_ip->gid);
2441 target_ip->cuid = tswapal(host_ip->cuid);
2442 target_ip->cgid = tswapal(host_ip->cgid);
2443 target_ip->mode = tswap16(host_ip->mode);
2444 unlock_user_struct(target_sd, target_addr, 1);
2445 return 0;
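/* The target_to_host_xxx() and host_to_target_xxx() converters in this
 * area all follow the same idiom: lock_user_struct() the guest copy,
 * swap each field with tswapal()/tswap32()/tswap16() according to its
 * width, and unlock_user_struct() with the last argument set to 1 only
 * when data was written back to guest memory. */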
2448 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2449 abi_ulong target_addr)
2451 struct target_semid_ds *target_sd;
2453 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2454 return -TARGET_EFAULT;
2455 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2456 return -TARGET_EFAULT;
2457 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2458 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2459 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2460 unlock_user_struct(target_sd, target_addr, 0);
2461 return 0;
2464 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2465 struct semid_ds *host_sd)
2467 struct target_semid_ds *target_sd;
2469 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2470 return -TARGET_EFAULT;
2471 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2472 return -TARGET_EFAULT;
2473 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2474 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2475 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2476 unlock_user_struct(target_sd, target_addr, 1);
2477 return 0;
2480 struct target_seminfo {
2481 int semmap;
2482 int semmni;
2483 int semmns;
2484 int semmnu;
2485 int semmsl;
2486 int semopm;
2487 int semume;
2488 int semusz;
2489 int semvmx;
2490 int semaem;
2493 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2494 struct seminfo *host_seminfo)
2496 struct target_seminfo *target_seminfo;
2497 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2498 return -TARGET_EFAULT;
2499 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2500 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2501 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2502 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2503 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2504 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2505 __put_user(host_seminfo->semume, &target_seminfo->semume);
2506 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2507 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2508 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2509 unlock_user_struct(target_seminfo, target_addr, 1);
2510 return 0;
2513 union semun {
2514 int val;
2515 struct semid_ds *buf;
2516 unsigned short *array;
2517 struct seminfo *__buf;
2520 union target_semun {
2521 int val;
2522 abi_ulong buf;
2523 abi_ulong array;
2524 abi_ulong __buf;
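/* In the target view of union semun the pointer members are guest
 * addresses (abi_ulong), not host pointers, so they must be translated
 * (locked and byte-swapped) before the host semctl() can be called;
 * that is what target_to_host_semarray() and the related helpers below
 * take care of. */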
2527 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2528 abi_ulong target_addr)
2530 int nsems;
2531 unsigned short *array;
2532 union semun semun;
2533 struct semid_ds semid_ds;
2534 int i, ret;
2536 semun.buf = &semid_ds;
2538 ret = semctl(semid, 0, IPC_STAT, semun);
2539 if (ret == -1)
2540 return get_errno(ret);
2542 nsems = semid_ds.sem_nsems;
2544 *host_array = malloc(nsems*sizeof(unsigned short));
2545 array = lock_user(VERIFY_READ, target_addr,
2546 nsems*sizeof(unsigned short), 1);
2547 if (!array)
2548 return -TARGET_EFAULT;
2550 for(i=0; i<nsems; i++) {
2551 __get_user((*host_array)[i], &array[i]);
2553 unlock_user(array, target_addr, 0);
2555 return 0;
2558 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2559 unsigned short **host_array)
2561 int nsems;
2562 unsigned short *array;
2563 union semun semun;
2564 struct semid_ds semid_ds;
2565 int i, ret;
2567 semun.buf = &semid_ds;
2569 ret = semctl(semid, 0, IPC_STAT, semun);
2570 if (ret == -1)
2571 return get_errno(ret);
2573 nsems = semid_ds.sem_nsems;
2575 array = lock_user(VERIFY_WRITE, target_addr,
2576 nsems*sizeof(unsigned short), 0);
2577 if (!array)
2578 return -TARGET_EFAULT;
2580 for(i=0; i<nsems; i++) {
2581 __put_user((*host_array)[i], &array[i]);
2583 free(*host_array);
2584 unlock_user(array, target_addr, 1);
2586 return 0;
2589 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2590 union target_semun target_su)
2592 union semun arg;
2593 struct semid_ds dsarg;
2594 unsigned short *array = NULL;
2595 struct seminfo seminfo;
2596 abi_long ret = -TARGET_EINVAL;
2597 abi_long err;
2598 cmd &= 0xff;
2600 switch( cmd ) {
2601 case GETVAL:
2602 case SETVAL:
2603 arg.val = tswap32(target_su.val);
2604 ret = get_errno(semctl(semid, semnum, cmd, arg));
2605 target_su.val = tswap32(arg.val);
2606 break;
2607 case GETALL:
2608 case SETALL:
2609 err = target_to_host_semarray(semid, &array, target_su.array);
2610 if (err)
2611 return err;
2612 arg.array = array;
2613 ret = get_errno(semctl(semid, semnum, cmd, arg));
2614 err = host_to_target_semarray(semid, target_su.array, &array);
2615 if (err)
2616 return err;
2617 break;
2618 case IPC_STAT:
2619 case IPC_SET:
2620 case SEM_STAT:
2621 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2622 if (err)
2623 return err;
2624 arg.buf = &dsarg;
2625 ret = get_errno(semctl(semid, semnum, cmd, arg));
2626 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2627 if (err)
2628 return err;
2629 break;
2630 case IPC_INFO:
2631 case SEM_INFO:
2632 arg.__buf = &seminfo;
2633 ret = get_errno(semctl(semid, semnum, cmd, arg));
2634 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2635 if (err)
2636 return err;
2637 break;
2638 case IPC_RMID:
2639 case GETPID:
2640 case GETNCNT:
2641 case GETZCNT:
2642 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2643 break;
2646 return ret;
2649 struct target_sembuf {
2650 unsigned short sem_num;
2651 short sem_op;
2652 short sem_flg;
2655 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2656 abi_ulong target_addr,
2657 unsigned nsops)
2659 struct target_sembuf *target_sembuf;
2660 int i;
2662 target_sembuf = lock_user(VERIFY_READ, target_addr,
2663 nsops*sizeof(struct target_sembuf), 1);
2664 if (!target_sembuf)
2665 return -TARGET_EFAULT;
2667 for(i=0; i<nsops; i++) {
2668 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2669 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2670 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2673 unlock_user(target_sembuf, target_addr, 0);
2675 return 0;
2678 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2680 struct sembuf sops[nsops];
2682 if (target_to_host_sembuf(sops, ptr, nsops))
2683 return -TARGET_EFAULT;
2685 return semop(semid, sops, nsops);
2688 struct target_msqid_ds
2690 struct target_ipc_perm msg_perm;
2691 abi_ulong msg_stime;
2692 #if TARGET_ABI_BITS == 32
2693 abi_ulong __unused1;
2694 #endif
2695 abi_ulong msg_rtime;
2696 #if TARGET_ABI_BITS == 32
2697 abi_ulong __unused2;
2698 #endif
2699 abi_ulong msg_ctime;
2700 #if TARGET_ABI_BITS == 32
2701 abi_ulong __unused3;
2702 #endif
2703 abi_ulong __msg_cbytes;
2704 abi_ulong msg_qnum;
2705 abi_ulong msg_qbytes;
2706 abi_ulong msg_lspid;
2707 abi_ulong msg_lrpid;
2708 abi_ulong __unused4;
2709 abi_ulong __unused5;
2712 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2713 abi_ulong target_addr)
2715 struct target_msqid_ds *target_md;
2717 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2718 return -TARGET_EFAULT;
2719 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2720 return -TARGET_EFAULT;
2721 host_md->msg_stime = tswapal(target_md->msg_stime);
2722 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2723 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2724 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2725 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2726 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2727 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2728 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2729 unlock_user_struct(target_md, target_addr, 0);
2730 return 0;
2733 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2734 struct msqid_ds *host_md)
2736 struct target_msqid_ds *target_md;
2738 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2739 return -TARGET_EFAULT;
2740 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2741 return -TARGET_EFAULT;
2742 target_md->msg_stime = tswapal(host_md->msg_stime);
2743 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2744 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2745 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2746 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2747 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2748 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2749 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2750 unlock_user_struct(target_md, target_addr, 1);
2751 return 0;
2754 struct target_msginfo {
2755 int msgpool;
2756 int msgmap;
2757 int msgmax;
2758 int msgmnb;
2759 int msgmni;
2760 int msgssz;
2761 int msgtql;
2762 unsigned short int msgseg;
2765 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2766 struct msginfo *host_msginfo)
2768 struct target_msginfo *target_msginfo;
2769 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2770 return -TARGET_EFAULT;
2771 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2772 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2773 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2774 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2775 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2776 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2777 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2778 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2779 unlock_user_struct(target_msginfo, target_addr, 1);
2780 return 0;
2783 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2785 struct msqid_ds dsarg;
2786 struct msginfo msginfo;
2787 abi_long ret = -TARGET_EINVAL;
2789 cmd &= 0xff;
2791 switch (cmd) {
2792 case IPC_STAT:
2793 case IPC_SET:
2794 case MSG_STAT:
2795 if (target_to_host_msqid_ds(&dsarg,ptr))
2796 return -TARGET_EFAULT;
2797 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2798 if (host_to_target_msqid_ds(ptr,&dsarg))
2799 return -TARGET_EFAULT;
2800 break;
2801 case IPC_RMID:
2802 ret = get_errno(msgctl(msgid, cmd, NULL));
2803 break;
2804 case IPC_INFO:
2805 case MSG_INFO:
2806 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2807 if (host_to_target_msginfo(ptr, &msginfo))
2808 return -TARGET_EFAULT;
2809 break;
2812 return ret;
2815 struct target_msgbuf {
2816 abi_long mtype;
2817 char mtext[1];
2820 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2821 unsigned int msgsz, int msgflg)
2823 struct target_msgbuf *target_mb;
2824 struct msgbuf *host_mb;
2825 abi_long ret = 0;
2827 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2828 return -TARGET_EFAULT;
2829 host_mb = malloc(msgsz+sizeof(long));
2830 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2831 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2832 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2833 free(host_mb);
2834 unlock_user_struct(target_mb, msgp, 0);
2836 return ret;
2839 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2840 unsigned int msgsz, abi_long msgtyp,
2841 int msgflg)
2843 struct target_msgbuf *target_mb;
2844 char *target_mtext;
2845 struct msgbuf *host_mb;
2846 abi_long ret = 0;
2848 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2849 return -TARGET_EFAULT;
2851 host_mb = g_malloc(msgsz+sizeof(long));
2852 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
2854 if (ret > 0) {
2855 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2856 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2857 if (!target_mtext) {
2858 ret = -TARGET_EFAULT;
2859 goto end;
2861 memcpy(target_mb->mtext, host_mb->mtext, ret);
2862 unlock_user(target_mtext, target_mtext_addr, ret);
2865 target_mb->mtype = tswapal(host_mb->mtype);
2867 end:
2868 if (target_mb)
2869 unlock_user_struct(target_mb, msgp, 1);
2870 g_free(host_mb);
2871 return ret;
2874 struct target_shmid_ds
2876 struct target_ipc_perm shm_perm;
2877 abi_ulong shm_segsz;
2878 abi_ulong shm_atime;
2879 #if TARGET_ABI_BITS == 32
2880 abi_ulong __unused1;
2881 #endif
2882 abi_ulong shm_dtime;
2883 #if TARGET_ABI_BITS == 32
2884 abi_ulong __unused2;
2885 #endif
2886 abi_ulong shm_ctime;
2887 #if TARGET_ABI_BITS == 32
2888 abi_ulong __unused3;
2889 #endif
2890 int shm_cpid;
2891 int shm_lpid;
2892 abi_ulong shm_nattch;
2893 unsigned long int __unused4;
2894 unsigned long int __unused5;
2897 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2898 abi_ulong target_addr)
2900 struct target_shmid_ds *target_sd;
2902 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2903 return -TARGET_EFAULT;
2904 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2905 return -TARGET_EFAULT;
2906 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2907 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2908 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2909 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2910 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2911 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2912 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2913 unlock_user_struct(target_sd, target_addr, 0);
2914 return 0;
2917 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2918 struct shmid_ds *host_sd)
2920 struct target_shmid_ds *target_sd;
2922 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2923 return -TARGET_EFAULT;
2924 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2925 return -TARGET_EFAULT;
2926 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2927 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2928 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2929 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2930 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2931 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2932 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2933 unlock_user_struct(target_sd, target_addr, 1);
2934 return 0;
2937 struct target_shminfo {
2938 abi_ulong shmmax;
2939 abi_ulong shmmin;
2940 abi_ulong shmmni;
2941 abi_ulong shmseg;
2942 abi_ulong shmall;
2945 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2946 struct shminfo *host_shminfo)
2948 struct target_shminfo *target_shminfo;
2949 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2950 return -TARGET_EFAULT;
2951 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2952 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2953 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2954 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2955 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2956 unlock_user_struct(target_shminfo, target_addr, 1);
2957 return 0;
2960 struct target_shm_info {
2961 int used_ids;
2962 abi_ulong shm_tot;
2963 abi_ulong shm_rss;
2964 abi_ulong shm_swp;
2965 abi_ulong swap_attempts;
2966 abi_ulong swap_successes;
2969 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2970 struct shm_info *host_shm_info)
2972 struct target_shm_info *target_shm_info;
2973 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2974 return -TARGET_EFAULT;
2975 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2976 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2977 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2978 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2979 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2980 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2981 unlock_user_struct(target_shm_info, target_addr, 1);
2982 return 0;
2985 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2987 struct shmid_ds dsarg;
2988 struct shminfo shminfo;
2989 struct shm_info shm_info;
2990 abi_long ret = -TARGET_EINVAL;
2992 cmd &= 0xff;
2994 switch(cmd) {
2995 case IPC_STAT:
2996 case IPC_SET:
2997 case SHM_STAT:
2998 if (target_to_host_shmid_ds(&dsarg, buf))
2999 return -TARGET_EFAULT;
3000 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3001 if (host_to_target_shmid_ds(buf, &dsarg))
3002 return -TARGET_EFAULT;
3003 break;
3004 case IPC_INFO:
3005 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3006 if (host_to_target_shminfo(buf, &shminfo))
3007 return -TARGET_EFAULT;
3008 break;
3009 case SHM_INFO:
3010 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3011 if (host_to_target_shm_info(buf, &shm_info))
3012 return -TARGET_EFAULT;
3013 break;
3014 case IPC_RMID:
3015 case SHM_LOCK:
3016 case SHM_UNLOCK:
3017 ret = get_errno(shmctl(shmid, cmd, NULL));
3018 break;
3021 return ret;
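/* do_shmat() keeps the guest's view of memory consistent: it queries the
 * segment size with IPC_STAT, picks a guest address (either the one
 * supplied or a free range from mmap_find_vma()), performs the host
 * shmat(), marks the pages readable/writable with page_set_flags(), and
 * records the mapping in shm_regions[] so do_shmdt() can undo it. */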
3024 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3026 abi_long raddr;
3027 void *host_raddr;
3028 struct shmid_ds shm_info;
3029 int i,ret;
3031 /* find out the length of the shared memory segment */
3032 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3033 if (is_error(ret)) {
3034 /* can't get length, bail out */
3035 return ret;
3038 mmap_lock();
3040 if (shmaddr)
3041 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3042 else {
3043 abi_ulong mmap_start;
3045 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3047 if (mmap_start == -1) {
3048 errno = ENOMEM;
3049 host_raddr = (void *)-1;
3050 } else
3051 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3054 if (host_raddr == (void *)-1) {
3055 mmap_unlock();
3056 return get_errno((long)host_raddr);
3058 raddr=h2g((unsigned long)host_raddr);
3060 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3061 PAGE_VALID | PAGE_READ |
3062 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3064 for (i = 0; i < N_SHM_REGIONS; i++) {
3065 if (shm_regions[i].start == 0) {
3066 shm_regions[i].start = raddr;
3067 shm_regions[i].size = shm_info.shm_segsz;
3068 break;
3072 mmap_unlock();
3073 return raddr;
3077 static inline abi_long do_shmdt(abi_ulong shmaddr)
3079 int i;
3081 for (i = 0; i < N_SHM_REGIONS; ++i) {
3082 if (shm_regions[i].start == shmaddr) {
3083 shm_regions[i].start = 0;
3084 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3085 break;
3089 return get_errno(shmdt(g2h(shmaddr)));
3092 #ifdef TARGET_NR_ipc
3093 /* ??? This only works with linear mappings. */
3094 /* do_ipc() must return target values and target errnos. */
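/* The single ipc(2) entry point multiplexes all SysV IPC operations: the
 * low 16 bits of 'call' select the operation (IPCOP_semop, IPCOP_shmat,
 * ...) and the upper 16 bits carry a version number that changes how
 * some arguments are read, e.g. the version 0 IPCOP_msgrcv layout passes
 * a {msgp, msgtyp} pair through 'ptr'. */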
3095 static abi_long do_ipc(unsigned int call, int first,
3096 int second, int third,
3097 abi_long ptr, abi_long fifth)
3099 int version;
3100 abi_long ret = 0;
3102 version = call >> 16;
3103 call &= 0xffff;
3105 switch (call) {
3106 case IPCOP_semop:
3107 ret = do_semop(first, ptr, second);
3108 break;
3110 case IPCOP_semget:
3111 ret = get_errno(semget(first, second, third));
3112 break;
3114 case IPCOP_semctl:
3115 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3116 break;
3118 case IPCOP_msgget:
3119 ret = get_errno(msgget(first, second));
3120 break;
3122 case IPCOP_msgsnd:
3123 ret = do_msgsnd(first, ptr, second, third);
3124 break;
3126 case IPCOP_msgctl:
3127 ret = do_msgctl(first, second, ptr);
3128 break;
3130 case IPCOP_msgrcv:
3131 switch (version) {
3132 case 0:
3134 struct target_ipc_kludge {
3135 abi_long msgp;
3136 abi_long msgtyp;
3137 } *tmp;
3139 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3140 ret = -TARGET_EFAULT;
3141 break;
3144 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3146 unlock_user_struct(tmp, ptr, 0);
3147 break;
3149 default:
3150 ret = do_msgrcv(first, ptr, second, fifth, third);
3152 break;
3154 case IPCOP_shmat:
3155 switch (version) {
3156 default:
3158 abi_ulong raddr;
3159 raddr = do_shmat(first, ptr, second);
3160 if (is_error(raddr))
3161 return get_errno(raddr);
3162 if (put_user_ual(raddr, third))
3163 return -TARGET_EFAULT;
3164 break;
3166 case 1:
3167 ret = -TARGET_EINVAL;
3168 break;
3170 break;
3171 case IPCOP_shmdt:
3172 ret = do_shmdt(ptr);
3173 break;
3175 case IPCOP_shmget:
3176 /* IPC_* flag values are the same on all linux platforms */
3177 ret = get_errno(shmget(first, second, third));
3178 break;
3180 /* IPC_* and SHM_* command values are the same on all linux platforms */
3181 case IPCOP_shmctl:
3182 ret = do_shmctl(first, second, third);
3183 break;
3184 default:
3185 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3186 ret = -TARGET_ENOSYS;
3187 break;
3189 return ret;
3191 #endif
3193 /* kernel structure types definitions */
3195 #define STRUCT(name, ...) STRUCT_ ## name,
3196 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3197 enum {
3198 #include "syscall_types.h"
3200 #undef STRUCT
3201 #undef STRUCT_SPECIAL
3203 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3204 #define STRUCT_SPECIAL(name)
3205 #include "syscall_types.h"
3206 #undef STRUCT
3207 #undef STRUCT_SPECIAL
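/* syscall_types.h is included twice with different STRUCT() definitions:
 * the first pass turns each entry into a STRUCT_name enum value, the
 * second into a struct_name_def[] array of argtype tokens terminated by
 * TYPE_NULL, which is what thunk_convert() walks when translating ioctl
 * arguments between guest and host layouts. STRUCT_SPECIAL entries get
 * no generated descriptor and are converted by hand-written code (see
 * struct_termios_def below). */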
3209 typedef struct IOCTLEntry IOCTLEntry;
3211 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3212 int fd, abi_long cmd, abi_long arg);
3214 struct IOCTLEntry {
3215 unsigned int target_cmd;
3216 unsigned int host_cmd;
3217 const char *name;
3218 int access;
3219 do_ioctl_fn *do_ioctl;
3220 const argtype arg_type[5];
3223 #define IOC_R 0x0001
3224 #define IOC_W 0x0002
3225 #define IOC_RW (IOC_R | IOC_W)
3227 #define MAX_STRUCT_SIZE 4096
3229 #ifdef CONFIG_FIEMAP
3230 /* So fiemap access checks don't overflow on 32 bit systems.
3231 * This is very slightly smaller than the limit imposed by
3232 * the underlying kernel.
3234 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3235 / sizeof(struct fiemap_extent))
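/* i.e. any fm_extent_count accepted below satisfies
 *   sizeof(struct fiemap) + count * sizeof(struct fiemap_extent) <= UINT_MAX
 * so the 32-bit outbufsz computation further down cannot wrap. */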
3237 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3238 int fd, abi_long cmd, abi_long arg)
3240 /* The parameter for this ioctl is a struct fiemap followed
3241 * by an array of struct fiemap_extent whose size is set
3242 * in fiemap->fm_extent_count. The array is filled in by the
3243 * ioctl.
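 * Roughly, the guest buffer looks like
 *     struct fiemap hdr;              (fm_extent_count = N, set by guest)
 *     struct fiemap_extent ext[N];    (filled in on return)
 * which is why the code below sizes its bounce buffer as
 * sizeof(*fm) + N * sizeof(struct fiemap_extent) and converts the header
 * and each extent separately with thunk_convert().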
3245 int target_size_in, target_size_out;
3246 struct fiemap *fm;
3247 const argtype *arg_type = ie->arg_type;
3248 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3249 void *argptr, *p;
3250 abi_long ret;
3251 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3252 uint32_t outbufsz;
3253 int free_fm = 0;
3255 assert(arg_type[0] == TYPE_PTR);
3256 assert(ie->access == IOC_RW);
3257 arg_type++;
3258 target_size_in = thunk_type_size(arg_type, 0);
3259 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3260 if (!argptr) {
3261 return -TARGET_EFAULT;
3263 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3264 unlock_user(argptr, arg, 0);
3265 fm = (struct fiemap *)buf_temp;
3266 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3267 return -TARGET_EINVAL;
3270 outbufsz = sizeof (*fm) +
3271 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3273 if (outbufsz > MAX_STRUCT_SIZE) {
3274 /* We can't fit all the extents into the fixed size buffer.
3275 * Allocate one that is large enough and use it instead.
3277 fm = malloc(outbufsz);
3278 if (!fm) {
3279 return -TARGET_ENOMEM;
3281 memcpy(fm, buf_temp, sizeof(struct fiemap));
3282 free_fm = 1;
3284 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3285 if (!is_error(ret)) {
3286 target_size_out = target_size_in;
3287 /* An extent_count of 0 means we were only counting the extents,
3288 * so there are no structs to copy.
3290 if (fm->fm_extent_count != 0) {
3291 target_size_out += fm->fm_mapped_extents * extent_size;
3293 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3294 if (!argptr) {
3295 ret = -TARGET_EFAULT;
3296 } else {
3297 /* Convert the struct fiemap */
3298 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3299 if (fm->fm_extent_count != 0) {
3300 p = argptr + target_size_in;
3301 /* ...and then all the struct fiemap_extents */
3302 for (i = 0; i < fm->fm_mapped_extents; i++) {
3303 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3304 THUNK_TARGET);
3305 p += extent_size;
3308 unlock_user(argptr, arg, target_size_out);
3311 if (free_fm) {
3312 free(fm);
3314 return ret;
3316 #endif
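/* SIOCGIFCONF needs special handling because the guest's struct ifreq
 * may differ in size from the host's: the guest-supplied ifc_len is
 * counted in target ifreq units, so it is rescaled to
 * nb_ifreq * sizeof(struct ifreq) for the host ioctl and scaled back
 * (with each ifreq thunk-converted) before the results are copied out. */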
3318 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3319 int fd, abi_long cmd, abi_long arg)
3321 const argtype *arg_type = ie->arg_type;
3322 int target_size;
3323 void *argptr;
3324 int ret;
3325 struct ifconf *host_ifconf;
3326 uint32_t outbufsz;
3327 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3328 int target_ifreq_size;
3329 int nb_ifreq;
3330 int free_buf = 0;
3331 int i;
3332 int target_ifc_len;
3333 abi_long target_ifc_buf;
3334 int host_ifc_len;
3335 char *host_ifc_buf;
3337 assert(arg_type[0] == TYPE_PTR);
3338 assert(ie->access == IOC_RW);
3340 arg_type++;
3341 target_size = thunk_type_size(arg_type, 0);
3343 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3344 if (!argptr)
3345 return -TARGET_EFAULT;
3346 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3347 unlock_user(argptr, arg, 0);
3349 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3350 target_ifc_len = host_ifconf->ifc_len;
3351 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3353 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3354 nb_ifreq = target_ifc_len / target_ifreq_size;
3355 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3357 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3358 if (outbufsz > MAX_STRUCT_SIZE) {
3359 /* We can't fit all the ifreq entries into the fixed size buffer.
3360 * Allocate one that is large enough and use it instead.
3362 host_ifconf = malloc(outbufsz);
3363 if (!host_ifconf) {
3364 return -TARGET_ENOMEM;
3366 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3367 free_buf = 1;
3369 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3371 host_ifconf->ifc_len = host_ifc_len;
3372 host_ifconf->ifc_buf = host_ifc_buf;
3374 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3375 if (!is_error(ret)) {
3376 /* convert host ifc_len to target ifc_len */
3378 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3379 target_ifc_len = nb_ifreq * target_ifreq_size;
3380 host_ifconf->ifc_len = target_ifc_len;
3382 /* restore target ifc_buf */
3384 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3386 /* copy struct ifconf to target user */
3388 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3389 if (!argptr)
3390 return -TARGET_EFAULT;
3391 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3392 unlock_user(argptr, arg, target_size);
3394 /* copy ifreq[] to target user */
3396 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3397 for (i = 0; i < nb_ifreq ; i++) {
3398 thunk_convert(argptr + i * target_ifreq_size,
3399 host_ifc_buf + i * sizeof(struct ifreq),
3400 ifreq_arg_type, THUNK_TARGET);
3402 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3405 if (free_buf) {
3406 free(host_ifconf);
3409 return ret;
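/* The device-mapper ioctls take a struct dm_ioctl header followed by a
 * variable-sized data area whose offset and total length are given by
 * data_start and data_size, so the generic thunk machinery cannot handle
 * them; the header and the per-command payload are converted by hand
 * below, in both directions. */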
3412 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3413 abi_long cmd, abi_long arg)
3415 void *argptr;
3416 struct dm_ioctl *host_dm;
3417 abi_long guest_data;
3418 uint32_t guest_data_size;
3419 int target_size;
3420 const argtype *arg_type = ie->arg_type;
3421 abi_long ret;
3422 void *big_buf = NULL;
3423 char *host_data;
3425 arg_type++;
3426 target_size = thunk_type_size(arg_type, 0);
3427 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3428 if (!argptr) {
3429 ret = -TARGET_EFAULT;
3430 goto out;
3432 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3433 unlock_user(argptr, arg, 0);
3435 /* buf_temp is too small, so fetch things into a bigger buffer */
3436 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3437 memcpy(big_buf, buf_temp, target_size);
3438 buf_temp = big_buf;
3439 host_dm = big_buf;
3441 guest_data = arg + host_dm->data_start;
3442 if ((guest_data - arg) < 0) {
3443 ret = -EINVAL;
3444 goto out;
3446 guest_data_size = host_dm->data_size - host_dm->data_start;
3447 host_data = (char*)host_dm + host_dm->data_start;
3449 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3450 switch (ie->host_cmd) {
3451 case DM_REMOVE_ALL:
3452 case DM_LIST_DEVICES:
3453 case DM_DEV_CREATE:
3454 case DM_DEV_REMOVE:
3455 case DM_DEV_SUSPEND:
3456 case DM_DEV_STATUS:
3457 case DM_DEV_WAIT:
3458 case DM_TABLE_STATUS:
3459 case DM_TABLE_CLEAR:
3460 case DM_TABLE_DEPS:
3461 case DM_LIST_VERSIONS:
3462 /* no input data */
3463 break;
3464 case DM_DEV_RENAME:
3465 case DM_DEV_SET_GEOMETRY:
3466 /* data contains only strings */
3467 memcpy(host_data, argptr, guest_data_size);
3468 break;
3469 case DM_TARGET_MSG:
3470 memcpy(host_data, argptr, guest_data_size);
3471 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3472 break;
3473 case DM_TABLE_LOAD:
3475 void *gspec = argptr;
3476 void *cur_data = host_data;
3477 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3478 int spec_size = thunk_type_size(arg_type, 0);
3479 int i;
3481 for (i = 0; i < host_dm->target_count; i++) {
3482 struct dm_target_spec *spec = cur_data;
3483 uint32_t next;
3484 int slen;
3486 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3487 slen = strlen((char*)gspec + spec_size) + 1;
3488 next = spec->next;
3489 spec->next = sizeof(*spec) + slen;
3490 strcpy((char*)&spec[1], gspec + spec_size);
3491 gspec += next;
3492 cur_data += spec->next;
3494 break;
3496 default:
3497 ret = -TARGET_EINVAL;
3498 goto out;
3500 unlock_user(argptr, guest_data, 0);
3502 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3503 if (!is_error(ret)) {
3504 guest_data = arg + host_dm->data_start;
3505 guest_data_size = host_dm->data_size - host_dm->data_start;
3506 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3507 switch (ie->host_cmd) {
3508 case DM_REMOVE_ALL:
3509 case DM_DEV_CREATE:
3510 case DM_DEV_REMOVE:
3511 case DM_DEV_RENAME:
3512 case DM_DEV_SUSPEND:
3513 case DM_DEV_STATUS:
3514 case DM_TABLE_LOAD:
3515 case DM_TABLE_CLEAR:
3516 case DM_TARGET_MSG:
3517 case DM_DEV_SET_GEOMETRY:
3518 /* no return data */
3519 break;
3520 case DM_LIST_DEVICES:
3522 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3523 uint32_t remaining_data = guest_data_size;
3524 void *cur_data = argptr;
3525 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3526 int nl_size = 12; /* can't use thunk_size due to alignment */
3528 while (1) {
3529 uint32_t next = nl->next;
3530 if (next) {
3531 nl->next = nl_size + (strlen(nl->name) + 1);
3533 if (remaining_data < nl->next) {
3534 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3535 break;
3537 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3538 strcpy(cur_data + nl_size, nl->name);
3539 cur_data += nl->next;
3540 remaining_data -= nl->next;
3541 if (!next) {
3542 break;
3544 nl = (void*)nl + next;
3546 break;
3548 case DM_DEV_WAIT:
3549 case DM_TABLE_STATUS:
3551 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3552 void *cur_data = argptr;
3553 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3554 int spec_size = thunk_type_size(arg_type, 0);
3555 int i;
3557 for (i = 0; i < host_dm->target_count; i++) {
3558 uint32_t next = spec->next;
3559 int slen = strlen((char*)&spec[1]) + 1;
3560 spec->next = (cur_data - argptr) + spec_size + slen;
3561 if (guest_data_size < spec->next) {
3562 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3563 break;
3565 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3566 strcpy(cur_data + spec_size, (char*)&spec[1]);
3567 cur_data = argptr + spec->next;
3568 spec = (void*)host_dm + host_dm->data_start + next;
3570 break;
3572 case DM_TABLE_DEPS:
3574 void *hdata = (void*)host_dm + host_dm->data_start;
3575 int count = *(uint32_t*)hdata;
3576 uint64_t *hdev = hdata + 8;
3577 uint64_t *gdev = argptr + 8;
3578 int i;
3580 *(uint32_t*)argptr = tswap32(count);
3581 for (i = 0; i < count; i++) {
3582 *gdev = tswap64(*hdev);
3583 gdev++;
3584 hdev++;
3586 break;
3588 case DM_LIST_VERSIONS:
3590 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3591 uint32_t remaining_data = guest_data_size;
3592 void *cur_data = argptr;
3593 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3594 int vers_size = thunk_type_size(arg_type, 0);
3596 while (1) {
3597 uint32_t next = vers->next;
3598 if (next) {
3599 vers->next = vers_size + (strlen(vers->name) + 1);
3601 if (remaining_data < vers->next) {
3602 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3603 break;
3605 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3606 strcpy(cur_data + vers_size, vers->name);
3607 cur_data += vers->next;
3608 remaining_data -= vers->next;
3609 if (!next) {
3610 break;
3612 vers = (void*)vers + next;
3614 break;
3616 default:
3617 ret = -TARGET_EINVAL;
3618 goto out;
3620 unlock_user(argptr, guest_data, guest_data_size);
3622 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3623 if (!argptr) {
3624 ret = -TARGET_EFAULT;
3625 goto out;
3627 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3628 unlock_user(argptr, arg, target_size);
3630 out:
3631 g_free(big_buf);
3632 return ret;
3635 static IOCTLEntry ioctl_entries[] = {
3636 #define IOCTL(cmd, access, ...) \
3637 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3638 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3639 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3640 #include "ioctls.h"
3641 { 0, 0, },
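/* For illustration, an ioctls.h entry of the form
 *     IOCTL(FOOIOC_BAR, IOC_R, MK_PTR(TYPE_INT))
 * (a made-up name; the real list lives in ioctls.h) would expand to
 *     { TARGET_FOOIOC_BAR, FOOIOC_BAR, "FOOIOC_BAR", IOC_R, 0,
 *       { MK_PTR(TYPE_INT) } },
 * i.e. target command number, host command number, printable name,
 * access direction, no special handler, and the argument type
 * description consumed by the thunk converter. */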
3644 /* ??? Implement proper locking for ioctls. */
3645 /* do_ioctl() Must return target values and target errnos. */
3646 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3648 const IOCTLEntry *ie;
3649 const argtype *arg_type;
3650 abi_long ret;
3651 uint8_t buf_temp[MAX_STRUCT_SIZE];
3652 int target_size;
3653 void *argptr;
3655 ie = ioctl_entries;
3656 for(;;) {
3657 if (ie->target_cmd == 0) {
3658 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3659 return -TARGET_ENOSYS;
3661 if (ie->target_cmd == cmd)
3662 break;
3663 ie++;
3665 arg_type = ie->arg_type;
3666 #if defined(DEBUG)
3667 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3668 #endif
3669 if (ie->do_ioctl) {
3670 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3673 switch(arg_type[0]) {
3674 case TYPE_NULL:
3675 /* no argument */
3676 ret = get_errno(ioctl(fd, ie->host_cmd));
3677 break;
3678 case TYPE_PTRVOID:
3679 case TYPE_INT:
3680 /* int argument */
3681 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3682 break;
3683 case TYPE_PTR:
3684 arg_type++;
3685 target_size = thunk_type_size(arg_type, 0);
3686 switch(ie->access) {
3687 case IOC_R:
3688 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3689 if (!is_error(ret)) {
3690 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3691 if (!argptr)
3692 return -TARGET_EFAULT;
3693 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3694 unlock_user(argptr, arg, target_size);
3696 break;
3697 case IOC_W:
3698 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3699 if (!argptr)
3700 return -TARGET_EFAULT;
3701 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3702 unlock_user(argptr, arg, 0);
3703 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3704 break;
3705 default:
3706 case IOC_RW:
3707 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3708 if (!argptr)
3709 return -TARGET_EFAULT;
3710 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3711 unlock_user(argptr, arg, 0);
3712 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3713 if (!is_error(ret)) {
3714 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3715 if (!argptr)
3716 return -TARGET_EFAULT;
3717 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3718 unlock_user(argptr, arg, target_size);
3720 break;
3722 break;
3723 default:
3724 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3725 (long)cmd, arg_type[0]);
3726 ret = -TARGET_ENOSYS;
3727 break;
3729 return ret;
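/* Each bitmask_transtbl row below is, roughly,
 *     { target_mask, target_bits, host_mask, host_bits }
 * meaning: if (value & target_mask) == target_bits then OR host_bits
 * into the result (and the reverse for host_to_target_bitmask()).
 * Single-bit flags repeat the same value in both columns, while
 * multi-bit fields such as CBAUD, CSIZE or the delay fields enumerate
 * one row per possible field value. */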
3732 static const bitmask_transtbl iflag_tbl[] = {
3733 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3734 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3735 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3736 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3737 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3738 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3739 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3740 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3741 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3742 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3743 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3744 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3745 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3746 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3747 { 0, 0, 0, 0 }
3750 static const bitmask_transtbl oflag_tbl[] = {
3751 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3752 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3753 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3754 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3755 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3756 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3757 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3758 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3759 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3760 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3761 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3762 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3763 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3764 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3765 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3766 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3767 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3768 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3769 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3770 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3771 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3772 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3773 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3774 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3775 { 0, 0, 0, 0 }
3778 static const bitmask_transtbl cflag_tbl[] = {
3779 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3780 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3781 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3782 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3783 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3784 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3785 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3786 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3787 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3788 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3789 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3790 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3791 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3792 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3793 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3794 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3795 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3796 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3797 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3798 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3799 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3800 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3801 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3802 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3803 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3804 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3805 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3806 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3807 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3808 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3809 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3810 { 0, 0, 0, 0 }
3813 static const bitmask_transtbl lflag_tbl[] = {
3814 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3815 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3816 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3817 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3818 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3819 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3820 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3821 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3822 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3823 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3824 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3825 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3826 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3827 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3828 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3829 { 0, 0, 0, 0 }
3832 static void target_to_host_termios (void *dst, const void *src)
3834 struct host_termios *host = dst;
3835 const struct target_termios *target = src;
3837 host->c_iflag =
3838 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3839 host->c_oflag =
3840 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3841 host->c_cflag =
3842 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3843 host->c_lflag =
3844 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3845 host->c_line = target->c_line;
3847 memset(host->c_cc, 0, sizeof(host->c_cc));
3848 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3849 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3850 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3851 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3852 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3853 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3854 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3855 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3856 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3857 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3858 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3859 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3860 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3861 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3862 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3863 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3864 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3867 static void host_to_target_termios (void *dst, const void *src)
3869 struct target_termios *target = dst;
3870 const struct host_termios *host = src;
3872 target->c_iflag =
3873 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3874 target->c_oflag =
3875 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3876 target->c_cflag =
3877 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3878 target->c_lflag =
3879 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3880 target->c_line = host->c_line;
3882 memset(target->c_cc, 0, sizeof(target->c_cc));
3883 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3884 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3885 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3886 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3887 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3888 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3889 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3890 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3891 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3892 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3893 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3894 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3895 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3896 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3897 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3898 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3899 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3902 static const StructEntry struct_termios_def = {
3903 .convert = { host_to_target_termios, target_to_host_termios },
3904 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3905 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3908 static bitmask_transtbl mmap_flags_tbl[] = {
3909 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3910 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3911 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3912 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3913 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3914 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3915 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3916 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3917 { 0, 0, 0, 0 }
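/*
 * Each bitmask_transtbl row appears to be { target_mask, target_bits,
 * host_mask, host_bits }: target_to_host_bitmask() ORs host_bits into the
 * result for every row whose target_bits are present under target_mask,
 * and host_to_target_bitmask() does the reverse.  Illustrative example:
 * a guest mmap() passing (TARGET_MAP_PRIVATE | TARGET_MAP_ANONYMOUS) is
 * rewritten to the host's (MAP_PRIVATE | MAP_ANONYMOUS) even when the two
 * ABIs use different numeric values for those flags.
 */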
3920 #if defined(TARGET_I386)
3922 /* NOTE: there is really only one LDT for all the threads */
3923 static uint8_t *ldt_table;
3925 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3927 int size;
3928 void *p;
3930 if (!ldt_table)
3931 return 0;
3932 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3933 if (size > bytecount)
3934 size = bytecount;
3935 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3936 if (!p)
3937 return -TARGET_EFAULT;
3938 /* ??? Should this be byteswapped? */
3939 memcpy(p, ldt_table, size);
3940 unlock_user(p, ptr, size);
3941 return size;
3944 /* XXX: add locking support */
3945 static abi_long write_ldt(CPUX86State *env,
3946 abi_ulong ptr, unsigned long bytecount, int oldmode)
3948 struct target_modify_ldt_ldt_s ldt_info;
3949 struct target_modify_ldt_ldt_s *target_ldt_info;
3950 int seg_32bit, contents, read_exec_only, limit_in_pages;
3951 int seg_not_present, useable, lm;
3952 uint32_t *lp, entry_1, entry_2;
3954 if (bytecount != sizeof(ldt_info))
3955 return -TARGET_EINVAL;
3956 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3957 return -TARGET_EFAULT;
3958 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3959 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3960 ldt_info.limit = tswap32(target_ldt_info->limit);
3961 ldt_info.flags = tswap32(target_ldt_info->flags);
3962 unlock_user_struct(target_ldt_info, ptr, 0);
3964 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3965 return -TARGET_EINVAL;
3966 seg_32bit = ldt_info.flags & 1;
3967 contents = (ldt_info.flags >> 1) & 3;
3968 read_exec_only = (ldt_info.flags >> 3) & 1;
3969 limit_in_pages = (ldt_info.flags >> 4) & 1;
3970 seg_not_present = (ldt_info.flags >> 5) & 1;
3971 useable = (ldt_info.flags >> 6) & 1;
3972 #ifdef TARGET_ABI32
3973 lm = 0;
3974 #else
3975 lm = (ldt_info.flags >> 7) & 1;
3976 #endif
3977 if (contents == 3) {
3978 if (oldmode)
3979 return -TARGET_EINVAL;
3980 if (seg_not_present == 0)
3981 return -TARGET_EINVAL;
3983 /* allocate the LDT */
3984 if (!ldt_table) {
3985 env->ldt.base = target_mmap(0,
3986 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3987 PROT_READ|PROT_WRITE,
3988 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3989 if (env->ldt.base == -1)
3990 return -TARGET_ENOMEM;
3991 memset(g2h(env->ldt.base), 0,
3992 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3993 env->ldt.limit = 0xffff;
3994 ldt_table = g2h(env->ldt.base);
3997 /* NOTE: same code as Linux kernel */
3998 /* Allow LDTs to be cleared by the user. */
3999 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4000 if (oldmode ||
4001 (contents == 0 &&
4002 read_exec_only == 1 &&
4003 seg_32bit == 0 &&
4004 limit_in_pages == 0 &&
4005 seg_not_present == 1 &&
4006 useable == 0 )) {
4007 entry_1 = 0;
4008 entry_2 = 0;
4009 goto install;
4013 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4014 (ldt_info.limit & 0x0ffff);
4015 entry_2 = (ldt_info.base_addr & 0xff000000) |
4016 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4017 (ldt_info.limit & 0xf0000) |
4018 ((read_exec_only ^ 1) << 9) |
4019 (contents << 10) |
4020 ((seg_not_present ^ 1) << 15) |
4021 (seg_32bit << 22) |
4022 (limit_in_pages << 23) |
4023 (lm << 21) |
4024 0x7000;
4025 if (!oldmode)
4026 entry_2 |= (useable << 20);
4028 /* Install the new entry ... */
4029 install:
4030 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4031 lp[0] = tswap32(entry_1);
4032 lp[1] = tswap32(entry_2);
4033 return 0;
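/*
 * entry_1/entry_2 above form the raw 8-byte x86 segment descriptor, split
 * into two 32-bit words the same way the kernel's write_ldt() builds it:
 * entry_1 holds limit[15:0] and base[15:0]; entry_2 holds base[23:16] and
 * base[31:24], limit[19:16], the access byte (0x7000 sets S=1 and DPL=3,
 * bit 9 is the write/read enable, bits 10-11 the "contents" type, bit 15
 * the present bit) and the AVL/L/D/G flags in bits 20-23.  Worked example:
 * a flat 32-bit user data segment (base 0, limit 0xfffff, limit_in_pages=1,
 * seg_32bit=1, contents=0, read_exec_only=0, seg_not_present=0, useable=0)
 * encodes as
 *
 *     entry_1 = 0x0000ffff;
 *     entry_2 = 0x00cff200;   (the classic 0x00cff2000000ffff descriptor)
 */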
4036 /* specific and weird i386 syscalls */
4037 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4038 unsigned long bytecount)
4040 abi_long ret;
4042 switch (func) {
4043 case 0:
4044 ret = read_ldt(ptr, bytecount);
4045 break;
4046 case 1:
4047 ret = write_ldt(env, ptr, bytecount, 1);
4048 break;
4049 case 0x11:
4050 ret = write_ldt(env, ptr, bytecount, 0);
4051 break;
4052 default:
4053 ret = -TARGET_ENOSYS;
4054 break;
4056 return ret;
4059 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4060 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4062 uint64_t *gdt_table = g2h(env->gdt.base);
4063 struct target_modify_ldt_ldt_s ldt_info;
4064 struct target_modify_ldt_ldt_s *target_ldt_info;
4065 int seg_32bit, contents, read_exec_only, limit_in_pages;
4066 int seg_not_present, useable, lm;
4067 uint32_t *lp, entry_1, entry_2;
4068 int i;
4070 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4071 if (!target_ldt_info)
4072 return -TARGET_EFAULT;
4073 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4074 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4075 ldt_info.limit = tswap32(target_ldt_info->limit);
4076 ldt_info.flags = tswap32(target_ldt_info->flags);
4077 if (ldt_info.entry_number == -1) {
4078 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4079 if (gdt_table[i] == 0) {
4080 ldt_info.entry_number = i;
4081 target_ldt_info->entry_number = tswap32(i);
4082 break;
4086 unlock_user_struct(target_ldt_info, ptr, 1);
4088 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4089 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4090 return -TARGET_EINVAL;
4091 seg_32bit = ldt_info.flags & 1;
4092 contents = (ldt_info.flags >> 1) & 3;
4093 read_exec_only = (ldt_info.flags >> 3) & 1;
4094 limit_in_pages = (ldt_info.flags >> 4) & 1;
4095 seg_not_present = (ldt_info.flags >> 5) & 1;
4096 useable = (ldt_info.flags >> 6) & 1;
4097 #ifdef TARGET_ABI32
4098 lm = 0;
4099 #else
4100 lm = (ldt_info.flags >> 7) & 1;
4101 #endif
4103 if (contents == 3) {
4104 if (seg_not_present == 0)
4105 return -TARGET_EINVAL;
4108 /* NOTE: same code as Linux kernel */
4109 /* Allow LDTs to be cleared by the user. */
4110 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4111 if ((contents == 0 &&
4112 read_exec_only == 1 &&
4113 seg_32bit == 0 &&
4114 limit_in_pages == 0 &&
4115 seg_not_present == 1 &&
4116 useable == 0 )) {
4117 entry_1 = 0;
4118 entry_2 = 0;
4119 goto install;
4123 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4124 (ldt_info.limit & 0x0ffff);
4125 entry_2 = (ldt_info.base_addr & 0xff000000) |
4126 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4127 (ldt_info.limit & 0xf0000) |
4128 ((read_exec_only ^ 1) << 9) |
4129 (contents << 10) |
4130 ((seg_not_present ^ 1) << 15) |
4131 (seg_32bit << 22) |
4132 (limit_in_pages << 23) |
4133 (useable << 20) |
4134 (lm << 21) |
4135 0x7000;
4137 /* Install the new entry ... */
4138 install:
4139 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4140 lp[0] = tswap32(entry_1);
4141 lp[1] = tswap32(entry_2);
4142 return 0;
4145 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4147 struct target_modify_ldt_ldt_s *target_ldt_info;
4148 uint64_t *gdt_table = g2h(env->gdt.base);
4149 uint32_t base_addr, limit, flags;
4150 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4151 int seg_not_present, useable, lm;
4152 uint32_t *lp, entry_1, entry_2;
4154 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4155 if (!target_ldt_info)
4156 return -TARGET_EFAULT;
4157 idx = tswap32(target_ldt_info->entry_number);
4158 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4159 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4160 unlock_user_struct(target_ldt_info, ptr, 1);
4161 return -TARGET_EINVAL;
4163 lp = (uint32_t *)(gdt_table + idx);
4164 entry_1 = tswap32(lp[0]);
4165 entry_2 = tswap32(lp[1]);
4167 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4168 contents = (entry_2 >> 10) & 3;
4169 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4170 seg_32bit = (entry_2 >> 22) & 1;
4171 limit_in_pages = (entry_2 >> 23) & 1;
4172 useable = (entry_2 >> 20) & 1;
4173 #ifdef TARGET_ABI32
4174 lm = 0;
4175 #else
4176 lm = (entry_2 >> 21) & 1;
4177 #endif
4178 flags = (seg_32bit << 0) | (contents << 1) |
4179 (read_exec_only << 3) | (limit_in_pages << 4) |
4180 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4181 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4182 base_addr = (entry_1 >> 16) |
4183 (entry_2 & 0xff000000) |
4184 ((entry_2 & 0xff) << 16);
4185 target_ldt_info->base_addr = tswapal(base_addr);
4186 target_ldt_info->limit = tswap32(limit);
4187 target_ldt_info->flags = tswap32(flags);
4188 unlock_user_struct(target_ldt_info, ptr, 1);
4189 return 0;
4191 #endif /* TARGET_I386 && TARGET_ABI32 */
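/*
 * In both the set_thread_area and get_thread_area paths above, the
 * guest-visible "flags" word packs the descriptor attributes in the bit
 * order of the kernel's struct user_desc bitfields: bit 0 seg_32bit,
 * bits 1-2 contents, bit 3 read_exec_only, bit 4 limit_in_pages,
 * bit 5 seg_not_present, bit 6 useable and (on 64-bit ABIs) bit 7 lm,
 * so a descriptor written via do_set_thread_area() reads back unchanged
 * through do_get_thread_area().
 */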
4193 #ifndef TARGET_ABI32
4194 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4196 abi_long ret = 0;
4197 abi_ulong val;
4198 int idx;
4200 switch(code) {
4201 case TARGET_ARCH_SET_GS:
4202 case TARGET_ARCH_SET_FS:
4203 if (code == TARGET_ARCH_SET_GS)
4204 idx = R_GS;
4205 else
4206 idx = R_FS;
4207 cpu_x86_load_seg(env, idx, 0);
4208 env->segs[idx].base = addr;
4209 break;
4210 case TARGET_ARCH_GET_GS:
4211 case TARGET_ARCH_GET_FS:
4212 if (code == TARGET_ARCH_GET_GS)
4213 idx = R_GS;
4214 else
4215 idx = R_FS;
4216 val = env->segs[idx].base;
4217 if (put_user(val, addr, abi_ulong))
4218 ret = -TARGET_EFAULT;
4219 break;
4220 default:
4221 ret = -TARGET_EINVAL;
4222 break;
4224 return ret;
4226 #endif
4228 #endif /* defined(TARGET_I386) */
4230 #define NEW_STACK_SIZE 0x40000
4232 #if defined(CONFIG_USE_NPTL)
4234 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4235 typedef struct {
4236 CPUArchState *env;
4237 pthread_mutex_t mutex;
4238 pthread_cond_t cond;
4239 pthread_t thread;
4240 uint32_t tid;
4241 abi_ulong child_tidptr;
4242 abi_ulong parent_tidptr;
4243 sigset_t sigmask;
4244 } new_thread_info;
4246 static void *clone_func(void *arg)
4248 new_thread_info *info = arg;
4249 CPUArchState *env;
4250 TaskState *ts;
4252 env = info->env;
4253 thread_env = env;
4254 ts = (TaskState *)thread_env->opaque;
4255 info->tid = gettid();
4256 env->host_tid = info->tid;
4257 task_settid(ts);
4258 if (info->child_tidptr)
4259 put_user_u32(info->tid, info->child_tidptr);
4260 if (info->parent_tidptr)
4261 put_user_u32(info->tid, info->parent_tidptr);
4262 /* Enable signals. */
4263 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4264 /* Signal to the parent that we're ready. */
4265 pthread_mutex_lock(&info->mutex);
4266 pthread_cond_broadcast(&info->cond);
4267 pthread_mutex_unlock(&info->mutex);
4268 /* Wait until the parent has finished initializing the TLS state. */
4269 pthread_mutex_lock(&clone_lock);
4270 pthread_mutex_unlock(&clone_lock);
4271 cpu_loop(env);
4272 /* never exits */
4273 return NULL;
4275 #else
4277 static int clone_func(void *arg)
4279 CPUArchState *env = arg;
4280 cpu_loop(env);
4281 /* never exits */
4282 return 0;
4284 #endif
4286 /* do_fork() must return host values and target errnos (unlike most
4287 do_*() functions). */
4288 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4289 abi_ulong parent_tidptr, target_ulong newtls,
4290 abi_ulong child_tidptr)
4292 int ret;
4293 TaskState *ts;
4294 CPUArchState *new_env;
4295 #if defined(CONFIG_USE_NPTL)
4296 unsigned int nptl_flags;
4297 sigset_t sigmask;
4298 #else
4299 uint8_t *new_stack;
4300 #endif
4302 /* Emulate vfork() with fork() */
4303 if (flags & CLONE_VFORK)
4304 flags &= ~(CLONE_VFORK | CLONE_VM);
4306 if (flags & CLONE_VM) {
4307 TaskState *parent_ts = (TaskState *)env->opaque;
4308 #if defined(CONFIG_USE_NPTL)
4309 new_thread_info info;
4310 pthread_attr_t attr;
4311 #endif
4312 ts = g_malloc0(sizeof(TaskState));
4313 init_task_state(ts);
4314 /* we create a new CPU instance. */
4315 new_env = cpu_copy(env);
4316 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4317 cpu_reset(ENV_GET_CPU(new_env));
4318 #endif
4319 /* Init regs that differ from the parent. */
4320 cpu_clone_regs(new_env, newsp);
4321 new_env->opaque = ts;
4322 ts->bprm = parent_ts->bprm;
4323 ts->info = parent_ts->info;
4324 #if defined(CONFIG_USE_NPTL)
4325 nptl_flags = flags;
4326 flags &= ~CLONE_NPTL_FLAGS2;
4328 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4329 ts->child_tidptr = child_tidptr;
4332 if (nptl_flags & CLONE_SETTLS)
4333 cpu_set_tls (new_env, newtls);
4335 /* Grab a mutex so that thread setup appears atomic. */
4336 pthread_mutex_lock(&clone_lock);
4338 memset(&info, 0, sizeof(info));
4339 pthread_mutex_init(&info.mutex, NULL);
4340 pthread_mutex_lock(&info.mutex);
4341 pthread_cond_init(&info.cond, NULL);
4342 info.env = new_env;
4343 if (nptl_flags & CLONE_CHILD_SETTID)
4344 info.child_tidptr = child_tidptr;
4345 if (nptl_flags & CLONE_PARENT_SETTID)
4346 info.parent_tidptr = parent_tidptr;
4348 ret = pthread_attr_init(&attr);
4349 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4350 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4351 /* It is not safe to deliver signals until the child has finished
4352 initializing, so temporarily block all signals. */
4353 sigfillset(&sigmask);
4354 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4356 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4357 /* TODO: Free new CPU state if thread creation failed. */
4359 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4360 pthread_attr_destroy(&attr);
4361 if (ret == 0) {
4362 /* Wait for the child to initialize. */
4363 pthread_cond_wait(&info.cond, &info.mutex);
4364 ret = info.tid;
4365 if (flags & CLONE_PARENT_SETTID)
4366 put_user_u32(ret, parent_tidptr);
4367 } else {
4368 ret = -1;
4370 pthread_mutex_unlock(&info.mutex);
4371 pthread_cond_destroy(&info.cond);
4372 pthread_mutex_destroy(&info.mutex);
4373 pthread_mutex_unlock(&clone_lock);
4374 #else
4375 if (flags & CLONE_NPTL_FLAGS2)
4376 return -EINVAL;
4377 /* This is probably going to die very quickly, but do it anyway. */
4378 new_stack = g_malloc0 (NEW_STACK_SIZE);
4379 #ifdef __ia64__
4380 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4381 #else
4382 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4383 #endif
4384 #endif
4385 } else {
4386 /* if no CLONE_VM, we consider it a fork */
4387 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4388 return -EINVAL;
4389 fork_start();
4390 ret = fork();
4391 if (ret == 0) {
4392 /* Child Process. */
4393 cpu_clone_regs(env, newsp);
4394 fork_end(1);
4395 #if defined(CONFIG_USE_NPTL)
4396 /* There is a race condition here. The parent process could
4397 theoretically read the TID in the child process before the child
4398 tid is set. This would require using either ptrace
4399 (not implemented) or having *_tidptr point at a shared memory
4400 mapping. We can't repeat the spinlock hack used above because
4401 the child process gets its own copy of the lock. */
4402 if (flags & CLONE_CHILD_SETTID)
4403 put_user_u32(gettid(), child_tidptr);
4404 if (flags & CLONE_PARENT_SETTID)
4405 put_user_u32(gettid(), parent_tidptr);
4406 ts = (TaskState *)env->opaque;
4407 if (flags & CLONE_SETTLS)
4408 cpu_set_tls (env, newtls);
4409 if (flags & CLONE_CHILD_CLEARTID)
4410 ts->child_tidptr = child_tidptr;
4411 #endif
4412 } else {
4413 fork_end(0);
4416 return ret;
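/*
 * The NPTL branch above uses a two-lock handshake: info.mutex/info.cond
 * let the child publish its tid back to the parent, while clone_lock keeps
 * the child parked at the top of clone_func() until the parent has
 * finished the per-thread setup.  A minimal sketch of the same pattern
 * (setup_lock/child_fn are illustrative names only):
 *
 *     pthread_mutex_lock(&setup_lock);              // parent side
 *     pthread_mutex_lock(&info.mutex);
 *     pthread_create(&tid, &attr, child_fn, &info);
 *     pthread_cond_wait(&info.cond, &info.mutex);   // child published its tid
 *     // ... finish per-thread setup ...
 *     pthread_mutex_unlock(&setup_lock);            // lets the child run
 */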
4419 /* warning: doesn't handle Linux-specific flags... */
4420 static int target_to_host_fcntl_cmd(int cmd)
4422 switch(cmd) {
4423 case TARGET_F_DUPFD:
4424 case TARGET_F_GETFD:
4425 case TARGET_F_SETFD:
4426 case TARGET_F_GETFL:
4427 case TARGET_F_SETFL:
4428 return cmd;
4429 case TARGET_F_GETLK:
4430 return F_GETLK;
4431 case TARGET_F_SETLK:
4432 return F_SETLK;
4433 case TARGET_F_SETLKW:
4434 return F_SETLKW;
4435 case TARGET_F_GETOWN:
4436 return F_GETOWN;
4437 case TARGET_F_SETOWN:
4438 return F_SETOWN;
4439 case TARGET_F_GETSIG:
4440 return F_GETSIG;
4441 case TARGET_F_SETSIG:
4442 return F_SETSIG;
4443 #if TARGET_ABI_BITS == 32
4444 case TARGET_F_GETLK64:
4445 return F_GETLK64;
4446 case TARGET_F_SETLK64:
4447 return F_SETLK64;
4448 case TARGET_F_SETLKW64:
4449 return F_SETLKW64;
4450 #endif
4451 case TARGET_F_SETLEASE:
4452 return F_SETLEASE;
4453 case TARGET_F_GETLEASE:
4454 return F_GETLEASE;
4455 #ifdef F_DUPFD_CLOEXEC
4456 case TARGET_F_DUPFD_CLOEXEC:
4457 return F_DUPFD_CLOEXEC;
4458 #endif
4459 case TARGET_F_NOTIFY:
4460 return F_NOTIFY;
4461 default:
4462 return -TARGET_EINVAL;
4464 return -TARGET_EINVAL;
4467 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4469 struct flock fl;
4470 struct target_flock *target_fl;
4471 struct flock64 fl64;
4472 struct target_flock64 *target_fl64;
4473 abi_long ret;
4474 int host_cmd = target_to_host_fcntl_cmd(cmd);
4476 if (host_cmd == -TARGET_EINVAL)
4477 return host_cmd;
4479 switch(cmd) {
4480 case TARGET_F_GETLK:
4481 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4482 return -TARGET_EFAULT;
4483 fl.l_type = tswap16(target_fl->l_type);
4484 fl.l_whence = tswap16(target_fl->l_whence);
4485 fl.l_start = tswapal(target_fl->l_start);
4486 fl.l_len = tswapal(target_fl->l_len);
4487 fl.l_pid = tswap32(target_fl->l_pid);
4488 unlock_user_struct(target_fl, arg, 0);
4489 ret = get_errno(fcntl(fd, host_cmd, &fl));
4490 if (ret == 0) {
4491 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4492 return -TARGET_EFAULT;
4493 target_fl->l_type = tswap16(fl.l_type);
4494 target_fl->l_whence = tswap16(fl.l_whence);
4495 target_fl->l_start = tswapal(fl.l_start);
4496 target_fl->l_len = tswapal(fl.l_len);
4497 target_fl->l_pid = tswap32(fl.l_pid);
4498 unlock_user_struct(target_fl, arg, 1);
4500 break;
4502 case TARGET_F_SETLK:
4503 case TARGET_F_SETLKW:
4504 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4505 return -TARGET_EFAULT;
4506 fl.l_type = tswap16(target_fl->l_type);
4507 fl.l_whence = tswap16(target_fl->l_whence);
4508 fl.l_start = tswapal(target_fl->l_start);
4509 fl.l_len = tswapal(target_fl->l_len);
4510 fl.l_pid = tswap32(target_fl->l_pid);
4511 unlock_user_struct(target_fl, arg, 0);
4512 ret = get_errno(fcntl(fd, host_cmd, &fl));
4513 break;
4515 case TARGET_F_GETLK64:
4516 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4517 return -TARGET_EFAULT;
4518 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4519 fl64.l_whence = tswap16(target_fl64->l_whence);
4520 fl64.l_start = tswap64(target_fl64->l_start);
4521 fl64.l_len = tswap64(target_fl64->l_len);
4522 fl64.l_pid = tswap32(target_fl64->l_pid);
4523 unlock_user_struct(target_fl64, arg, 0);
4524 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4525 if (ret == 0) {
4526 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4527 return -TARGET_EFAULT;
4528 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4529 target_fl64->l_whence = tswap16(fl64.l_whence);
4530 target_fl64->l_start = tswap64(fl64.l_start);
4531 target_fl64->l_len = tswap64(fl64.l_len);
4532 target_fl64->l_pid = tswap32(fl64.l_pid);
4533 unlock_user_struct(target_fl64, arg, 1);
4535 break;
4536 case TARGET_F_SETLK64:
4537 case TARGET_F_SETLKW64:
4538 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4539 return -TARGET_EFAULT;
4540 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4541 fl64.l_whence = tswap16(target_fl64->l_whence);
4542 fl64.l_start = tswap64(target_fl64->l_start);
4543 fl64.l_len = tswap64(target_fl64->l_len);
4544 fl64.l_pid = tswap32(target_fl64->l_pid);
4545 unlock_user_struct(target_fl64, arg, 0);
4546 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4547 break;
4549 case TARGET_F_GETFL:
4550 ret = get_errno(fcntl(fd, host_cmd, arg));
4551 if (ret >= 0) {
4552 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4554 break;
4556 case TARGET_F_SETFL:
4557 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4558 break;
4560 case TARGET_F_SETOWN:
4561 case TARGET_F_GETOWN:
4562 case TARGET_F_SETSIG:
4563 case TARGET_F_GETSIG:
4564 case TARGET_F_SETLEASE:
4565 case TARGET_F_GETLEASE:
4566 ret = get_errno(fcntl(fd, host_cmd, arg));
4567 break;
4569 default:
4570 ret = get_errno(fcntl(fd, cmd, arg));
4571 break;
4573 return ret;
4576 #ifdef USE_UID16
4578 static inline int high2lowuid(int uid)
4580 if (uid > 65535)
4581 return 65534;
4582 else
4583 return uid;
4586 static inline int high2lowgid(int gid)
4588 if (gid > 65535)
4589 return 65534;
4590 else
4591 return gid;
4594 static inline int low2highuid(int uid)
4596 if ((int16_t)uid == -1)
4597 return -1;
4598 else
4599 return uid;
4602 static inline int low2highgid(int gid)
4604 if ((int16_t)gid == -1)
4605 return -1;
4606 else
4607 return gid;
4609 static inline int tswapid(int id)
4611 return tswap16(id);
4613 #else /* !USE_UID16 */
4614 static inline int high2lowuid(int uid)
4616 return uid;
4618 static inline int high2lowgid(int gid)
4620 return gid;
4622 static inline int low2highuid(int uid)
4624 return uid;
4626 static inline int low2highgid(int gid)
4628 return gid;
4630 static inline int tswapid(int id)
4632 return tswap32(id);
4634 #endif /* USE_UID16 */
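/*
 * With USE_UID16 the target's uid_t/gid_t are only 16 bits wide, so host
 * ids are clamped on the way out and the 16-bit "no change" value is
 * widened on the way in.  For example, high2lowuid(100000) yields 65534
 * (the conventional overflow id), while low2highuid(0xffff) yields -1, so
 * a guest passing the 16-bit -1 to setresuid() still means "leave this id
 * unchanged".
 */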
4636 void syscall_init(void)
4638 IOCTLEntry *ie;
4639 const argtype *arg_type;
4640 int size;
4641 int i;
4643 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4644 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4645 #include "syscall_types.h"
4646 #undef STRUCT
4647 #undef STRUCT_SPECIAL
4649 /* Build target_to_host_errno_table[] table from
4650 * host_to_target_errno_table[]. */
4651 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4652 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4655 /* We patch the ioctl size if necessary. We rely on the fact that
4656 no ioctl has all the bits set to '1' in the size field. */
4657 ie = ioctl_entries;
4658 while (ie->target_cmd != 0) {
4659 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4660 TARGET_IOC_SIZEMASK) {
4661 arg_type = ie->arg_type;
4662 if (arg_type[0] != TYPE_PTR) {
4663 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4664 ie->target_cmd);
4665 exit(1);
4667 arg_type++;
4668 size = thunk_type_size(arg_type, 0);
4669 ie->target_cmd = (ie->target_cmd &
4670 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4671 (size << TARGET_IOC_SIZESHIFT);
4674 /* automatic consistency check if same arch */
4675 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4676 (defined(__x86_64__) && defined(TARGET_X86_64))
4677 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4678 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4679 ie->name, ie->target_cmd, ie->host_cmd);
4681 #endif
4682 ie++;
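/*
 * An ioctl request number encodes direction, type, number and argument
 * size.  Entries whose size field is the all-ones placeholder
 * (TARGET_IOC_SIZEMASK) cannot know the argument size until the thunk
 * types are registered, so the loop above patches in thunk_type_size():
 * e.g. for a 44-byte argument structure the placeholder bits are replaced
 * by (44 << TARGET_IOC_SIZESHIFT) while the other fields are preserved.
 */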
4686 #if TARGET_ABI_BITS == 32
4687 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4689 #ifdef TARGET_WORDS_BIGENDIAN
4690 return ((uint64_t)word0 << 32) | word1;
4691 #else
4692 return ((uint64_t)word1 << 32) | word0;
4693 #endif
4695 #else /* TARGET_ABI_BITS == 32 */
4696 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4698 return word0;
4700 #endif /* TARGET_ABI_BITS != 32 */
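/*
 * Illustrative use: on a 32-bit big-endian target a 64-bit file offset
 * passed as the pair (word0 = 0x00000001, word1 = 0x23456789) reassembles
 * to 0x0000000123456789; a little-endian target passes the halves in the
 * opposite order and target_offset64() yields the same value.
 */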
4702 #ifdef TARGET_NR_truncate64
4703 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4704 abi_long arg2,
4705 abi_long arg3,
4706 abi_long arg4)
4708 if (regpairs_aligned(cpu_env)) {
4709 arg2 = arg3;
4710 arg3 = arg4;
4712 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4714 #endif
4716 #ifdef TARGET_NR_ftruncate64
4717 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4718 abi_long arg2,
4719 abi_long arg3,
4720 abi_long arg4)
4722 if (regpairs_aligned(cpu_env)) {
4723 arg2 = arg3;
4724 arg3 = arg4;
4726 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4728 #endif
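/*
 * regpairs_aligned() covers ABIs (32-bit ARM EABI, for instance) that
 * require a 64-bit syscall argument to start in an even-numbered register:
 * the kernel then receives a padding word first, so the offset pair really
 * arrives in arg3/arg4 and the two wrappers above shift the arguments down
 * by one before recombining them with target_offset64().
 */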
4730 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4731 abi_ulong target_addr)
4733 struct target_timespec *target_ts;
4735 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4736 return -TARGET_EFAULT;
4737 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4738 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4739 unlock_user_struct(target_ts, target_addr, 0);
4740 return 0;
4743 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4744 struct timespec *host_ts)
4746 struct target_timespec *target_ts;
4748 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4749 return -TARGET_EFAULT;
4750 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4751 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4752 unlock_user_struct(target_ts, target_addr, 1);
4753 return 0;
4756 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4757 static inline abi_long host_to_target_stat64(void *cpu_env,
4758 abi_ulong target_addr,
4759 struct stat *host_st)
4761 #ifdef TARGET_ARM
4762 if (((CPUARMState *)cpu_env)->eabi) {
4763 struct target_eabi_stat64 *target_st;
4765 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4766 return -TARGET_EFAULT;
4767 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4768 __put_user(host_st->st_dev, &target_st->st_dev);
4769 __put_user(host_st->st_ino, &target_st->st_ino);
4770 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4771 __put_user(host_st->st_ino, &target_st->__st_ino);
4772 #endif
4773 __put_user(host_st->st_mode, &target_st->st_mode);
4774 __put_user(host_st->st_nlink, &target_st->st_nlink);
4775 __put_user(host_st->st_uid, &target_st->st_uid);
4776 __put_user(host_st->st_gid, &target_st->st_gid);
4777 __put_user(host_st->st_rdev, &target_st->st_rdev);
4778 __put_user(host_st->st_size, &target_st->st_size);
4779 __put_user(host_st->st_blksize, &target_st->st_blksize);
4780 __put_user(host_st->st_blocks, &target_st->st_blocks);
4781 __put_user(host_st->st_atime, &target_st->target_st_atime);
4782 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4783 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4784 unlock_user_struct(target_st, target_addr, 1);
4785 } else
4786 #endif
4788 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4789 struct target_stat *target_st;
4790 #else
4791 struct target_stat64 *target_st;
4792 #endif
4794 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4795 return -TARGET_EFAULT;
4796 memset(target_st, 0, sizeof(*target_st));
4797 __put_user(host_st->st_dev, &target_st->st_dev);
4798 __put_user(host_st->st_ino, &target_st->st_ino);
4799 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4800 __put_user(host_st->st_ino, &target_st->__st_ino);
4801 #endif
4802 __put_user(host_st->st_mode, &target_st->st_mode);
4803 __put_user(host_st->st_nlink, &target_st->st_nlink);
4804 __put_user(host_st->st_uid, &target_st->st_uid);
4805 __put_user(host_st->st_gid, &target_st->st_gid);
4806 __put_user(host_st->st_rdev, &target_st->st_rdev);
4807 /* XXX: better use of kernel struct */
4808 __put_user(host_st->st_size, &target_st->st_size);
4809 __put_user(host_st->st_blksize, &target_st->st_blksize);
4810 __put_user(host_st->st_blocks, &target_st->st_blocks);
4811 __put_user(host_st->st_atime, &target_st->target_st_atime);
4812 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4813 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4814 unlock_user_struct(target_st, target_addr, 1);
4817 return 0;
4819 #endif
4821 #if defined(CONFIG_USE_NPTL)
4822 /* ??? Using host futex calls even when target atomic operations
4823 are not really atomic probably breaks things. However, implementing
4824 futexes locally would make futexes shared between multiple processes
4825 tricky. They are probably useless anyway, because guest atomic
4826 operations won't work either. */
4827 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4828 target_ulong uaddr2, int val3)
4830 struct timespec ts, *pts;
4831 int base_op;
4833 /* ??? We assume FUTEX_* constants are the same on both host
4834 and target. */
4835 #ifdef FUTEX_CMD_MASK
4836 base_op = op & FUTEX_CMD_MASK;
4837 #else
4838 base_op = op;
4839 #endif
4840 switch (base_op) {
4841 case FUTEX_WAIT:
4842 if (timeout) {
4843 pts = &ts;
4844 target_to_host_timespec(pts, timeout);
4845 } else {
4846 pts = NULL;
4848 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4849 pts, NULL, 0));
4850 case FUTEX_WAKE:
4851 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4852 case FUTEX_FD:
4853 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4854 case FUTEX_REQUEUE:
4855 case FUTEX_CMP_REQUEUE:
4856 case FUTEX_WAKE_OP:
4857 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4858 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4859 But the prototype takes a `struct timespec *'; insert casts
4860 to satisfy the compiler. We do not need to tswap TIMEOUT
4861 since it's not compared to guest memory. */
4862 pts = (struct timespec *)(uintptr_t) timeout;
4863 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4864 g2h(uaddr2),
4865 (base_op == FUTEX_CMP_REQUEUE
4866 ? tswap32(val3)
4867 : val3)));
4868 default:
4869 return -TARGET_ENOSYS;
4872 #endif
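/*
 * Note that only the operands the kernel compares against guest memory are
 * byteswapped in do_futex(): FUTEX_WAIT's val and FUTEX_CMP_REQUEUE's val3
 * must be in guest byte order because *uaddr lives in guest memory, while
 * plain counts such as FUTEX_WAKE's val are host integers and are passed
 * through unchanged.
 */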
4874 /* Map host to target signal numbers for the wait family of syscalls.
4875 Assume all other status bits are the same. */
4876 static int host_to_target_waitstatus(int status)
4878 if (WIFSIGNALED(status)) {
4879 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4881 if (WIFSTOPPED(status)) {
4882 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4883 | (status & 0xff);
4885 return status;
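/*
 * The wait status layout itself is left alone: a terminated child keeps
 * the (remapped) signal number in the low 7 bits, a stopped child keeps
 * 0x7f in the low byte with the (remapped) signal in bits 8-15.  For
 * example, a child stopped by the host's SIGTSTP is reported to the guest
 * with the guest's SIGTSTP number in bits 8-15 and every other bit
 * untouched.
 */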
4888 int get_osversion(void)
4890 static int osversion;
4891 struct new_utsname buf;
4892 const char *s;
4893 int i, n, tmp;
4894 if (osversion)
4895 return osversion;
4896 if (qemu_uname_release && *qemu_uname_release) {
4897 s = qemu_uname_release;
4898 } else {
4899 if (sys_uname(&buf))
4900 return 0;
4901 s = buf.release;
4903 tmp = 0;
4904 for (i = 0; i < 3; i++) {
4905 n = 0;
4906 while (*s >= '0' && *s <= '9') {
4907 n *= 10;
4908 n += *s - '0';
4909 s++;
4911 tmp = (tmp << 8) + n;
4912 if (*s == '.')
4913 s++;
4915 osversion = tmp;
4916 return osversion;
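/*
 * The release string is packed one byte per component, so a host kernel
 * release (or a qemu_uname_release override) of "3.2.1" yields
 * (3 << 16) | (2 << 8) | 1 = 0x030201, which callers can compare against
 * minimum-version constants numerically.
 */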
4920 static int open_self_maps(void *cpu_env, int fd)
4922 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4923 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4924 #endif
4925 FILE *fp;
4926 char *line = NULL;
4927 size_t len = 0;
4928 ssize_t read;
4930 fp = fopen("/proc/self/maps", "r");
4931 if (fp == NULL) {
4932 return -EACCES;
4935 while ((read = getline(&line, &len, fp)) != -1) {
4936 int fields, dev_maj, dev_min, inode;
4937 uint64_t min, max, offset;
4938 char flag_r, flag_w, flag_x, flag_p;
4939 char path[512] = "";
4940 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4941 " %511s", &min, &max, &flag_r, &flag_w, &flag_x,
4942 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4944 if ((fields < 10) || (fields > 11)) {
4945 continue;
4947 if (!strncmp(path, "[stack]", 7)) {
4948 continue;
4950 if (h2g_valid(min) && h2g_valid(max)) {
4951 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
4952 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
4953 h2g(min), h2g(max), flag_r, flag_w,
4954 flag_x, flag_p, offset, dev_maj, dev_min, inode,
4955 path[0] ? " " : "", path);
4959 free(line);
4960 fclose(fp);
4962 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4963 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4964 (unsigned long long)ts->info->stack_limit,
4965 (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1))
4966 & TARGET_PAGE_MASK,
4967 (unsigned long long)0);
4968 #endif
4970 return 0;
4973 static int open_self_stat(void *cpu_env, int fd)
4975 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4976 abi_ulong start_stack = ts->info->start_stack;
4977 int i;
4979 for (i = 0; i < 44; i++) {
4980 char buf[128];
4981 int len;
4982 uint64_t val = 0;
4984 if (i == 0) {
4985 /* pid */
4986 val = getpid();
4987 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4988 } else if (i == 1) {
4989 /* app name */
4990 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
4991 } else if (i == 27) {
4992 /* stack bottom */
4993 val = start_stack;
4994 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
4995 } else {
4996 /* for the rest, there is MasterCard */
4997 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5000 len = strlen(buf);
5001 if (write(fd, buf, len) != len) {
5002 return -1;
5006 return 0;
5009 static int open_self_auxv(void *cpu_env, int fd)
5011 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5012 abi_ulong auxv = ts->info->saved_auxv;
5013 abi_ulong len = ts->info->auxv_len;
5014 char *ptr;
5017 * The auxiliary vector is stored on the target process stack;
5018 * read in the whole auxv vector and copy it to the file.
5020 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5021 if (ptr != NULL) {
5022 while (len > 0) {
5023 ssize_t r;
5024 r = write(fd, ptr, len);
5025 if (r <= 0) {
5026 break;
5028 len -= r;
5029 ptr += r;
5031 lseek(fd, 0, SEEK_SET);
5032 unlock_user(ptr, auxv, len);
5035 return 0;
5038 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5040 struct fake_open {
5041 const char *filename;
5042 int (*fill)(void *cpu_env, int fd);
5044 const struct fake_open *fake_open;
5045 static const struct fake_open fakes[] = {
5046 { "/proc/self/maps", open_self_maps },
5047 { "/proc/self/stat", open_self_stat },
5048 { "/proc/self/auxv", open_self_auxv },
5049 { NULL, NULL }
5052 for (fake_open = fakes; fake_open->filename; fake_open++) {
5053 if (!strncmp(pathname, fake_open->filename,
5054 strlen(fake_open->filename))) {
5055 break;
5059 if (fake_open->filename) {
5060 const char *tmpdir;
5061 char filename[PATH_MAX];
5062 int fd, r;
5064 /* create a temporary file to hold the faked contents */
5065 tmpdir = getenv("TMPDIR");
5066 if (!tmpdir)
5067 tmpdir = "/tmp";
5068 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5069 fd = mkstemp(filename);
5070 if (fd < 0) {
5071 return fd;
5073 unlink(filename);
5075 if ((r = fake_open->fill(cpu_env, fd))) {
5076 close(fd);
5077 return r;
5079 lseek(fd, 0, SEEK_SET);
5081 return fd;
5084 return get_errno(open(path(pathname), flags, mode));
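/*
 * The fake_open table above is matched by prefix, so a guest open of e.g.
 * "/proc/self/stat" never reaches the host's procfs: the matching fill()
 * callback writes guest-adjusted contents into an anonymous temporary file
 * (mkstemp() followed by an immediate unlink()), and the guest simply gets
 * back that file descriptor.
 */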
5087 /* do_syscall() should always have a single exit point at the end so
5088 that actions, such as logging of syscall results, can be performed.
5089 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5090 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5091 abi_long arg2, abi_long arg3, abi_long arg4,
5092 abi_long arg5, abi_long arg6, abi_long arg7,
5093 abi_long arg8)
5095 abi_long ret;
5096 struct stat st;
5097 struct statfs stfs;
5098 void *p;
5100 #ifdef DEBUG
5101 gemu_log("syscall %d", num);
5102 #endif
5103 if(do_strace)
5104 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5106 switch(num) {
5107 case TARGET_NR_exit:
5108 #ifdef CONFIG_USE_NPTL
5109 /* In old applications this may be used to implement _exit(2).
5110 However, in threaded applications it is used for thread termination,
5111 and _exit_group is used for application termination.
5112 Do thread termination if we have more than one thread. */
5113 /* FIXME: This probably breaks if a signal arrives. We should probably
5114 be disabling signals. */
5115 if (first_cpu->next_cpu) {
5116 TaskState *ts;
5117 CPUArchState **lastp;
5118 CPUArchState *p;
5120 cpu_list_lock();
5121 lastp = &first_cpu;
5122 p = first_cpu;
5123 while (p && p != (CPUArchState *)cpu_env) {
5124 lastp = &p->next_cpu;
5125 p = p->next_cpu;
5127 /* If we didn't find the CPU for this thread then something is
5128 horribly wrong. */
5129 if (!p)
5130 abort();
5131 /* Remove the CPU from the list. */
5132 *lastp = p->next_cpu;
5133 cpu_list_unlock();
5134 ts = ((CPUArchState *)cpu_env)->opaque;
5135 if (ts->child_tidptr) {
5136 put_user_u32(0, ts->child_tidptr);
5137 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5138 NULL, NULL, 0);
5140 thread_env = NULL;
5141 object_delete(OBJECT(ENV_GET_CPU(cpu_env)));
5142 g_free(ts);
5143 pthread_exit(NULL);
5145 #endif
5146 #ifdef TARGET_GPROF
5147 _mcleanup();
5148 #endif
5149 gdb_exit(cpu_env, arg1);
5150 _exit(arg1);
5151 ret = 0; /* avoid warning */
5152 break;
5153 case TARGET_NR_read:
5154 if (arg3 == 0)
5155 ret = 0;
5156 else {
5157 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5158 goto efault;
5159 ret = get_errno(read(arg1, p, arg3));
5160 unlock_user(p, arg2, ret);
5162 break;
5163 case TARGET_NR_write:
5164 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5165 goto efault;
5166 ret = get_errno(write(arg1, p, arg3));
5167 unlock_user(p, arg2, 0);
5168 break;
5169 case TARGET_NR_open:
5170 if (!(p = lock_user_string(arg1)))
5171 goto efault;
5172 ret = get_errno(do_open(cpu_env, p,
5173 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5174 arg3));
5175 unlock_user(p, arg1, 0);
5176 break;
5177 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5178 case TARGET_NR_openat:
5179 if (!(p = lock_user_string(arg2)))
5180 goto efault;
5181 ret = get_errno(sys_openat(arg1,
5182 path(p),
5183 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5184 arg4));
5185 unlock_user(p, arg2, 0);
5186 break;
5187 #endif
5188 case TARGET_NR_close:
5189 ret = get_errno(close(arg1));
5190 break;
5191 case TARGET_NR_brk:
5192 ret = do_brk(arg1);
5193 break;
5194 case TARGET_NR_fork:
5195 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5196 break;
5197 #ifdef TARGET_NR_waitpid
5198 case TARGET_NR_waitpid:
5200 int status;
5201 ret = get_errno(waitpid(arg1, &status, arg3));
5202 if (!is_error(ret) && arg2 && ret
5203 && put_user_s32(host_to_target_waitstatus(status), arg2))
5204 goto efault;
5206 break;
5207 #endif
5208 #ifdef TARGET_NR_waitid
5209 case TARGET_NR_waitid:
5211 siginfo_t info;
5212 info.si_pid = 0;
5213 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5214 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5215 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5216 goto efault;
5217 host_to_target_siginfo(p, &info);
5218 unlock_user(p, arg3, sizeof(target_siginfo_t));
5221 break;
5222 #endif
5223 #ifdef TARGET_NR_creat /* not on alpha */
5224 case TARGET_NR_creat:
5225 if (!(p = lock_user_string(arg1)))
5226 goto efault;
5227 ret = get_errno(creat(p, arg2));
5228 unlock_user(p, arg1, 0);
5229 break;
5230 #endif
5231 case TARGET_NR_link:
5233 void * p2;
5234 p = lock_user_string(arg1);
5235 p2 = lock_user_string(arg2);
5236 if (!p || !p2)
5237 ret = -TARGET_EFAULT;
5238 else
5239 ret = get_errno(link(p, p2));
5240 unlock_user(p2, arg2, 0);
5241 unlock_user(p, arg1, 0);
5243 break;
5244 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5245 case TARGET_NR_linkat:
5247 void * p2 = NULL;
5248 if (!arg2 || !arg4)
5249 goto efault;
5250 p = lock_user_string(arg2);
5251 p2 = lock_user_string(arg4);
5252 if (!p || !p2)
5253 ret = -TARGET_EFAULT;
5254 else
5255 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
5256 unlock_user(p, arg2, 0);
5257 unlock_user(p2, arg4, 0);
5259 break;
5260 #endif
5261 case TARGET_NR_unlink:
5262 if (!(p = lock_user_string(arg1)))
5263 goto efault;
5264 ret = get_errno(unlink(p));
5265 unlock_user(p, arg1, 0);
5266 break;
5267 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5268 case TARGET_NR_unlinkat:
5269 if (!(p = lock_user_string(arg2)))
5270 goto efault;
5271 ret = get_errno(sys_unlinkat(arg1, p, arg3));
5272 unlock_user(p, arg2, 0);
5273 break;
5274 #endif
5275 case TARGET_NR_execve:
5277 char **argp, **envp;
5278 int argc, envc;
5279 abi_ulong gp;
5280 abi_ulong guest_argp;
5281 abi_ulong guest_envp;
5282 abi_ulong addr;
5283 char **q;
5284 int total_size = 0;
5286 argc = 0;
5287 guest_argp = arg2;
5288 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5289 if (get_user_ual(addr, gp))
5290 goto efault;
5291 if (!addr)
5292 break;
5293 argc++;
5295 envc = 0;
5296 guest_envp = arg3;
5297 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5298 if (get_user_ual(addr, gp))
5299 goto efault;
5300 if (!addr)
5301 break;
5302 envc++;
5305 argp = alloca((argc + 1) * sizeof(void *));
5306 envp = alloca((envc + 1) * sizeof(void *));
5308 for (gp = guest_argp, q = argp; gp;
5309 gp += sizeof(abi_ulong), q++) {
5310 if (get_user_ual(addr, gp))
5311 goto execve_efault;
5312 if (!addr)
5313 break;
5314 if (!(*q = lock_user_string(addr)))
5315 goto execve_efault;
5316 total_size += strlen(*q) + 1;
5318 *q = NULL;
5320 for (gp = guest_envp, q = envp; gp;
5321 gp += sizeof(abi_ulong), q++) {
5322 if (get_user_ual(addr, gp))
5323 goto execve_efault;
5324 if (!addr)
5325 break;
5326 if (!(*q = lock_user_string(addr)))
5327 goto execve_efault;
5328 total_size += strlen(*q) + 1;
5330 *q = NULL;
5332 /* This case will not be caught by the host's execve() if its
5333 page size is bigger than the target's. */
5334 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5335 ret = -TARGET_E2BIG;
5336 goto execve_end;
5338 if (!(p = lock_user_string(arg1)))
5339 goto execve_efault;
5340 ret = get_errno(execve(p, argp, envp));
5341 unlock_user(p, arg1, 0);
5343 goto execve_end;
5345 execve_efault:
5346 ret = -TARGET_EFAULT;
5348 execve_end:
5349 for (gp = guest_argp, q = argp; *q;
5350 gp += sizeof(abi_ulong), q++) {
5351 if (get_user_ual(addr, gp)
5352 || !addr)
5353 break;
5354 unlock_user(*q, addr, 0);
5356 for (gp = guest_envp, q = envp; *q;
5357 gp += sizeof(abi_ulong), q++) {
5358 if (get_user_ual(addr, gp)
5359 || !addr)
5360 break;
5361 unlock_user(*q, addr, 0);
5364 break;
5365 case TARGET_NR_chdir:
5366 if (!(p = lock_user_string(arg1)))
5367 goto efault;
5368 ret = get_errno(chdir(p));
5369 unlock_user(p, arg1, 0);
5370 break;
5371 #ifdef TARGET_NR_time
5372 case TARGET_NR_time:
5374 time_t host_time;
5375 ret = get_errno(time(&host_time));
5376 if (!is_error(ret)
5377 && arg1
5378 && put_user_sal(host_time, arg1))
5379 goto efault;
5381 break;
5382 #endif
5383 case TARGET_NR_mknod:
5384 if (!(p = lock_user_string(arg1)))
5385 goto efault;
5386 ret = get_errno(mknod(p, arg2, arg3));
5387 unlock_user(p, arg1, 0);
5388 break;
5389 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5390 case TARGET_NR_mknodat:
5391 if (!(p = lock_user_string(arg2)))
5392 goto efault;
5393 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5394 unlock_user(p, arg2, 0);
5395 break;
5396 #endif
5397 case TARGET_NR_chmod:
5398 if (!(p = lock_user_string(arg1)))
5399 goto efault;
5400 ret = get_errno(chmod(p, arg2));
5401 unlock_user(p, arg1, 0);
5402 break;
5403 #ifdef TARGET_NR_break
5404 case TARGET_NR_break:
5405 goto unimplemented;
5406 #endif
5407 #ifdef TARGET_NR_oldstat
5408 case TARGET_NR_oldstat:
5409 goto unimplemented;
5410 #endif
5411 case TARGET_NR_lseek:
5412 ret = get_errno(lseek(arg1, arg2, arg3));
5413 break;
5414 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5415 /* Alpha specific */
5416 case TARGET_NR_getxpid:
5417 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5418 ret = get_errno(getpid());
5419 break;
5420 #endif
5421 #ifdef TARGET_NR_getpid
5422 case TARGET_NR_getpid:
5423 ret = get_errno(getpid());
5424 break;
5425 #endif
5426 case TARGET_NR_mount:
5428 /* need to look at the data field */
5429 void *p2, *p3;
5430 p = lock_user_string(arg1);
5431 p2 = lock_user_string(arg2);
5432 p3 = lock_user_string(arg3);
5433 if (!p || !p2 || !p3)
5434 ret = -TARGET_EFAULT;
5435 else {
5436 /* FIXME - arg5 should be locked, but it isn't clear how to
5437 * do that since it's not guaranteed to be a NULL-terminated
5438 * string.
5440 if ( ! arg5 )
5441 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5442 else
5443 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5445 unlock_user(p, arg1, 0);
5446 unlock_user(p2, arg2, 0);
5447 unlock_user(p3, arg3, 0);
5448 break;
5450 #ifdef TARGET_NR_umount
5451 case TARGET_NR_umount:
5452 if (!(p = lock_user_string(arg1)))
5453 goto efault;
5454 ret = get_errno(umount(p));
5455 unlock_user(p, arg1, 0);
5456 break;
5457 #endif
5458 #ifdef TARGET_NR_stime /* not on alpha */
5459 case TARGET_NR_stime:
5461 time_t host_time;
5462 if (get_user_sal(host_time, arg1))
5463 goto efault;
5464 ret = get_errno(stime(&host_time));
5466 break;
5467 #endif
5468 case TARGET_NR_ptrace:
5469 goto unimplemented;
5470 #ifdef TARGET_NR_alarm /* not on alpha */
5471 case TARGET_NR_alarm:
5472 ret = alarm(arg1);
5473 break;
5474 #endif
5475 #ifdef TARGET_NR_oldfstat
5476 case TARGET_NR_oldfstat:
5477 goto unimplemented;
5478 #endif
5479 #ifdef TARGET_NR_pause /* not on alpha */
5480 case TARGET_NR_pause:
5481 ret = get_errno(pause());
5482 break;
5483 #endif
5484 #ifdef TARGET_NR_utime
5485 case TARGET_NR_utime:
5487 struct utimbuf tbuf, *host_tbuf;
5488 struct target_utimbuf *target_tbuf;
5489 if (arg2) {
5490 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5491 goto efault;
5492 tbuf.actime = tswapal(target_tbuf->actime);
5493 tbuf.modtime = tswapal(target_tbuf->modtime);
5494 unlock_user_struct(target_tbuf, arg2, 0);
5495 host_tbuf = &tbuf;
5496 } else {
5497 host_tbuf = NULL;
5499 if (!(p = lock_user_string(arg1)))
5500 goto efault;
5501 ret = get_errno(utime(p, host_tbuf));
5502 unlock_user(p, arg1, 0);
5504 break;
5505 #endif
5506 case TARGET_NR_utimes:
5508 struct timeval *tvp, tv[2];
5509 if (arg2) {
5510 if (copy_from_user_timeval(&tv[0], arg2)
5511 || copy_from_user_timeval(&tv[1],
5512 arg2 + sizeof(struct target_timeval)))
5513 goto efault;
5514 tvp = tv;
5515 } else {
5516 tvp = NULL;
5518 if (!(p = lock_user_string(arg1)))
5519 goto efault;
5520 ret = get_errno(utimes(p, tvp));
5521 unlock_user(p, arg1, 0);
5523 break;
5524 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5525 case TARGET_NR_futimesat:
5527 struct timeval *tvp, tv[2];
5528 if (arg3) {
5529 if (copy_from_user_timeval(&tv[0], arg3)
5530 || copy_from_user_timeval(&tv[1],
5531 arg3 + sizeof(struct target_timeval)))
5532 goto efault;
5533 tvp = tv;
5534 } else {
5535 tvp = NULL;
5537 if (!(p = lock_user_string(arg2)))
5538 goto efault;
5539 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5540 unlock_user(p, arg2, 0);
5542 break;
5543 #endif
5544 #ifdef TARGET_NR_stty
5545 case TARGET_NR_stty:
5546 goto unimplemented;
5547 #endif
5548 #ifdef TARGET_NR_gtty
5549 case TARGET_NR_gtty:
5550 goto unimplemented;
5551 #endif
5552 case TARGET_NR_access:
5553 if (!(p = lock_user_string(arg1)))
5554 goto efault;
5555 ret = get_errno(access(path(p), arg2));
5556 unlock_user(p, arg1, 0);
5557 break;
5558 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5559 case TARGET_NR_faccessat:
5560 if (!(p = lock_user_string(arg2)))
5561 goto efault;
5562 ret = get_errno(sys_faccessat(arg1, p, arg3));
5563 unlock_user(p, arg2, 0);
5564 break;
5565 #endif
5566 #ifdef TARGET_NR_nice /* not on alpha */
5567 case TARGET_NR_nice:
5568 ret = get_errno(nice(arg1));
5569 break;
5570 #endif
5571 #ifdef TARGET_NR_ftime
5572 case TARGET_NR_ftime:
5573 goto unimplemented;
5574 #endif
5575 case TARGET_NR_sync:
5576 sync();
5577 ret = 0;
5578 break;
5579 case TARGET_NR_kill:
5580 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5581 break;
5582 case TARGET_NR_rename:
5584 void *p2;
5585 p = lock_user_string(arg1);
5586 p2 = lock_user_string(arg2);
5587 if (!p || !p2)
5588 ret = -TARGET_EFAULT;
5589 else
5590 ret = get_errno(rename(p, p2));
5591 unlock_user(p2, arg2, 0);
5592 unlock_user(p, arg1, 0);
5594 break;
5595 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5596 case TARGET_NR_renameat:
5598 void *p2;
5599 p = lock_user_string(arg2);
5600 p2 = lock_user_string(arg4);
5601 if (!p || !p2)
5602 ret = -TARGET_EFAULT;
5603 else
5604 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5605 unlock_user(p2, arg4, 0);
5606 unlock_user(p, arg2, 0);
5608 break;
5609 #endif
5610 case TARGET_NR_mkdir:
5611 if (!(p = lock_user_string(arg1)))
5612 goto efault;
5613 ret = get_errno(mkdir(p, arg2));
5614 unlock_user(p, arg1, 0);
5615 break;
5616 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5617 case TARGET_NR_mkdirat:
5618 if (!(p = lock_user_string(arg2)))
5619 goto efault;
5620 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5621 unlock_user(p, arg2, 0);
5622 break;
5623 #endif
5624 case TARGET_NR_rmdir:
5625 if (!(p = lock_user_string(arg1)))
5626 goto efault;
5627 ret = get_errno(rmdir(p));
5628 unlock_user(p, arg1, 0);
5629 break;
5630 case TARGET_NR_dup:
5631 ret = get_errno(dup(arg1));
5632 break;
5633 case TARGET_NR_pipe:
5634 ret = do_pipe(cpu_env, arg1, 0, 0);
5635 break;
5636 #ifdef TARGET_NR_pipe2
5637 case TARGET_NR_pipe2:
5638 ret = do_pipe(cpu_env, arg1,
5639 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5640 break;
5641 #endif
5642 case TARGET_NR_times:
5644 struct target_tms *tmsp;
5645 struct tms tms;
5646 ret = get_errno(times(&tms));
5647 if (arg1) {
5648 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5649 if (!tmsp)
5650 goto efault;
5651 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5652 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5653 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5654 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5656 if (!is_error(ret))
5657 ret = host_to_target_clock_t(ret);
5659 break;
5660 #ifdef TARGET_NR_prof
5661 case TARGET_NR_prof:
5662 goto unimplemented;
5663 #endif
5664 #ifdef TARGET_NR_signal
5665 case TARGET_NR_signal:
5666 goto unimplemented;
5667 #endif
5668 case TARGET_NR_acct:
5669 if (arg1 == 0) {
5670 ret = get_errno(acct(NULL));
5671 } else {
5672 if (!(p = lock_user_string(arg1)))
5673 goto efault;
5674 ret = get_errno(acct(path(p)));
5675 unlock_user(p, arg1, 0);
5677 break;
5678 #ifdef TARGET_NR_umount2 /* not on alpha */
5679 case TARGET_NR_umount2:
5680 if (!(p = lock_user_string(arg1)))
5681 goto efault;
5682 ret = get_errno(umount2(p, arg2));
5683 unlock_user(p, arg1, 0);
5684 break;
5685 #endif
5686 #ifdef TARGET_NR_lock
5687 case TARGET_NR_lock:
5688 goto unimplemented;
5689 #endif
5690 case TARGET_NR_ioctl:
5691 ret = do_ioctl(arg1, arg2, arg3);
5692 break;
5693 case TARGET_NR_fcntl:
5694 ret = do_fcntl(arg1, arg2, arg3);
5695 break;
5696 #ifdef TARGET_NR_mpx
5697 case TARGET_NR_mpx:
5698 goto unimplemented;
5699 #endif
5700 case TARGET_NR_setpgid:
5701 ret = get_errno(setpgid(arg1, arg2));
5702 break;
5703 #ifdef TARGET_NR_ulimit
5704 case TARGET_NR_ulimit:
5705 goto unimplemented;
5706 #endif
5707 #ifdef TARGET_NR_oldolduname
5708 case TARGET_NR_oldolduname:
5709 goto unimplemented;
5710 #endif
5711 case TARGET_NR_umask:
5712 ret = get_errno(umask(arg1));
5713 break;
5714 case TARGET_NR_chroot:
5715 if (!(p = lock_user_string(arg1)))
5716 goto efault;
5717 ret = get_errno(chroot(p));
5718 unlock_user(p, arg1, 0);
5719 break;
5720 case TARGET_NR_ustat:
5721 goto unimplemented;
5722 case TARGET_NR_dup2:
5723 ret = get_errno(dup2(arg1, arg2));
5724 break;
5725 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5726 case TARGET_NR_dup3:
5727 ret = get_errno(dup3(arg1, arg2, arg3));
5728 break;
5729 #endif
5730 #ifdef TARGET_NR_getppid /* not on alpha */
5731 case TARGET_NR_getppid:
5732 ret = get_errno(getppid());
5733 break;
5734 #endif
5735 case TARGET_NR_getpgrp:
5736 ret = get_errno(getpgrp());
5737 break;
5738 case TARGET_NR_setsid:
5739 ret = get_errno(setsid());
5740 break;
5741 #ifdef TARGET_NR_sigaction
5742 case TARGET_NR_sigaction:
5744 #if defined(TARGET_ALPHA)
5745 struct target_sigaction act, oact, *pact = 0;
5746 struct target_old_sigaction *old_act;
5747 if (arg2) {
5748 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5749 goto efault;
5750 act._sa_handler = old_act->_sa_handler;
5751 target_siginitset(&act.sa_mask, old_act->sa_mask);
5752 act.sa_flags = old_act->sa_flags;
5753 act.sa_restorer = 0;
5754 unlock_user_struct(old_act, arg2, 0);
5755 pact = &act;
5757 ret = get_errno(do_sigaction(arg1, pact, &oact));
5758 if (!is_error(ret) && arg3) {
5759 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5760 goto efault;
5761 old_act->_sa_handler = oact._sa_handler;
5762 old_act->sa_mask = oact.sa_mask.sig[0];
5763 old_act->sa_flags = oact.sa_flags;
5764 unlock_user_struct(old_act, arg3, 1);
5766 #elif defined(TARGET_MIPS)
5767 struct target_sigaction act, oact, *pact, *old_act;
5769 if (arg2) {
5770 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5771 goto efault;
5772 act._sa_handler = old_act->_sa_handler;
5773 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5774 act.sa_flags = old_act->sa_flags;
5775 unlock_user_struct(old_act, arg2, 0);
5776 pact = &act;
5777 } else {
5778 pact = NULL;
5781 ret = get_errno(do_sigaction(arg1, pact, &oact));
5783 if (!is_error(ret) && arg3) {
5784 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5785 goto efault;
5786 old_act->_sa_handler = oact._sa_handler;
5787 old_act->sa_flags = oact.sa_flags;
5788 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5789 old_act->sa_mask.sig[1] = 0;
5790 old_act->sa_mask.sig[2] = 0;
5791 old_act->sa_mask.sig[3] = 0;
5792 unlock_user_struct(old_act, arg3, 1);
5794 #else
5795 struct target_old_sigaction *old_act;
5796 struct target_sigaction act, oact, *pact;
5797 if (arg2) {
5798 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5799 goto efault;
5800 act._sa_handler = old_act->_sa_handler;
5801 target_siginitset(&act.sa_mask, old_act->sa_mask);
5802 act.sa_flags = old_act->sa_flags;
5803 act.sa_restorer = old_act->sa_restorer;
5804 unlock_user_struct(old_act, arg2, 0);
5805 pact = &act;
5806 } else {
5807 pact = NULL;
5809 ret = get_errno(do_sigaction(arg1, pact, &oact));
5810 if (!is_error(ret) && arg3) {
5811 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5812 goto efault;
5813 old_act->_sa_handler = oact._sa_handler;
5814 old_act->sa_mask = oact.sa_mask.sig[0];
5815 old_act->sa_flags = oact.sa_flags;
5816 old_act->sa_restorer = oact.sa_restorer;
5817 unlock_user_struct(old_act, arg3, 1);
5819 #endif
5821 break;
5822 #endif
5823 case TARGET_NR_rt_sigaction:
5825 #if defined(TARGET_ALPHA)
5826 struct target_sigaction act, oact, *pact = 0;
5827 struct target_rt_sigaction *rt_act;
5828 /* ??? arg4 == sizeof(sigset_t). */
5829 if (arg2) {
5830 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5831 goto efault;
5832 act._sa_handler = rt_act->_sa_handler;
5833 act.sa_mask = rt_act->sa_mask;
5834 act.sa_flags = rt_act->sa_flags;
5835 act.sa_restorer = arg5;
5836 unlock_user_struct(rt_act, arg2, 0);
5837 pact = &act;
5839 ret = get_errno(do_sigaction(arg1, pact, &oact));
5840 if (!is_error(ret) && arg3) {
5841 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5842 goto efault;
5843 rt_act->_sa_handler = oact._sa_handler;
5844 rt_act->sa_mask = oact.sa_mask;
5845 rt_act->sa_flags = oact.sa_flags;
5846 unlock_user_struct(rt_act, arg3, 1);
5848 #else
5849 struct target_sigaction *act;
5850 struct target_sigaction *oact;
5852 if (arg2) {
5853 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5854 goto efault;
5855 } else
5856 act = NULL;
5857 if (arg3) {
5858 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5859 ret = -TARGET_EFAULT;
5860 goto rt_sigaction_fail;
5862 } else
5863 oact = NULL;
5864 ret = get_errno(do_sigaction(arg1, act, oact));
5865 rt_sigaction_fail:
5866 if (act)
5867 unlock_user_struct(act, arg2, 0);
5868 if (oact)
5869 unlock_user_struct(oact, arg3, 1);
5870 #endif
5872 break;
5873 #ifdef TARGET_NR_sgetmask /* not on alpha */
5874 case TARGET_NR_sgetmask:
5876 sigset_t cur_set;
5877 abi_ulong target_set;
5878 sigprocmask(0, NULL, &cur_set);
5879 host_to_target_old_sigset(&target_set, &cur_set);
5880 ret = target_set;
5882 break;
5883 #endif
5884 #ifdef TARGET_NR_ssetmask /* not on alpha */
5885 case TARGET_NR_ssetmask:
5887 sigset_t set, oset, cur_set;
5888 abi_ulong target_set = arg1;
5889 sigprocmask(0, NULL, &cur_set);
5890 target_to_host_old_sigset(&set, &target_set);
5891 sigorset(&set, &set, &cur_set);
5892 sigprocmask(SIG_SETMASK, &set, &oset);
5893 host_to_target_old_sigset(&target_set, &oset);
5894 ret = target_set;
5896 break;
5897 #endif
5898 #ifdef TARGET_NR_sigprocmask
5899 case TARGET_NR_sigprocmask:
5901 #if defined(TARGET_ALPHA)
5902 sigset_t set, oldset;
5903 abi_ulong mask;
5904 int how;
5906 switch (arg1) {
5907 case TARGET_SIG_BLOCK:
5908 how = SIG_BLOCK;
5909 break;
5910 case TARGET_SIG_UNBLOCK:
5911 how = SIG_UNBLOCK;
5912 break;
5913 case TARGET_SIG_SETMASK:
5914 how = SIG_SETMASK;
5915 break;
5916 default:
5917 ret = -TARGET_EINVAL;
5918 goto fail;
5920 mask = arg2;
5921 target_to_host_old_sigset(&set, &mask);
5923 ret = get_errno(sigprocmask(how, &set, &oldset));
5924 if (!is_error(ret)) {
5925 host_to_target_old_sigset(&mask, &oldset);
5926 ret = mask;
5927 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5929 #else
5930 sigset_t set, oldset, *set_ptr;
5931 int how;
5933 if (arg2) {
5934 switch (arg1) {
5935 case TARGET_SIG_BLOCK:
5936 how = SIG_BLOCK;
5937 break;
5938 case TARGET_SIG_UNBLOCK:
5939 how = SIG_UNBLOCK;
5940 break;
5941 case TARGET_SIG_SETMASK:
5942 how = SIG_SETMASK;
5943 break;
5944 default:
5945 ret = -TARGET_EINVAL;
5946 goto fail;
5948 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5949 goto efault;
5950 target_to_host_old_sigset(&set, p);
5951 unlock_user(p, arg2, 0);
5952 set_ptr = &set;
5953 } else {
5954 how = 0;
5955 set_ptr = NULL;
5957 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5958 if (!is_error(ret) && arg3) {
5959 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5960 goto efault;
5961 host_to_target_old_sigset(p, &oldset);
5962 unlock_user(p, arg3, sizeof(target_sigset_t));
5964 #endif
5966 break;
5967 #endif
5968 case TARGET_NR_rt_sigprocmask:
5970 int how = arg1;
5971 sigset_t set, oldset, *set_ptr;
5973 if (arg2) {
5974 switch(how) {
5975 case TARGET_SIG_BLOCK:
5976 how = SIG_BLOCK;
5977 break;
5978 case TARGET_SIG_UNBLOCK:
5979 how = SIG_UNBLOCK;
5980 break;
5981 case TARGET_SIG_SETMASK:
5982 how = SIG_SETMASK;
5983 break;
5984 default:
5985 ret = -TARGET_EINVAL;
5986 goto fail;
5988 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5989 goto efault;
5990 target_to_host_sigset(&set, p);
5991 unlock_user(p, arg2, 0);
5992 set_ptr = &set;
5993 } else {
5994 how = 0;
5995 set_ptr = NULL;
5997 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5998 if (!is_error(ret) && arg3) {
5999 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6000 goto efault;
6001 host_to_target_sigset(p, &oldset);
6002 unlock_user(p, arg3, sizeof(target_sigset_t));
6005 break;
6006 #ifdef TARGET_NR_sigpending
6007 case TARGET_NR_sigpending:
6009 sigset_t set;
6010 ret = get_errno(sigpending(&set));
6011 if (!is_error(ret)) {
6012 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6013 goto efault;
6014 host_to_target_old_sigset(p, &set);
6015 unlock_user(p, arg1, sizeof(target_sigset_t));
6018 break;
6019 #endif
6020 case TARGET_NR_rt_sigpending:
6022 sigset_t set;
6023 ret = get_errno(sigpending(&set));
6024 if (!is_error(ret)) {
6025 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6026 goto efault;
6027 host_to_target_sigset(p, &set);
6028 unlock_user(p, arg1, sizeof(target_sigset_t));
6031 break;
6032 #ifdef TARGET_NR_sigsuspend
6033 case TARGET_NR_sigsuspend:
6035 sigset_t set;
6036 #if defined(TARGET_ALPHA)
6037 abi_ulong mask = arg1;
6038 target_to_host_old_sigset(&set, &mask);
6039 #else
6040 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6041 goto efault;
6042 target_to_host_old_sigset(&set, p);
6043 unlock_user(p, arg1, 0);
6044 #endif
6045 ret = get_errno(sigsuspend(&set));
6047 break;
6048 #endif
6049 case TARGET_NR_rt_sigsuspend:
6051 sigset_t set;
6052 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6053 goto efault;
6054 target_to_host_sigset(&set, p);
6055 unlock_user(p, arg1, 0);
6056 ret = get_errno(sigsuspend(&set));
6058 break;
6059 case TARGET_NR_rt_sigtimedwait:
6061 sigset_t set;
6062 struct timespec uts, *puts;
6063 siginfo_t uinfo;
6065 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6066 goto efault;
6067 target_to_host_sigset(&set, p);
6068 unlock_user(p, arg1, 0);
6069 if (arg3) {
6070 puts = &uts;
6071 target_to_host_timespec(puts, arg3);
6072 } else {
6073 puts = NULL;
6075 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6076 if (!is_error(ret) && arg2) {
6077 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6078 goto efault;
6079 host_to_target_siginfo(p, &uinfo);
6080 unlock_user(p, arg2, sizeof(target_siginfo_t));
6083 break;
6084 case TARGET_NR_rt_sigqueueinfo:
6086 siginfo_t uinfo;
6087 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6088 goto efault;
6089 target_to_host_siginfo(&uinfo, p);
6090 unlock_user(p, arg3, 0);
6091 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6093 break;
6094 #ifdef TARGET_NR_sigreturn
6095 case TARGET_NR_sigreturn:
6096 /* NOTE: ret is eax, so no transcoding needs to be done */
6097 ret = do_sigreturn(cpu_env);
6098 break;
6099 #endif
6100 case TARGET_NR_rt_sigreturn:
6101 /* NOTE: ret is eax, so no transcoding needs to be done */
6102 ret = do_rt_sigreturn(cpu_env);
6103 break;
6104 case TARGET_NR_sethostname:
6105 if (!(p = lock_user_string(arg1)))
6106 goto efault;
6107 ret = get_errno(sethostname(p, arg2));
6108 unlock_user(p, arg1, 0);
6109 break;
6110 case TARGET_NR_setrlimit:
6112 int resource = target_to_host_resource(arg1);
6113 struct target_rlimit *target_rlim;
6114 struct rlimit rlim;
6115 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6116 goto efault;
6117 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6118 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6119 unlock_user_struct(target_rlim, arg2, 0);
6120 ret = get_errno(setrlimit(resource, &rlim));
6122 break;
6123 case TARGET_NR_getrlimit:
6125 int resource = target_to_host_resource(arg1);
6126 struct target_rlimit *target_rlim;
6127 struct rlimit rlim;
6129 ret = get_errno(getrlimit(resource, &rlim));
6130 if (!is_error(ret)) {
6131 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6132 goto efault;
6133 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6134 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6135 unlock_user_struct(target_rlim, arg2, 1);
6138 break;
6139 case TARGET_NR_getrusage:
6141 struct rusage rusage;
6142 ret = get_errno(getrusage(arg1, &rusage));
6143 if (!is_error(ret)) {
6144 host_to_target_rusage(arg2, &rusage);
6147 break;
6148 case TARGET_NR_gettimeofday:
6150 struct timeval tv;
6151 ret = get_errno(gettimeofday(&tv, NULL));
6152 if (!is_error(ret)) {
6153 if (copy_to_user_timeval(arg1, &tv))
6154 goto efault;
6157 break;
6158 case TARGET_NR_settimeofday:
6160 struct timeval tv;
6161 if (copy_from_user_timeval(&tv, arg1))
6162 goto efault;
6163 ret = get_errno(settimeofday(&tv, NULL));
6165 break;
6166 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
6167 case TARGET_NR_select:
6169 struct target_sel_arg_struct *sel;
6170 abi_ulong inp, outp, exp, tvp;
6171 long nsel;
6173 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6174 goto efault;
6175 nsel = tswapal(sel->n);
6176 inp = tswapal(sel->inp);
6177 outp = tswapal(sel->outp);
6178 exp = tswapal(sel->exp);
6179 tvp = tswapal(sel->tvp);
6180 unlock_user_struct(sel, arg1, 0);
6181 ret = do_select(nsel, inp, outp, exp, tvp);
6183 break;
6184 #endif
6185 #ifdef TARGET_NR_pselect6
6186 case TARGET_NR_pselect6:
6188 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6189 fd_set rfds, wfds, efds;
6190 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6191 struct timespec ts, *ts_ptr;
6194 * The 6th arg is actually two args smashed together,
6195 * so we cannot use the C library.
6197 sigset_t set;
6198 struct {
6199 sigset_t *set;
6200 size_t size;
6201 } sig, *sig_ptr;
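        /* The kernel expects arg6 to point to a pair of guest words,
           { const sigset_t *ss; size_t ss_len; }; arg7[0] and arg7[1]
           below are those two fields. */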
6203 abi_ulong arg_sigset, arg_sigsize, *arg7;
6204 target_sigset_t *target_sigset;
6206 n = arg1;
6207 rfd_addr = arg2;
6208 wfd_addr = arg3;
6209 efd_addr = arg4;
6210 ts_addr = arg5;
6212 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6213 if (ret) {
6214 goto fail;
6216 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6217 if (ret) {
6218 goto fail;
6220 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6221 if (ret) {
6222 goto fail;
6226 * This takes a timespec, and not a timeval, so we cannot
6227 * use the do_select() helper ...
6229 if (ts_addr) {
6230 if (target_to_host_timespec(&ts, ts_addr)) {
6231 goto efault;
6233 ts_ptr = &ts;
6234 } else {
6235 ts_ptr = NULL;
6238 /* Extract the two packed args for the sigset */
6239 if (arg6) {
6240 sig_ptr = &sig;
6241 sig.size = _NSIG / 8;
6243 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6244 if (!arg7) {
6245 goto efault;
6247 arg_sigset = tswapal(arg7[0]);
6248 arg_sigsize = tswapal(arg7[1]);
6249 unlock_user(arg7, arg6, 0);
6251 if (arg_sigset) {
6252 sig.set = &set;
6253 if (arg_sigsize != sizeof(*target_sigset)) {
6254 /* Like the kernel, we enforce correctly sized sigsets */
6255 ret = -TARGET_EINVAL;
6256 goto fail;
6258 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6259 sizeof(*target_sigset), 1);
6260 if (!target_sigset) {
6261 goto efault;
6263 target_to_host_sigset(&set, target_sigset);
6264 unlock_user(target_sigset, arg_sigset, 0);
6265 } else {
6266 sig.set = NULL;
6268 } else {
6269 sig_ptr = NULL;
6272 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6273 ts_ptr, sig_ptr));
6275 if (!is_error(ret)) {
6276 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6277 goto efault;
6278 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6279 goto efault;
6280 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6281 goto efault;
6283 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6284 goto efault;
6287 break;
6288 #endif
6289 case TARGET_NR_symlink:
6291 void *p2;
6292 p = lock_user_string(arg1);
6293 p2 = lock_user_string(arg2);
6294 if (!p || !p2)
6295 ret = -TARGET_EFAULT;
6296 else
6297 ret = get_errno(symlink(p, p2));
6298 unlock_user(p2, arg2, 0);
6299 unlock_user(p, arg1, 0);
6301 break;
6302 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6303 case TARGET_NR_symlinkat:
6305 void *p2;
6306 p = lock_user_string(arg1);
6307 p2 = lock_user_string(arg3);
6308 if (!p || !p2)
6309 ret = -TARGET_EFAULT;
6310 else
6311 ret = get_errno(sys_symlinkat(p, arg2, p2));
6312 unlock_user(p2, arg3, 0);
6313 unlock_user(p, arg1, 0);
6315 break;
6316 #endif
6317 #ifdef TARGET_NR_oldlstat
6318 case TARGET_NR_oldlstat:
6319 goto unimplemented;
6320 #endif
6321 case TARGET_NR_readlink:
6323 void *p2, *temp;
6324 p = lock_user_string(arg1);
6325 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6326 if (!p || !p2)
6327 ret = -TARGET_EFAULT;
6328 else {
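            /* The guest expects /proc/self/exe to name the binary being
               emulated, not the QEMU executable, so resolve exec_path
               instead of doing a real readlink(). */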
6329 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
6330 char real[PATH_MAX];
6331 temp = realpath(exec_path, real);
6332 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
6333 snprintf((char *)p2, arg3, "%s", real);
6335 else
6336 ret = get_errno(readlink(path(p), p2, arg3));
6338 unlock_user(p2, arg2, ret);
6339 unlock_user(p, arg1, 0);
6341 break;
6342 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6343 case TARGET_NR_readlinkat:
6345 void *p2;
6346 p = lock_user_string(arg2);
6347 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6348 if (!p || !p2)
6349 ret = -TARGET_EFAULT;
6350 else
6351 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6352 unlock_user(p2, arg3, ret);
6353 unlock_user(p, arg2, 0);
6355 break;
6356 #endif
6357 #ifdef TARGET_NR_uselib
6358 case TARGET_NR_uselib:
6359 goto unimplemented;
6360 #endif
6361 #ifdef TARGET_NR_swapon
6362 case TARGET_NR_swapon:
6363 if (!(p = lock_user_string(arg1)))
6364 goto efault;
6365 ret = get_errno(swapon(p, arg2));
6366 unlock_user(p, arg1, 0);
6367 break;
6368 #endif
6369 case TARGET_NR_reboot:
6370 if (!(p = lock_user_string(arg4)))
6371 goto efault;
6372 ret = get_errno(reboot(arg1, arg2, arg3, p));
6373 unlock_user(p, arg4, 0);
6374 break;
6375 #ifdef TARGET_NR_readdir
6376 case TARGET_NR_readdir:
6377 goto unimplemented;
6378 #endif
6379 #ifdef TARGET_NR_mmap
6380 case TARGET_NR_mmap:
6381 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6382 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6383 || defined(TARGET_S390X)
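        /* On these targets the old mmap syscall takes a single pointer to a
           block of six arguments in guest memory rather than passing them
           in registers. */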
6385 abi_ulong *v;
6386 abi_ulong v1, v2, v3, v4, v5, v6;
6387 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6388 goto efault;
6389 v1 = tswapal(v[0]);
6390 v2 = tswapal(v[1]);
6391 v3 = tswapal(v[2]);
6392 v4 = tswapal(v[3]);
6393 v5 = tswapal(v[4]);
6394 v6 = tswapal(v[5]);
6395 unlock_user(v, arg1, 0);
6396 ret = get_errno(target_mmap(v1, v2, v3,
6397 target_to_host_bitmask(v4, mmap_flags_tbl),
6398 v5, v6));
6400 #else
6401 ret = get_errno(target_mmap(arg1, arg2, arg3,
6402 target_to_host_bitmask(arg4, mmap_flags_tbl),
6403 arg5,
6404 arg6));
6405 #endif
6406 break;
6407 #endif
6408 #ifdef TARGET_NR_mmap2
6409 case TARGET_NR_mmap2:
6410 #ifndef MMAP_SHIFT
6411 #define MMAP_SHIFT 12
6412 #endif
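        /* mmap2 passes the file offset in units of 2^MMAP_SHIFT (normally
           4096-byte pages), so scale it up to a byte offset for
           target_mmap(). */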
6413 ret = get_errno(target_mmap(arg1, arg2, arg3,
6414 target_to_host_bitmask(arg4, mmap_flags_tbl),
6415 arg5,
6416 arg6 << MMAP_SHIFT));
6417 break;
6418 #endif
6419 case TARGET_NR_munmap:
6420 ret = get_errno(target_munmap(arg1, arg2));
6421 break;
6422 case TARGET_NR_mprotect:
6424 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6425 /* Special hack to detect libc making the stack executable. */
6426 if ((arg3 & PROT_GROWSDOWN)
6427 && arg1 >= ts->info->stack_limit
6428 && arg1 <= ts->info->start_stack) {
6429 arg3 &= ~PROT_GROWSDOWN;
6430 arg2 = arg2 + arg1 - ts->info->stack_limit;
6431 arg1 = ts->info->stack_limit;
6434 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6435 break;
6436 #ifdef TARGET_NR_mremap
6437 case TARGET_NR_mremap:
6438 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6439 break;
6440 #endif
6441 /* ??? msync/mlock/munlock are broken for softmmu. */
6442 #ifdef TARGET_NR_msync
6443 case TARGET_NR_msync:
6444 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6445 break;
6446 #endif
6447 #ifdef TARGET_NR_mlock
6448 case TARGET_NR_mlock:
6449 ret = get_errno(mlock(g2h(arg1), arg2));
6450 break;
6451 #endif
6452 #ifdef TARGET_NR_munlock
6453 case TARGET_NR_munlock:
6454 ret = get_errno(munlock(g2h(arg1), arg2));
6455 break;
6456 #endif
6457 #ifdef TARGET_NR_mlockall
6458 case TARGET_NR_mlockall:
6459 ret = get_errno(mlockall(arg1));
6460 break;
6461 #endif
6462 #ifdef TARGET_NR_munlockall
6463 case TARGET_NR_munlockall:
6464 ret = get_errno(munlockall());
6465 break;
6466 #endif
6467 case TARGET_NR_truncate:
6468 if (!(p = lock_user_string(arg1)))
6469 goto efault;
6470 ret = get_errno(truncate(p, arg2));
6471 unlock_user(p, arg1, 0);
6472 break;
6473 case TARGET_NR_ftruncate:
6474 ret = get_errno(ftruncate(arg1, arg2));
6475 break;
6476 case TARGET_NR_fchmod:
6477 ret = get_errno(fchmod(arg1, arg2));
6478 break;
6479 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6480 case TARGET_NR_fchmodat:
6481 if (!(p = lock_user_string(arg2)))
6482 goto efault;
6483 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6484 unlock_user(p, arg2, 0);
6485 break;
6486 #endif
6487 case TARGET_NR_getpriority:
6488 /* Note that negative return values are valid for getpriority, so we
6489 must use errno to distinguish them from real errors. */
6490 errno = 0;
6491 ret = getpriority(arg1, arg2);
6492 if (ret == -1 && errno != 0) {
6493 ret = -host_to_target_errno(errno);
6494 break;
6496 #ifdef TARGET_ALPHA
6497 /* Return value is the unbiased priority. Signal no error. */
6498 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6499 #else
6500 /* Return value is a biased priority to avoid negative numbers. */
6501 ret = 20 - ret;
6502 #endif
6503 break;
6504 case TARGET_NR_setpriority:
6505 ret = get_errno(setpriority(arg1, arg2, arg3));
6506 break;
6507 #ifdef TARGET_NR_profil
6508 case TARGET_NR_profil:
6509 goto unimplemented;
6510 #endif
6511 case TARGET_NR_statfs:
6512 if (!(p = lock_user_string(arg1)))
6513 goto efault;
6514 ret = get_errno(statfs(path(p), &stfs));
6515 unlock_user(p, arg1, 0);
6516 convert_statfs:
6517 if (!is_error(ret)) {
6518 struct target_statfs *target_stfs;
6520 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6521 goto efault;
6522 __put_user(stfs.f_type, &target_stfs->f_type);
6523 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6524 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6525 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6526 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6527 __put_user(stfs.f_files, &target_stfs->f_files);
6528 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6529 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6530 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6531 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6532 unlock_user_struct(target_stfs, arg2, 1);
6534 break;
6535 case TARGET_NR_fstatfs:
6536 ret = get_errno(fstatfs(arg1, &stfs));
6537 goto convert_statfs;
6538 #ifdef TARGET_NR_statfs64
6539 case TARGET_NR_statfs64:
6540 if (!(p = lock_user_string(arg1)))
6541 goto efault;
6542 ret = get_errno(statfs(path(p), &stfs));
6543 unlock_user(p, arg1, 0);
6544 convert_statfs64:
6545 if (!is_error(ret)) {
6546 struct target_statfs64 *target_stfs;
6548 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6549 goto efault;
6550 __put_user(stfs.f_type, &target_stfs->f_type);
6551 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6552 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6553 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6554 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6555 __put_user(stfs.f_files, &target_stfs->f_files);
6556 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6557 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6558 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6559 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6560 unlock_user_struct(target_stfs, arg3, 1);
6562 break;
6563 case TARGET_NR_fstatfs64:
6564 ret = get_errno(fstatfs(arg1, &stfs));
6565 goto convert_statfs64;
6566 #endif
6567 #ifdef TARGET_NR_ioperm
6568 case TARGET_NR_ioperm:
6569 goto unimplemented;
6570 #endif
6571 #ifdef TARGET_NR_socketcall
6572 case TARGET_NR_socketcall:
6573 ret = do_socketcall(arg1, arg2);
6574 break;
6575 #endif
6576 #ifdef TARGET_NR_accept
6577 case TARGET_NR_accept:
6578 ret = do_accept(arg1, arg2, arg3);
6579 break;
6580 #endif
6581 #ifdef TARGET_NR_bind
6582 case TARGET_NR_bind:
6583 ret = do_bind(arg1, arg2, arg3);
6584 break;
6585 #endif
6586 #ifdef TARGET_NR_connect
6587 case TARGET_NR_connect:
6588 ret = do_connect(arg1, arg2, arg3);
6589 break;
6590 #endif
6591 #ifdef TARGET_NR_getpeername
6592 case TARGET_NR_getpeername:
6593 ret = do_getpeername(arg1, arg2, arg3);
6594 break;
6595 #endif
6596 #ifdef TARGET_NR_getsockname
6597 case TARGET_NR_getsockname:
6598 ret = do_getsockname(arg1, arg2, arg3);
6599 break;
6600 #endif
6601 #ifdef TARGET_NR_getsockopt
6602 case TARGET_NR_getsockopt:
6603 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6604 break;
6605 #endif
6606 #ifdef TARGET_NR_listen
6607 case TARGET_NR_listen:
6608 ret = get_errno(listen(arg1, arg2));
6609 break;
6610 #endif
6611 #ifdef TARGET_NR_recv
6612 case TARGET_NR_recv:
6613 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6614 break;
6615 #endif
6616 #ifdef TARGET_NR_recvfrom
6617 case TARGET_NR_recvfrom:
6618 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6619 break;
6620 #endif
6621 #ifdef TARGET_NR_recvmsg
6622 case TARGET_NR_recvmsg:
6623 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6624 break;
6625 #endif
6626 #ifdef TARGET_NR_send
6627 case TARGET_NR_send:
6628 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6629 break;
6630 #endif
6631 #ifdef TARGET_NR_sendmsg
6632 case TARGET_NR_sendmsg:
6633 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6634 break;
6635 #endif
6636 #ifdef TARGET_NR_sendto
6637 case TARGET_NR_sendto:
6638 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6639 break;
6640 #endif
6641 #ifdef TARGET_NR_shutdown
6642 case TARGET_NR_shutdown:
6643 ret = get_errno(shutdown(arg1, arg2));
6644 break;
6645 #endif
6646 #ifdef TARGET_NR_socket
6647 case TARGET_NR_socket:
6648 ret = do_socket(arg1, arg2, arg3);
6649 break;
6650 #endif
6651 #ifdef TARGET_NR_socketpair
6652 case TARGET_NR_socketpair:
6653 ret = do_socketpair(arg1, arg2, arg3, arg4);
6654 break;
6655 #endif
6656 #ifdef TARGET_NR_setsockopt
6657 case TARGET_NR_setsockopt:
6658 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6659 break;
6660 #endif
6662 case TARGET_NR_syslog:
6663 if (!(p = lock_user_string(arg2)))
6664 goto efault;
6665 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6666 unlock_user(p, arg2, 0);
6667 break;
6669 case TARGET_NR_setitimer:
6671 struct itimerval value, ovalue, *pvalue;
6673 if (arg2) {
6674 pvalue = &value;
6675 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6676 || copy_from_user_timeval(&pvalue->it_value,
6677 arg2 + sizeof(struct target_timeval)))
6678 goto efault;
6679 } else {
6680 pvalue = NULL;
6682 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6683 if (!is_error(ret) && arg3) {
6684 if (copy_to_user_timeval(arg3,
6685 &ovalue.it_interval)
6686 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6687 &ovalue.it_value))
6688 goto efault;
6691 break;
6692 case TARGET_NR_getitimer:
6694 struct itimerval value;
6696 ret = get_errno(getitimer(arg1, &value));
6697 if (!is_error(ret) && arg2) {
6698 if (copy_to_user_timeval(arg2,
6699 &value.it_interval)
6700 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6701 &value.it_value))
6702 goto efault;
6705 break;
6706 case TARGET_NR_stat:
6707 if (!(p = lock_user_string(arg1)))
6708 goto efault;
6709 ret = get_errno(stat(path(p), &st));
6710 unlock_user(p, arg1, 0);
6711 goto do_stat;
6712 case TARGET_NR_lstat:
6713 if (!(p = lock_user_string(arg1)))
6714 goto efault;
6715 ret = get_errno(lstat(path(p), &st));
6716 unlock_user(p, arg1, 0);
6717 goto do_stat;
6718 case TARGET_NR_fstat:
6720 ret = get_errno(fstat(arg1, &st));
6721 do_stat:
6722 if (!is_error(ret)) {
6723 struct target_stat *target_st;
6725 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6726 goto efault;
6727 memset(target_st, 0, sizeof(*target_st));
6728 __put_user(st.st_dev, &target_st->st_dev);
6729 __put_user(st.st_ino, &target_st->st_ino);
6730 __put_user(st.st_mode, &target_st->st_mode);
6731 __put_user(st.st_uid, &target_st->st_uid);
6732 __put_user(st.st_gid, &target_st->st_gid);
6733 __put_user(st.st_nlink, &target_st->st_nlink);
6734 __put_user(st.st_rdev, &target_st->st_rdev);
6735 __put_user(st.st_size, &target_st->st_size);
6736 __put_user(st.st_blksize, &target_st->st_blksize);
6737 __put_user(st.st_blocks, &target_st->st_blocks);
6738 __put_user(st.st_atime, &target_st->target_st_atime);
6739 __put_user(st.st_mtime, &target_st->target_st_mtime);
6740 __put_user(st.st_ctime, &target_st->target_st_ctime);
6741 unlock_user_struct(target_st, arg2, 1);
6744 break;
6745 #ifdef TARGET_NR_olduname
6746 case TARGET_NR_olduname:
6747 goto unimplemented;
6748 #endif
6749 #ifdef TARGET_NR_iopl
6750 case TARGET_NR_iopl:
6751 goto unimplemented;
6752 #endif
6753 case TARGET_NR_vhangup:
6754 ret = get_errno(vhangup());
6755 break;
6756 #ifdef TARGET_NR_idle
6757 case TARGET_NR_idle:
6758 goto unimplemented;
6759 #endif
6760 #ifdef TARGET_NR_syscall
6761 case TARGET_NR_syscall:
6762 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6763 arg6, arg7, arg8, 0);
6764 break;
6765 #endif
6766 case TARGET_NR_wait4:
6768 int status;
6769 abi_long status_ptr = arg2;
6770 struct rusage rusage, *rusage_ptr;
6771 abi_ulong target_rusage = arg4;
6772 if (target_rusage)
6773 rusage_ptr = &rusage;
6774 else
6775 rusage_ptr = NULL;
6776 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6777 if (!is_error(ret)) {
6778 if (status_ptr && ret) {
6779 status = host_to_target_waitstatus(status);
6780 if (put_user_s32(status, status_ptr))
6781 goto efault;
6783 if (target_rusage)
6784 host_to_target_rusage(target_rusage, &rusage);
6787 break;
6788 #ifdef TARGET_NR_swapoff
6789 case TARGET_NR_swapoff:
6790 if (!(p = lock_user_string(arg1)))
6791 goto efault;
6792 ret = get_errno(swapoff(p));
6793 unlock_user(p, arg1, 0);
6794 break;
6795 #endif
6796 case TARGET_NR_sysinfo:
6798 struct target_sysinfo *target_value;
6799 struct sysinfo value;
6800 ret = get_errno(sysinfo(&value));
6801 if (!is_error(ret) && arg1)
6803 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6804 goto efault;
6805 __put_user(value.uptime, &target_value->uptime);
6806 __put_user(value.loads[0], &target_value->loads[0]);
6807 __put_user(value.loads[1], &target_value->loads[1]);
6808 __put_user(value.loads[2], &target_value->loads[2]);
6809 __put_user(value.totalram, &target_value->totalram);
6810 __put_user(value.freeram, &target_value->freeram);
6811 __put_user(value.sharedram, &target_value->sharedram);
6812 __put_user(value.bufferram, &target_value->bufferram);
6813 __put_user(value.totalswap, &target_value->totalswap);
6814 __put_user(value.freeswap, &target_value->freeswap);
6815 __put_user(value.procs, &target_value->procs);
6816 __put_user(value.totalhigh, &target_value->totalhigh);
6817 __put_user(value.freehigh, &target_value->freehigh);
6818 __put_user(value.mem_unit, &target_value->mem_unit);
6819 unlock_user_struct(target_value, arg1, 1);
6822 break;
6823 #ifdef TARGET_NR_ipc
6824 case TARGET_NR_ipc:
6825 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6826 break;
6827 #endif
6828 #ifdef TARGET_NR_semget
6829 case TARGET_NR_semget:
6830 ret = get_errno(semget(arg1, arg2, arg3));
6831 break;
6832 #endif
6833 #ifdef TARGET_NR_semop
6834 case TARGET_NR_semop:
6835 ret = get_errno(do_semop(arg1, arg2, arg3));
6836 break;
6837 #endif
6838 #ifdef TARGET_NR_semctl
6839 case TARGET_NR_semctl:
6840 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6841 break;
6842 #endif
6843 #ifdef TARGET_NR_msgctl
6844 case TARGET_NR_msgctl:
6845 ret = do_msgctl(arg1, arg2, arg3);
6846 break;
6847 #endif
6848 #ifdef TARGET_NR_msgget
6849 case TARGET_NR_msgget:
6850 ret = get_errno(msgget(arg1, arg2));
6851 break;
6852 #endif
6853 #ifdef TARGET_NR_msgrcv
6854 case TARGET_NR_msgrcv:
6855 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6856 break;
6857 #endif
6858 #ifdef TARGET_NR_msgsnd
6859 case TARGET_NR_msgsnd:
6860 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6861 break;
6862 #endif
6863 #ifdef TARGET_NR_shmget
6864 case TARGET_NR_shmget:
6865 ret = get_errno(shmget(arg1, arg2, arg3));
6866 break;
6867 #endif
6868 #ifdef TARGET_NR_shmctl
6869 case TARGET_NR_shmctl:
6870 ret = do_shmctl(arg1, arg2, arg3);
6871 break;
6872 #endif
6873 #ifdef TARGET_NR_shmat
6874 case TARGET_NR_shmat:
6875 ret = do_shmat(arg1, arg2, arg3);
6876 break;
6877 #endif
6878 #ifdef TARGET_NR_shmdt
6879 case TARGET_NR_shmdt:
6880 ret = do_shmdt(arg1);
6881 break;
6882 #endif
6883 case TARGET_NR_fsync:
6884 ret = get_errno(fsync(arg1));
6885 break;
6886 case TARGET_NR_clone:
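        /* The kernel's clone argument order differs between architectures
           (the positions of the new stack pointer, the parent/child tid
           pointers and the TLS argument vary), so reorder the registers
           before calling do_fork(). */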
6887 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6888 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6889 #elif defined(TARGET_CRIS)
6890 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6891 #elif defined(TARGET_S390X)
6892 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6893 #else
6894 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6895 #endif
6896 break;
6897 #ifdef __NR_exit_group
6898 /* new thread calls */
6899 case TARGET_NR_exit_group:
6900 #ifdef TARGET_GPROF
6901 _mcleanup();
6902 #endif
6903 gdb_exit(cpu_env, arg1);
6904 ret = get_errno(exit_group(arg1));
6905 break;
6906 #endif
6907 case TARGET_NR_setdomainname:
6908 if (!(p = lock_user_string(arg1)))
6909 goto efault;
6910 ret = get_errno(setdomainname(p, arg2));
6911 unlock_user(p, arg1, 0);
6912 break;
6913 case TARGET_NR_uname:
6914 /* no need to transcode because we use the linux syscall */
6916 struct new_utsname * buf;
6918 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6919 goto efault;
6920 ret = get_errno(sys_uname(buf));
6921 if (!is_error(ret)) {
6922 /* Overwrite the native machine name with whatever is being
6923 emulated. */
6924 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6925 /* Allow the user to override the reported release. */
6926 if (qemu_uname_release && *qemu_uname_release)
6927 strcpy (buf->release, qemu_uname_release);
6929 unlock_user_struct(buf, arg1, 1);
6931 break;
6932 #ifdef TARGET_I386
6933 case TARGET_NR_modify_ldt:
6934 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6935 break;
6936 #if !defined(TARGET_X86_64)
6937 case TARGET_NR_vm86old:
6938 goto unimplemented;
6939 case TARGET_NR_vm86:
6940 ret = do_vm86(cpu_env, arg1, arg2);
6941 break;
6942 #endif
6943 #endif
6944 case TARGET_NR_adjtimex:
6945 goto unimplemented;
6946 #ifdef TARGET_NR_create_module
6947 case TARGET_NR_create_module:
6948 #endif
6949 case TARGET_NR_init_module:
6950 case TARGET_NR_delete_module:
6951 #ifdef TARGET_NR_get_kernel_syms
6952 case TARGET_NR_get_kernel_syms:
6953 #endif
6954 goto unimplemented;
6955 case TARGET_NR_quotactl:
6956 goto unimplemented;
6957 case TARGET_NR_getpgid:
6958 ret = get_errno(getpgid(arg1));
6959 break;
6960 case TARGET_NR_fchdir:
6961 ret = get_errno(fchdir(arg1));
6962 break;
6963 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6964 case TARGET_NR_bdflush:
6965 goto unimplemented;
6966 #endif
6967 #ifdef TARGET_NR_sysfs
6968 case TARGET_NR_sysfs:
6969 goto unimplemented;
6970 #endif
6971 case TARGET_NR_personality:
6972 ret = get_errno(personality(arg1));
6973 break;
6974 #ifdef TARGET_NR_afs_syscall
6975 case TARGET_NR_afs_syscall:
6976 goto unimplemented;
6977 #endif
6978 #ifdef TARGET_NR__llseek /* Not on alpha */
6979 case TARGET_NR__llseek:
6981 int64_t res;
6982 #if !defined(__NR_llseek)
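        /* No host llseek syscall: combine the two 32-bit halves into one
           64-bit offset and use lseek() directly. */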
6983 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6984 if (res == -1) {
6985 ret = get_errno(res);
6986 } else {
6987 ret = 0;
6989 #else
6990 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6991 #endif
6992 if ((ret == 0) && put_user_s64(res, arg4)) {
6993 goto efault;
6996 break;
6997 #endif
6998 case TARGET_NR_getdents:
6999 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
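        /* A 32-bit guest on a 64-bit host: the host kernel fills linux_dirent
           records with 64-bit d_ino/d_off fields, so each record has to be
           repacked into the narrower target_dirent layout. */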
7001 struct target_dirent *target_dirp;
7002 struct linux_dirent *dirp;
7003 abi_long count = arg3;
7005 dirp = malloc(count);
7006 if (!dirp) {
7007 ret = -TARGET_ENOMEM;
7008 goto fail;
7011 ret = get_errno(sys_getdents(arg1, dirp, count));
7012 if (!is_error(ret)) {
7013 struct linux_dirent *de;
7014 struct target_dirent *tde;
7015 int len = ret;
7016 int reclen, treclen;
7017 int count1, tnamelen;
7019 count1 = 0;
7020 de = dirp;
7021 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7022 goto efault;
7023 tde = target_dirp;
7024 while (len > 0) {
7025 reclen = de->d_reclen;
7026 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7027 assert(tnamelen >= 0);
7028 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7029 assert(count1 + treclen <= count);
7030 tde->d_reclen = tswap16(treclen);
7031 tde->d_ino = tswapal(de->d_ino);
7032 tde->d_off = tswapal(de->d_off);
7033 memcpy(tde->d_name, de->d_name, tnamelen);
7034 de = (struct linux_dirent *)((char *)de + reclen);
7035 len -= reclen;
7036 tde = (struct target_dirent *)((char *)tde + treclen);
7037 count1 += treclen;
7039 ret = count1;
7040 unlock_user(target_dirp, arg2, ret);
7042 free(dirp);
7044 #else
7046 struct linux_dirent *dirp;
7047 abi_long count = arg3;
7049 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7050 goto efault;
7051 ret = get_errno(sys_getdents(arg1, dirp, count));
7052 if (!is_error(ret)) {
7053 struct linux_dirent *de;
7054 int len = ret;
7055 int reclen;
7056 de = dirp;
7057 while (len > 0) {
7058 reclen = de->d_reclen;
7059 if (reclen > len)
7060 break;
7061 de->d_reclen = tswap16(reclen);
7062 tswapls(&de->d_ino);
7063 tswapls(&de->d_off);
7064 de = (struct linux_dirent *)((char *)de + reclen);
7065 len -= reclen;
7068 unlock_user(dirp, arg2, ret);
7070 #endif
7071 break;
7072 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7073 case TARGET_NR_getdents64:
7075 struct linux_dirent64 *dirp;
7076 abi_long count = arg3;
7077 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7078 goto efault;
7079 ret = get_errno(sys_getdents64(arg1, dirp, count));
7080 if (!is_error(ret)) {
7081 struct linux_dirent64 *de;
7082 int len = ret;
7083 int reclen;
7084 de = dirp;
7085 while (len > 0) {
7086 reclen = de->d_reclen;
7087 if (reclen > len)
7088 break;
7089 de->d_reclen = tswap16(reclen);
7090 tswap64s((uint64_t *)&de->d_ino);
7091 tswap64s((uint64_t *)&de->d_off);
7092 de = (struct linux_dirent64 *)((char *)de + reclen);
7093 len -= reclen;
7096 unlock_user(dirp, arg2, ret);
7098 break;
7099 #endif /* TARGET_NR_getdents64 */
7100 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
7101 #ifdef TARGET_S390X
7102 case TARGET_NR_select:
7103 #else
7104 case TARGET_NR__newselect:
7105 #endif
7106 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7107 break;
7108 #endif
7109 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7110 # ifdef TARGET_NR_poll
7111 case TARGET_NR_poll:
7112 # endif
7113 # ifdef TARGET_NR_ppoll
7114 case TARGET_NR_ppoll:
7115 # endif
7117 struct target_pollfd *target_pfd;
7118 unsigned int nfds = arg2;
7119 int timeout = arg3;
7120 struct pollfd *pfd;
7121 unsigned int i;
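        /* Convert the guest pollfd array to the host layout before polling
           and copy the revents fields back afterwards; for ppoll also
           convert the optional timeout and signal mask. */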
7123 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7124 if (!target_pfd)
7125 goto efault;
7127 pfd = alloca(sizeof(struct pollfd) * nfds);
7128 for(i = 0; i < nfds; i++) {
7129 pfd[i].fd = tswap32(target_pfd[i].fd);
7130 pfd[i].events = tswap16(target_pfd[i].events);
7133 # ifdef TARGET_NR_ppoll
7134 if (num == TARGET_NR_ppoll) {
7135 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7136 target_sigset_t *target_set;
7137 sigset_t _set, *set = &_set;
7139 if (arg3) {
7140 if (target_to_host_timespec(timeout_ts, arg3)) {
7141 unlock_user(target_pfd, arg1, 0);
7142 goto efault;
7144 } else {
7145 timeout_ts = NULL;
7148 if (arg4) {
7149 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7150 if (!target_set) {
7151 unlock_user(target_pfd, arg1, 0);
7152 goto efault;
7154 target_to_host_sigset(set, target_set);
7155 } else {
7156 set = NULL;
7159 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7161 if (!is_error(ret) && arg3) {
7162 host_to_target_timespec(arg3, timeout_ts);
7164 if (arg4) {
7165 unlock_user(target_set, arg4, 0);
7167 } else
7168 # endif
7169 ret = get_errno(poll(pfd, nfds, timeout));
7171 if (!is_error(ret)) {
7172 for(i = 0; i < nfds; i++) {
7173 target_pfd[i].revents = tswap16(pfd[i].revents);
7176 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7178 break;
7179 #endif
7180 case TARGET_NR_flock:
7181 /* NOTE: the flock constant seems to be the same for every
7182 Linux platform */
7183 ret = get_errno(flock(arg1, arg2));
7184 break;
7185 case TARGET_NR_readv:
7187 int count = arg3;
7188 struct iovec *vec;
7190 vec = alloca(count * sizeof(struct iovec));
7191 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
7192 goto efault;
7193 ret = get_errno(readv(arg1, vec, count));
7194 unlock_iovec(vec, arg2, count, 1);
7196 break;
7197 case TARGET_NR_writev:
7199 int count = arg3;
7200 struct iovec *vec;
7202 vec = alloca(count * sizeof(struct iovec));
7203 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7204 goto efault;
7205 ret = get_errno(writev(arg1, vec, count));
7206 unlock_iovec(vec, arg2, count, 0);
7208 break;
7209 case TARGET_NR_getsid:
7210 ret = get_errno(getsid(arg1));
7211 break;
7212 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7213 case TARGET_NR_fdatasync:
7214 ret = get_errno(fdatasync(arg1));
7215 break;
7216 #endif
7217 case TARGET_NR__sysctl:
7218 /* We don't implement this, but ENOTDIR is always a safe
7219 return value. */
7220 ret = -TARGET_ENOTDIR;
7221 break;
7222 case TARGET_NR_sched_getaffinity:
7224 unsigned int mask_size;
7225 unsigned long *mask;
7228 * sched_getaffinity needs multiples of ulong, so we need to take
7229 * care of mismatches between target ulong and host ulong sizes.
7231 if (arg2 & (sizeof(abi_ulong) - 1)) {
7232 ret = -TARGET_EINVAL;
7233 break;
7235 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7237 mask = alloca(mask_size);
7238 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7240 if (!is_error(ret)) {
7241 if (copy_to_user(arg3, mask, ret)) {
7242 goto efault;
7246 break;
7247 case TARGET_NR_sched_setaffinity:
7249 unsigned int mask_size;
7250 unsigned long *mask;
7253 * sched_setaffinity needs multiples of ulong, so we need to take
7254 * care of mismatches between target ulong and host ulong sizes.
7256 if (arg2 & (sizeof(abi_ulong) - 1)) {
7257 ret = -TARGET_EINVAL;
7258 break;
7260 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7262 mask = alloca(mask_size);
7263 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7264 goto efault;
7266 memcpy(mask, p, arg2);
7267 unlock_user_struct(p, arg3, 0);
7269 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7271 break;
7272 case TARGET_NR_sched_setparam:
7274 struct sched_param *target_schp;
7275 struct sched_param schp;
7277 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7278 goto efault;
7279 schp.sched_priority = tswap32(target_schp->sched_priority);
7280 unlock_user_struct(target_schp, arg2, 0);
7281 ret = get_errno(sched_setparam(arg1, &schp));
7283 break;
7284 case TARGET_NR_sched_getparam:
7286 struct sched_param *target_schp;
7287 struct sched_param schp;
7288 ret = get_errno(sched_getparam(arg1, &schp));
7289 if (!is_error(ret)) {
7290 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7291 goto efault;
7292 target_schp->sched_priority = tswap32(schp.sched_priority);
7293 unlock_user_struct(target_schp, arg2, 1);
7296 break;
7297 case TARGET_NR_sched_setscheduler:
7299 struct sched_param *target_schp;
7300 struct sched_param schp;
7301 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7302 goto efault;
7303 schp.sched_priority = tswap32(target_schp->sched_priority);
7304 unlock_user_struct(target_schp, arg3, 0);
7305 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7307 break;
7308 case TARGET_NR_sched_getscheduler:
7309 ret = get_errno(sched_getscheduler(arg1));
7310 break;
7311 case TARGET_NR_sched_yield:
7312 ret = get_errno(sched_yield());
7313 break;
7314 case TARGET_NR_sched_get_priority_max:
7315 ret = get_errno(sched_get_priority_max(arg1));
7316 break;
7317 case TARGET_NR_sched_get_priority_min:
7318 ret = get_errno(sched_get_priority_min(arg1));
7319 break;
7320 case TARGET_NR_sched_rr_get_interval:
7322 struct timespec ts;
7323 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7324 if (!is_error(ret)) {
7325 host_to_target_timespec(arg2, &ts);
7328 break;
7329 case TARGET_NR_nanosleep:
7331 struct timespec req, rem;
7332 target_to_host_timespec(&req, arg1);
7333 ret = get_errno(nanosleep(&req, &rem));
7334 if (is_error(ret) && arg2) {
7335 host_to_target_timespec(arg2, &rem);
7338 break;
7339 #ifdef TARGET_NR_query_module
7340 case TARGET_NR_query_module:
7341 goto unimplemented;
7342 #endif
7343 #ifdef TARGET_NR_nfsservctl
7344 case TARGET_NR_nfsservctl:
7345 goto unimplemented;
7346 #endif
7347 case TARGET_NR_prctl:
7348 switch (arg1) {
7349 case PR_GET_PDEATHSIG:
7351 int deathsig;
7352 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7353 if (!is_error(ret) && arg2
7354 && put_user_ual(deathsig, arg2)) {
7355 goto efault;
7357 break;
7359 #ifdef PR_GET_NAME
7360 case PR_GET_NAME:
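        /* PR_GET_NAME and PR_SET_NAME operate on a fixed 16-byte
           (TASK_COMM_LEN) buffer, hence the 16-byte lock_user below. */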
7362 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7363 if (!name) {
7364 goto efault;
7366 ret = get_errno(prctl(arg1, (unsigned long)name,
7367 arg3, arg4, arg5));
7368 unlock_user(name, arg2, 16);
7369 break;
7371 case PR_SET_NAME:
7373 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7374 if (!name) {
7375 goto efault;
7377 ret = get_errno(prctl(arg1, (unsigned long)name,
7378 arg3, arg4, arg5));
7379 unlock_user(name, arg2, 0);
7380 break;
7382 #endif
7383 default:
7384 /* Most prctl options have no pointer arguments */
7385 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7386 break;
7388 break;
7389 #ifdef TARGET_NR_arch_prctl
7390 case TARGET_NR_arch_prctl:
7391 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7392 ret = do_arch_prctl(cpu_env, arg1, arg2);
7393 break;
7394 #else
7395 goto unimplemented;
7396 #endif
7397 #endif
7398 #ifdef TARGET_NR_pread
7399 case TARGET_NR_pread:
7400 if (regpairs_aligned(cpu_env))
7401 arg4 = arg5;
7402 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7403 goto efault;
7404 ret = get_errno(pread(arg1, p, arg3, arg4));
7405 unlock_user(p, arg2, ret);
7406 break;
7407 case TARGET_NR_pwrite:
7408 if (regpairs_aligned(cpu_env))
7409 arg4 = arg5;
7410 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7411 goto efault;
7412 ret = get_errno(pwrite(arg1, p, arg3, arg4));
7413 unlock_user(p, arg2, 0);
7414 break;
7415 #endif
7416 #ifdef TARGET_NR_pread64
7417 case TARGET_NR_pread64:
7418 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7419 goto efault;
7420 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7421 unlock_user(p, arg2, ret);
7422 break;
7423 case TARGET_NR_pwrite64:
7424 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7425 goto efault;
7426 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7427 unlock_user(p, arg2, 0);
7428 break;
7429 #endif
7430 case TARGET_NR_getcwd:
7431 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7432 goto efault;
7433 ret = get_errno(sys_getcwd1(p, arg2));
7434 unlock_user(p, arg1, ret);
7435 break;
7436 case TARGET_NR_capget:
7437 goto unimplemented;
7438 case TARGET_NR_capset:
7439 goto unimplemented;
7440 case TARGET_NR_sigaltstack:
7441 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7442 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7443 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7444 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7445 break;
7446 #else
7447 goto unimplemented;
7448 #endif
7449 case TARGET_NR_sendfile:
7450 goto unimplemented;
7451 #ifdef TARGET_NR_getpmsg
7452 case TARGET_NR_getpmsg:
7453 goto unimplemented;
7454 #endif
7455 #ifdef TARGET_NR_putpmsg
7456 case TARGET_NR_putpmsg:
7457 goto unimplemented;
7458 #endif
7459 #ifdef TARGET_NR_vfork
7460 case TARGET_NR_vfork:
7461 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7462 0, 0, 0, 0));
7463 break;
7464 #endif
7465 #ifdef TARGET_NR_ugetrlimit
7466 case TARGET_NR_ugetrlimit:
7468 struct rlimit rlim;
7469 int resource = target_to_host_resource(arg1);
7470 ret = get_errno(getrlimit(resource, &rlim));
7471 if (!is_error(ret)) {
7472 struct target_rlimit *target_rlim;
7473 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7474 goto efault;
7475 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7476 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7477 unlock_user_struct(target_rlim, arg2, 1);
7479 break;
7481 #endif
7482 #ifdef TARGET_NR_truncate64
7483 case TARGET_NR_truncate64:
7484 if (!(p = lock_user_string(arg1)))
7485 goto efault;
7486 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7487 unlock_user(p, arg1, 0);
7488 break;
7489 #endif
7490 #ifdef TARGET_NR_ftruncate64
7491 case TARGET_NR_ftruncate64:
7492 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7493 break;
7494 #endif
7495 #ifdef TARGET_NR_stat64
7496 case TARGET_NR_stat64:
7497 if (!(p = lock_user_string(arg1)))
7498 goto efault;
7499 ret = get_errno(stat(path(p), &st));
7500 unlock_user(p, arg1, 0);
7501 if (!is_error(ret))
7502 ret = host_to_target_stat64(cpu_env, arg2, &st);
7503 break;
7504 #endif
7505 #ifdef TARGET_NR_lstat64
7506 case TARGET_NR_lstat64:
7507 if (!(p = lock_user_string(arg1)))
7508 goto efault;
7509 ret = get_errno(lstat(path(p), &st));
7510 unlock_user(p, arg1, 0);
7511 if (!is_error(ret))
7512 ret = host_to_target_stat64(cpu_env, arg2, &st);
7513 break;
7514 #endif
7515 #ifdef TARGET_NR_fstat64
7516 case TARGET_NR_fstat64:
7517 ret = get_errno(fstat(arg1, &st));
7518 if (!is_error(ret))
7519 ret = host_to_target_stat64(cpu_env, arg2, &st);
7520 break;
7521 #endif
7522 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7523 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7524 #ifdef TARGET_NR_fstatat64
7525 case TARGET_NR_fstatat64:
7526 #endif
7527 #ifdef TARGET_NR_newfstatat
7528 case TARGET_NR_newfstatat:
7529 #endif
7530 if (!(p = lock_user_string(arg2)))
7531 goto efault;
7532 #ifdef __NR_fstatat64
7533 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7534 #else
7535 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7536 #endif
7537 if (!is_error(ret))
7538 ret = host_to_target_stat64(cpu_env, arg3, &st);
7539 break;
7540 #endif
7541 case TARGET_NR_lchown:
7542 if (!(p = lock_user_string(arg1)))
7543 goto efault;
7544 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7545 unlock_user(p, arg1, 0);
7546 break;
7547 #ifdef TARGET_NR_getuid
7548 case TARGET_NR_getuid:
7549 ret = get_errno(high2lowuid(getuid()));
7550 break;
7551 #endif
7552 #ifdef TARGET_NR_getgid
7553 case TARGET_NR_getgid:
7554 ret = get_errno(high2lowgid(getgid()));
7555 break;
7556 #endif
7557 #ifdef TARGET_NR_geteuid
7558 case TARGET_NR_geteuid:
7559 ret = get_errno(high2lowuid(geteuid()));
7560 break;
7561 #endif
7562 #ifdef TARGET_NR_getegid
7563 case TARGET_NR_getegid:
7564 ret = get_errno(high2lowgid(getegid()));
7565 break;
7566 #endif
7567 case TARGET_NR_setreuid:
7568 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7569 break;
7570 case TARGET_NR_setregid:
7571 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7572 break;
7573 case TARGET_NR_getgroups:
7575 int gidsetsize = arg1;
7576 target_id *target_grouplist;
7577 gid_t *grouplist;
7578 int i;
7580 grouplist = alloca(gidsetsize * sizeof(gid_t));
7581 ret = get_errno(getgroups(gidsetsize, grouplist));
7582 if (gidsetsize == 0)
7583 break;
7584 if (!is_error(ret)) {
7585 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7586 if (!target_grouplist)
7587 goto efault;
7588 for(i = 0;i < ret; i++)
7589 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7590 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7593 break;
7594 case TARGET_NR_setgroups:
7596 int gidsetsize = arg1;
7597 target_id *target_grouplist;
7598 gid_t *grouplist;
7599 int i;
7601 grouplist = alloca(gidsetsize * sizeof(gid_t));
7602 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7603 if (!target_grouplist) {
7604 ret = -TARGET_EFAULT;
7605 goto fail;
7607 for(i = 0;i < gidsetsize; i++)
7608 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7609 unlock_user(target_grouplist, arg2, 0);
7610 ret = get_errno(setgroups(gidsetsize, grouplist));
7612 break;
7613 case TARGET_NR_fchown:
7614 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7615 break;
7616 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7617 case TARGET_NR_fchownat:
7618 if (!(p = lock_user_string(arg2)))
7619 goto efault;
7620 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7621 unlock_user(p, arg2, 0);
7622 break;
7623 #endif
7624 #ifdef TARGET_NR_setresuid
7625 case TARGET_NR_setresuid:
7626 ret = get_errno(setresuid(low2highuid(arg1),
7627 low2highuid(arg2),
7628 low2highuid(arg3)));
7629 break;
7630 #endif
7631 #ifdef TARGET_NR_getresuid
7632 case TARGET_NR_getresuid:
7634 uid_t ruid, euid, suid;
7635 ret = get_errno(getresuid(&ruid, &euid, &suid));
7636 if (!is_error(ret)) {
7637 if (put_user_u16(high2lowuid(ruid), arg1)
7638 || put_user_u16(high2lowuid(euid), arg2)
7639 || put_user_u16(high2lowuid(suid), arg3))
7640 goto efault;
7643 break;
7644 #endif
7645 #ifdef TARGET_NR_getresgid
7646 case TARGET_NR_setresgid:
7647 ret = get_errno(setresgid(low2highgid(arg1),
7648 low2highgid(arg2),
7649 low2highgid(arg3)));
7650 break;
7651 #endif
7652 #ifdef TARGET_NR_getresgid
7653 case TARGET_NR_getresgid:
7655 gid_t rgid, egid, sgid;
7656 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7657 if (!is_error(ret)) {
7658 if (put_user_u16(high2lowgid(rgid), arg1)
7659 || put_user_u16(high2lowgid(egid), arg2)
7660 || put_user_u16(high2lowgid(sgid), arg3))
7661 goto efault;
7664 break;
7665 #endif
7666 case TARGET_NR_chown:
7667 if (!(p = lock_user_string(arg1)))
7668 goto efault;
7669 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7670 unlock_user(p, arg1, 0);
7671 break;
7672 case TARGET_NR_setuid:
7673 ret = get_errno(setuid(low2highuid(arg1)));
7674 break;
7675 case TARGET_NR_setgid:
7676 ret = get_errno(setgid(low2highgid(arg1)));
7677 break;
7678 case TARGET_NR_setfsuid:
7679 ret = get_errno(setfsuid(arg1));
7680 break;
7681 case TARGET_NR_setfsgid:
7682 ret = get_errno(setfsgid(arg1));
7683 break;
7685 #ifdef TARGET_NR_lchown32
7686 case TARGET_NR_lchown32:
7687 if (!(p = lock_user_string(arg1)))
7688 goto efault;
7689 ret = get_errno(lchown(p, arg2, arg3));
7690 unlock_user(p, arg1, 0);
7691 break;
7692 #endif
7693 #ifdef TARGET_NR_getuid32
7694 case TARGET_NR_getuid32:
7695 ret = get_errno(getuid());
7696 break;
7697 #endif
7699 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7700 /* Alpha specific */
7701 case TARGET_NR_getxuid:
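        /* getxuid returns the real uid as the syscall result and the
           effective uid in register a4. */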
7703 uid_t euid;
7704 euid = geteuid();
7705 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
7707 ret = get_errno(getuid());
7708 break;
7709 #endif
7710 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7711 /* Alpha specific */
7712 case TARGET_NR_getxgid:
7714 uid_t egid;
7715 egid = getegid();
7716 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
7718 ret = get_errno(getgid());
7719 break;
7720 #endif
7721 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7722 /* Alpha specific */
7723 case TARGET_NR_osf_getsysinfo:
7724 ret = -TARGET_EOPNOTSUPP;
7725 switch (arg1) {
7726 case TARGET_GSI_IEEE_FP_CONTROL:
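        /* Report the IEEE software control/status word, reconstructed
           from the hardware FPCR bits. */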
7728 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7730 /* Copied from linux ieee_fpcr_to_swcr. */
7731 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7732 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7733 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7734 | SWCR_TRAP_ENABLE_DZE
7735 | SWCR_TRAP_ENABLE_OVF);
7736 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7737 | SWCR_TRAP_ENABLE_INE);
7738 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7739 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7741 if (put_user_u64 (swcr, arg2))
7742 goto efault;
7743 ret = 0;
7745 break;
7747 /* case GSI_IEEE_STATE_AT_SIGNAL:
7748 -- Not implemented in linux kernel.
7749 case GSI_UACPROC:
7750 -- Retrieves current unaligned access state; not much used.
7751 case GSI_PROC_TYPE:
7752 -- Retrieves implver information; surely not used.
7753 case GSI_GET_HWRPB:
7754 -- Grabs a copy of the HWRPB; surely not used.
7757 break;
7758 #endif
7759 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7760 /* Alpha specific */
7761 case TARGET_NR_osf_setsysinfo:
7762 ret = -TARGET_EOPNOTSUPP;
7763 switch (arg1) {
7764 case TARGET_SSI_IEEE_FP_CONTROL:
7766 uint64_t swcr, fpcr, orig_fpcr;
7768 if (get_user_u64 (swcr, arg2)) {
7769 goto efault;
7771 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7772 fpcr = orig_fpcr & FPCR_DYN_MASK;
7774 /* Copied from linux ieee_swcr_to_fpcr. */
7775 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7776 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7777 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7778 | SWCR_TRAP_ENABLE_DZE
7779 | SWCR_TRAP_ENABLE_OVF)) << 48;
7780 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7781 | SWCR_TRAP_ENABLE_INE)) << 57;
7782 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7783 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7785 cpu_alpha_store_fpcr(cpu_env, fpcr);
7786 ret = 0;
7788 break;
7790 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7792 uint64_t exc, fpcr, orig_fpcr;
7793 int si_code;
7795 if (get_user_u64(exc, arg2)) {
7796 goto efault;
7799 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7801 /* We only add to the exception status here. */
7802 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7804 cpu_alpha_store_fpcr(cpu_env, fpcr);
7805 ret = 0;
7807 /* Old exceptions are not signaled. */
7808 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7810 /* If any exceptions were set by this call and
7811 are unmasked, send a signal. */
7812 si_code = 0;
7813 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7814 si_code = TARGET_FPE_FLTRES;
7816 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7817 si_code = TARGET_FPE_FLTUND;
7819 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7820 si_code = TARGET_FPE_FLTOVF;
7822 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7823 si_code = TARGET_FPE_FLTDIV;
7825 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7826 si_code = TARGET_FPE_FLTINV;
7828 if (si_code != 0) {
7829 target_siginfo_t info;
7830 info.si_signo = SIGFPE;
7831 info.si_errno = 0;
7832 info.si_code = si_code;
7833 info._sifields._sigfault._addr
7834 = ((CPUArchState *)cpu_env)->pc;
7835 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7838 break;
7840 /* case SSI_NVPAIRS:
7841 -- Used with SSIN_UACPROC to enable unaligned accesses.
7842 case SSI_IEEE_STATE_AT_SIGNAL:
7843 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7844 -- Not implemented in linux kernel
7847 break;
7848 #endif
7849 #ifdef TARGET_NR_osf_sigprocmask
7850 /* Alpha specific. */
7851 case TARGET_NR_osf_sigprocmask:
7853 abi_ulong mask;
7854 int how;
7855 sigset_t set, oldset;
7857 switch(arg1) {
7858 case TARGET_SIG_BLOCK:
7859 how = SIG_BLOCK;
7860 break;
7861 case TARGET_SIG_UNBLOCK:
7862 how = SIG_UNBLOCK;
7863 break;
7864 case TARGET_SIG_SETMASK:
7865 how = SIG_SETMASK;
7866 break;
7867 default:
7868 ret = -TARGET_EINVAL;
7869 goto fail;
7871 mask = arg2;
7872 target_to_host_old_sigset(&set, &mask);
7873 sigprocmask(how, &set, &oldset);
7874 host_to_target_old_sigset(&mask, &oldset);
7875 ret = mask;
7877 break;
7878 #endif
7880 #ifdef TARGET_NR_getgid32
7881 case TARGET_NR_getgid32:
7882 ret = get_errno(getgid());
7883 break;
7884 #endif
7885 #ifdef TARGET_NR_geteuid32
7886 case TARGET_NR_geteuid32:
7887 ret = get_errno(geteuid());
7888 break;
7889 #endif
7890 #ifdef TARGET_NR_getegid32
7891 case TARGET_NR_getegid32:
7892 ret = get_errno(getegid());
7893 break;
7894 #endif
7895 #ifdef TARGET_NR_setreuid32
7896 case TARGET_NR_setreuid32:
7897 ret = get_errno(setreuid(arg1, arg2));
7898 break;
7899 #endif
7900 #ifdef TARGET_NR_setregid32
7901 case TARGET_NR_setregid32:
7902 ret = get_errno(setregid(arg1, arg2));
7903 break;
7904 #endif
7905 #ifdef TARGET_NR_getgroups32
7906 case TARGET_NR_getgroups32:
7908 int gidsetsize = arg1;
7909 uint32_t *target_grouplist;
7910 gid_t *grouplist;
7911 int i;
7913 grouplist = alloca(gidsetsize * sizeof(gid_t));
7914 ret = get_errno(getgroups(gidsetsize, grouplist));
7915 if (gidsetsize == 0)
7916 break;
7917 if (!is_error(ret)) {
7918 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7919 if (!target_grouplist) {
7920 ret = -TARGET_EFAULT;
7921 goto fail;
7923 for(i = 0;i < ret; i++)
7924 target_grouplist[i] = tswap32(grouplist[i]);
7925 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7928 break;
7929 #endif
7930 #ifdef TARGET_NR_setgroups32
7931 case TARGET_NR_setgroups32:
7933 int gidsetsize = arg1;
7934 uint32_t *target_grouplist;
7935 gid_t *grouplist;
7936 int i;
7938 grouplist = alloca(gidsetsize * sizeof(gid_t));
7939 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7940 if (!target_grouplist) {
7941 ret = -TARGET_EFAULT;
7942 goto fail;
7944 for(i = 0;i < gidsetsize; i++)
7945 grouplist[i] = tswap32(target_grouplist[i]);
7946 unlock_user(target_grouplist, arg2, 0);
7947 ret = get_errno(setgroups(gidsetsize, grouplist));
7949 break;
7950 #endif
7951 #ifdef TARGET_NR_fchown32
7952 case TARGET_NR_fchown32:
7953 ret = get_errno(fchown(arg1, arg2, arg3));
7954 break;
7955 #endif
7956 #ifdef TARGET_NR_setresuid32
7957 case TARGET_NR_setresuid32:
7958 ret = get_errno(setresuid(arg1, arg2, arg3));
7959 break;
7960 #endif
7961 #ifdef TARGET_NR_getresuid32
7962 case TARGET_NR_getresuid32:
7964 uid_t ruid, euid, suid;
7965 ret = get_errno(getresuid(&ruid, &euid, &suid));
7966 if (!is_error(ret)) {
7967 if (put_user_u32(ruid, arg1)
7968 || put_user_u32(euid, arg2)
7969 || put_user_u32(suid, arg3))
7970 goto efault;
7973 break;
7974 #endif
7975 #ifdef TARGET_NR_setresgid32
7976 case TARGET_NR_setresgid32:
7977 ret = get_errno(setresgid(arg1, arg2, arg3));
7978 break;
7979 #endif
7980 #ifdef TARGET_NR_getresgid32
7981 case TARGET_NR_getresgid32:
7983 gid_t rgid, egid, sgid;
7984 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7985 if (!is_error(ret)) {
7986 if (put_user_u32(rgid, arg1)
7987 || put_user_u32(egid, arg2)
7988 || put_user_u32(sgid, arg3))
7989 goto efault;
7992 break;
7993 #endif
7994 #ifdef TARGET_NR_chown32
7995 case TARGET_NR_chown32:
7996 if (!(p = lock_user_string(arg1)))
7997 goto efault;
7998 ret = get_errno(chown(p, arg2, arg3));
7999 unlock_user(p, arg1, 0);
8000 break;
8001 #endif
8002 #ifdef TARGET_NR_setuid32
8003 case TARGET_NR_setuid32:
8004 ret = get_errno(setuid(arg1));
8005 break;
8006 #endif
8007 #ifdef TARGET_NR_setgid32
8008 case TARGET_NR_setgid32:
8009 ret = get_errno(setgid(arg1));
8010 break;
8011 #endif
8012 #ifdef TARGET_NR_setfsuid32
8013 case TARGET_NR_setfsuid32:
8014 ret = get_errno(setfsuid(arg1));
8015 break;
8016 #endif
8017 #ifdef TARGET_NR_setfsgid32
8018 case TARGET_NR_setfsgid32:
8019 ret = get_errno(setfsgid(arg1));
8020 break;
8021 #endif
8023 case TARGET_NR_pivot_root:
8024 goto unimplemented;
8025 #ifdef TARGET_NR_mincore
8026 case TARGET_NR_mincore:
8028 void *a;
8029 ret = -TARGET_EFAULT;
8030 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8031 goto efault;
8032 if (!(p = lock_user_string(arg3)))
8033 goto mincore_fail;
8034 ret = get_errno(mincore(a, arg2, p));
8035 unlock_user(p, arg3, ret);
8036 mincore_fail:
8037 unlock_user(a, arg1, 0);
8039 break;
8040 #endif
8041 #ifdef TARGET_NR_arm_fadvise64_64
8042 case TARGET_NR_arm_fadvise64_64:
8043 {
8044 /*
8045 * arm_fadvise64_64 looks like fadvise64_64 but
8046 * with different argument order
8047 */
8048 abi_long temp;
8049 temp = arg3;
8050 arg3 = arg4;
8051 arg4 = temp;
8052 }
8053 #endif
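/* After swapping arg3 and arg4 above, execution falls through into the
 * generic fadvise handling below, which performs the actual advice call. */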
8054 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8055 #ifdef TARGET_NR_fadvise64_64
8056 case TARGET_NR_fadvise64_64:
8057 #endif
8058 #ifdef TARGET_NR_fadvise64
8059 case TARGET_NR_fadvise64:
8060 #endif
8061 #ifdef TARGET_S390X
8062 switch (arg4) {
8063 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8064 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8065 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8066 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8067 default: break;
8069 #endif
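/* posix_fadvise() returns an error number directly instead of setting
 * errno, so its result is negated by hand rather than wrapped in
 * get_errno(). */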
8070 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8071 break;
8072 #endif
8073 #ifdef TARGET_NR_madvise
8074 case TARGET_NR_madvise:
8075 /* A straight passthrough may not be safe because qemu sometimes
8076 turns private file-backed mappings into anonymous mappings.
8077 This will break MADV_DONTNEED.
8078 This is a hint, so ignoring and returning success is ok. */
8079 ret = get_errno(0);
8080 break;
8081 #endif
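/* fcntl64 on 32-bit targets: the F_GETLK64/F_SETLK64/F_SETLKW64 commands
 * convert between the target's 64-bit flock layout (including the ARM
 * EABI variant) and the host's struct flock64; all other commands are
 * forwarded to do_fcntl(). */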
8082 #if TARGET_ABI_BITS == 32
8083 case TARGET_NR_fcntl64:
8085 int cmd;
8086 struct flock64 fl;
8087 struct target_flock64 *target_fl;
8088 #ifdef TARGET_ARM
8089 struct target_eabi_flock64 *target_efl;
8090 #endif
8092 cmd = target_to_host_fcntl_cmd(arg2);
8093 if (cmd == -TARGET_EINVAL) {
8094 ret = cmd;
8095 break;
8098 switch(arg2) {
8099 case TARGET_F_GETLK64:
8100 #ifdef TARGET_ARM
8101 if (((CPUARMState *)cpu_env)->eabi) {
8102 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8103 goto efault;
8104 fl.l_type = tswap16(target_efl->l_type);
8105 fl.l_whence = tswap16(target_efl->l_whence);
8106 fl.l_start = tswap64(target_efl->l_start);
8107 fl.l_len = tswap64(target_efl->l_len);
8108 fl.l_pid = tswap32(target_efl->l_pid);
8109 unlock_user_struct(target_efl, arg3, 0);
8110 } else
8111 #endif
8113 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8114 goto efault;
8115 fl.l_type = tswap16(target_fl->l_type);
8116 fl.l_whence = tswap16(target_fl->l_whence);
8117 fl.l_start = tswap64(target_fl->l_start);
8118 fl.l_len = tswap64(target_fl->l_len);
8119 fl.l_pid = tswap32(target_fl->l_pid);
8120 unlock_user_struct(target_fl, arg3, 0);
8122 ret = get_errno(fcntl(arg1, cmd, &fl));
8123 if (ret == 0) {
8124 #ifdef TARGET_ARM
8125 if (((CPUARMState *)cpu_env)->eabi) {
8126 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8127 goto efault;
8128 target_efl->l_type = tswap16(fl.l_type);
8129 target_efl->l_whence = tswap16(fl.l_whence);
8130 target_efl->l_start = tswap64(fl.l_start);
8131 target_efl->l_len = tswap64(fl.l_len);
8132 target_efl->l_pid = tswap32(fl.l_pid);
8133 unlock_user_struct(target_efl, arg3, 1);
8134 } else
8135 #endif
8137 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8138 goto efault;
8139 target_fl->l_type = tswap16(fl.l_type);
8140 target_fl->l_whence = tswap16(fl.l_whence);
8141 target_fl->l_start = tswap64(fl.l_start);
8142 target_fl->l_len = tswap64(fl.l_len);
8143 target_fl->l_pid = tswap32(fl.l_pid);
8144 unlock_user_struct(target_fl, arg3, 1);
8147 break;
8149 case TARGET_F_SETLK64:
8150 case TARGET_F_SETLKW64:
8151 #ifdef TARGET_ARM
8152 if (((CPUARMState *)cpu_env)->eabi) {
8153 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8154 goto efault;
8155 fl.l_type = tswap16(target_efl->l_type);
8156 fl.l_whence = tswap16(target_efl->l_whence);
8157 fl.l_start = tswap64(target_efl->l_start);
8158 fl.l_len = tswap64(target_efl->l_len);
8159 fl.l_pid = tswap32(target_efl->l_pid);
8160 unlock_user_struct(target_efl, arg3, 0);
8161 } else
8162 #endif
8164 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8165 goto efault;
8166 fl.l_type = tswap16(target_fl->l_type);
8167 fl.l_whence = tswap16(target_fl->l_whence);
8168 fl.l_start = tswap64(target_fl->l_start);
8169 fl.l_len = tswap64(target_fl->l_len);
8170 fl.l_pid = tswap32(target_fl->l_pid);
8171 unlock_user_struct(target_fl, arg3, 0);
8173 ret = get_errno(fcntl(arg1, cmd, &fl));
8174 break;
8175 default:
8176 ret = do_fcntl(arg1, arg2, arg3);
8177 break;
8179 break;
8181 #endif
8182 #ifdef TARGET_NR_cacheflush
8183 case TARGET_NR_cacheflush:
8184 /* self-modifying code is handled automatically, so nothing needed */
8185 ret = 0;
8186 break;
8187 #endif
8188 #ifdef TARGET_NR_security
8189 case TARGET_NR_security:
8190 goto unimplemented;
8191 #endif
8192 #ifdef TARGET_NR_getpagesize
8193 case TARGET_NR_getpagesize:
8194 ret = TARGET_PAGE_SIZE;
8195 break;
8196 #endif
8197 case TARGET_NR_gettid:
8198 ret = get_errno(gettid());
8199 break;
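/* readahead: on 32-bit ABIs the 64-bit file offset arrives as a register
 * pair; when regpairs_aligned() reports that the target inserts a padding
 * slot, the arguments are shifted down by one before the offset is
 * reassembled with a shift-and-or. */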
8200 #ifdef TARGET_NR_readahead
8201 case TARGET_NR_readahead:
8202 #if TARGET_ABI_BITS == 32
8203 if (regpairs_aligned(cpu_env)) {
8204 arg2 = arg3;
8205 arg3 = arg4;
8206 arg4 = arg5;
8208 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8209 #else
8210 ret = get_errno(readahead(arg1, arg2, arg3));
8211 #endif
8212 break;
8213 #endif
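/* Extended attribute syscalls: each case locks the guest path/name strings
 * and, if present, the value buffer, calls the matching host *xattr()
 * function and unlocks everything again; a zero guest pointer is forwarded
 * as NULL so size-probing calls behave as expected. */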
8214 #ifdef CONFIG_ATTR
8215 #ifdef TARGET_NR_setxattr
8216 case TARGET_NR_listxattr:
8217 case TARGET_NR_llistxattr:
8219 void *p, *b = 0;
8220 if (arg2) {
8221 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8222 if (!b) {
8223 ret = -TARGET_EFAULT;
8224 break;
8227 p = lock_user_string(arg1);
8228 if (p) {
8229 if (num == TARGET_NR_listxattr) {
8230 ret = get_errno(listxattr(p, b, arg3));
8231 } else {
8232 ret = get_errno(llistxattr(p, b, arg3));
8234 } else {
8235 ret = -TARGET_EFAULT;
8237 unlock_user(p, arg1, 0);
8238 unlock_user(b, arg2, arg3);
8239 break;
8241 case TARGET_NR_flistxattr:
8243 void *b = 0;
8244 if (arg2) {
8245 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8246 if (!b) {
8247 ret = -TARGET_EFAULT;
8248 break;
8251 ret = get_errno(flistxattr(arg1, b, arg3));
8252 unlock_user(b, arg2, arg3);
8253 break;
8255 case TARGET_NR_setxattr:
8256 case TARGET_NR_lsetxattr:
8258 void *p, *n, *v = 0;
8259 if (arg3) {
8260 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8261 if (!v) {
8262 ret = -TARGET_EFAULT;
8263 break;
8266 p = lock_user_string(arg1);
8267 n = lock_user_string(arg2);
8268 if (p && n) {
8269 if (num == TARGET_NR_setxattr) {
8270 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8271 } else {
8272 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8274 } else {
8275 ret = -TARGET_EFAULT;
8277 unlock_user(p, arg1, 0);
8278 unlock_user(n, arg2, 0);
8279 unlock_user(v, arg3, 0);
8281 break;
8282 case TARGET_NR_fsetxattr:
8284 void *n, *v = 0;
8285 if (arg3) {
8286 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8287 if (!v) {
8288 ret = -TARGET_EFAULT;
8289 break;
8292 n = lock_user_string(arg2);
8293 if (n) {
8294 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8295 } else {
8296 ret = -TARGET_EFAULT;
8298 unlock_user(n, arg2, 0);
8299 unlock_user(v, arg3, 0);
8301 break;
8302 case TARGET_NR_getxattr:
8303 case TARGET_NR_lgetxattr:
8305 void *p, *n, *v = 0;
8306 if (arg3) {
8307 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8308 if (!v) {
8309 ret = -TARGET_EFAULT;
8310 break;
8313 p = lock_user_string(arg1);
8314 n = lock_user_string(arg2);
8315 if (p && n) {
8316 if (num == TARGET_NR_getxattr) {
8317 ret = get_errno(getxattr(p, n, v, arg4));
8318 } else {
8319 ret = get_errno(lgetxattr(p, n, v, arg4));
8321 } else {
8322 ret = -TARGET_EFAULT;
8324 unlock_user(p, arg1, 0);
8325 unlock_user(n, arg2, 0);
8326 unlock_user(v, arg3, arg4);
8328 break;
8329 case TARGET_NR_fgetxattr:
8331 void *n, *v = 0;
8332 if (arg3) {
8333 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8334 if (!v) {
8335 ret = -TARGET_EFAULT;
8336 break;
8339 n = lock_user_string(arg2);
8340 if (n) {
8341 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8342 } else {
8343 ret = -TARGET_EFAULT;
8345 unlock_user(n, arg2, 0);
8346 unlock_user(v, arg3, arg4);
8348 break;
8349 case TARGET_NR_removexattr:
8350 case TARGET_NR_lremovexattr:
8352 void *p, *n;
8353 p = lock_user_string(arg1);
8354 n = lock_user_string(arg2);
8355 if (p && n) {
8356 if (num == TARGET_NR_removexattr) {
8357 ret = get_errno(removexattr(p, n));
8358 } else {
8359 ret = get_errno(lremovexattr(p, n));
8361 } else {
8362 ret = -TARGET_EFAULT;
8364 unlock_user(p, arg1, 0);
8365 unlock_user(n, arg2, 0);
8367 break;
8368 case TARGET_NR_fremovexattr:
8370 void *n;
8371 n = lock_user_string(arg2);
8372 if (n) {
8373 ret = get_errno(fremovexattr(arg1, n));
8374 } else {
8375 ret = -TARGET_EFAULT;
8377 unlock_user(n, arg2, 0);
8379 break;
8380 #endif
8381 #endif /* CONFIG_ATTR */
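/* set_thread_area is handled per target: MIPS stores the TLS value in the
 * CPU state, CRIS writes it to the PR_PID register (rejecting values with
 * any of the low 8 bits set), and 32-bit x86 goes through
 * do_set_thread_area(); other targets report it as unimplemented. */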
8382 #ifdef TARGET_NR_set_thread_area
8383 case TARGET_NR_set_thread_area:
8384 #if defined(TARGET_MIPS)
8385 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8386 ret = 0;
8387 break;
8388 #elif defined(TARGET_CRIS)
8389 if (arg1 & 0xff)
8390 ret = -TARGET_EINVAL;
8391 else {
8392 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8393 ret = 0;
8395 break;
8396 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8397 ret = do_set_thread_area(cpu_env, arg1);
8398 break;
8399 #else
8400 goto unimplemented_nowarn;
8401 #endif
8402 #endif
8403 #ifdef TARGET_NR_get_thread_area
8404 case TARGET_NR_get_thread_area:
8405 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8406 ret = do_get_thread_area(cpu_env, arg1);
break;
8407 #else
8408 goto unimplemented_nowarn;
8409 #endif
8410 #endif
8411 #ifdef TARGET_NR_getdomainname
8412 case TARGET_NR_getdomainname:
8413 goto unimplemented_nowarn;
8414 #endif
8416 #ifdef TARGET_NR_clock_gettime
8417 case TARGET_NR_clock_gettime:
8419 struct timespec ts;
8420 ret = get_errno(clock_gettime(arg1, &ts));
8421 if (!is_error(ret)) {
8422 host_to_target_timespec(arg2, &ts);
8424 break;
8426 #endif
8427 #ifdef TARGET_NR_clock_getres
8428 case TARGET_NR_clock_getres:
8430 struct timespec ts;
8431 ret = get_errno(clock_getres(arg1, &ts));
8432 if (!is_error(ret)) {
8433 host_to_target_timespec(arg2, &ts);
8435 break;
8437 #endif
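/* clock_nanosleep reuses the same timespec for the request and the
 * remaining time; when the guest supplied a "rem" pointer in arg4, the
 * leftover time is converted back to target format after the call. */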
8438 #ifdef TARGET_NR_clock_nanosleep
8439 case TARGET_NR_clock_nanosleep:
8441 struct timespec ts;
8442 target_to_host_timespec(&ts, arg3);
8443 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8444 if (arg4)
8445 host_to_target_timespec(arg4, &ts);
8446 break;
8448 #endif
8450 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8451 case TARGET_NR_set_tid_address:
8452 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8453 break;
8454 #endif
8456 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8457 case TARGET_NR_tkill:
8458 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8459 break;
8460 #endif
8462 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8463 case TARGET_NR_tgkill:
8464 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8465 target_to_host_signal(arg3)));
8466 break;
8467 #endif
8469 #ifdef TARGET_NR_set_robust_list
8470 case TARGET_NR_set_robust_list:
8471 goto unimplemented_nowarn;
8472 #endif
8474 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8475 case TARGET_NR_utimensat:
8477 struct timespec *tsp, ts[2];
8478 if (!arg3) {
8479 tsp = NULL;
8480 } else {
8481 target_to_host_timespec(ts, arg3);
8482 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8483 tsp = ts;
8485 if (!arg2)
8486 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8487 else {
8488 if (!(p = lock_user_string(arg2))) {
8489 ret = -TARGET_EFAULT;
8490 goto fail;
8492 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8493 unlock_user(p, arg2, 0);
8496 break;
8497 #endif
8498 #if defined(CONFIG_USE_NPTL)
8499 case TARGET_NR_futex:
8500 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8501 break;
8502 #endif
8503 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8504 case TARGET_NR_inotify_init:
8505 ret = get_errno(sys_inotify_init());
8506 break;
8507 #endif
8508 #ifdef CONFIG_INOTIFY1
8509 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8510 case TARGET_NR_inotify_init1:
8511 ret = get_errno(sys_inotify_init1(arg1));
8512 break;
8513 #endif
8514 #endif
8515 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8516 case TARGET_NR_inotify_add_watch:
8517 p = lock_user_string(arg2);
8518 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8519 unlock_user(p, arg2, 0);
8520 break;
8521 #endif
8522 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8523 case TARGET_NR_inotify_rm_watch:
8524 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8525 break;
8526 #endif
8528 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8529 case TARGET_NR_mq_open:
8531 struct mq_attr posix_mq_attr;
8533 p = lock_user_string(arg1);
8534 if (arg4 != 0)
8535 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8536 ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL));
8537 unlock_user (p, arg1, 0);
8539 break;
8541 case TARGET_NR_mq_unlink:
8542 p = lock_user_string(arg1);
8543 ret = get_errno(mq_unlink(p));
8544 unlock_user (p, arg1, 0);
8545 break;
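/* mq_timedsend/mq_timedreceive: when the guest supplies a timeout it is
 * converted to a host timespec and the timed variant is used; with a NULL
 * timeout the plain mq_send()/mq_receive() calls are issued instead. */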
8547 case TARGET_NR_mq_timedsend:
8549 struct timespec ts;
8551 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8552 if (arg5 != 0) {
8553 target_to_host_timespec(&ts, arg5);
8554 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8555 host_to_target_timespec(arg5, &ts);
8557 else
8558 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8559 unlock_user (p, arg2, arg3);
8561 break;
8563 case TARGET_NR_mq_timedreceive:
8565 struct timespec ts;
8566 unsigned int prio;
8568 p = lock_user (VERIFY_WRITE, arg2, arg3, 1);
8569 if (arg5 != 0) {
8570 target_to_host_timespec(&ts, arg5);
8571 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8572 host_to_target_timespec(arg5, &ts);
8574 else
8575 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8576 unlock_user (p, arg2, arg3);
8577 if (arg4 != 0)
8578 put_user_u32(prio, arg4);
8580 break;
8582 /* Not implemented for now... */
8583 /* case TARGET_NR_mq_notify: */
8584 /* break; */
8586 case TARGET_NR_mq_getsetattr:
8588 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8589 ret = 0;
8590 if (arg3 != 0) {
8591 ret = mq_getattr(arg1, &posix_mq_attr_out);
8592 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8594 if (arg2 != 0) {
8595 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8596 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8600 break;
8601 #endif
8603 #ifdef CONFIG_SPLICE
8604 #ifdef TARGET_NR_tee
8605 case TARGET_NR_tee:
8607 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8609 break;
8610 #endif
8611 #ifdef TARGET_NR_splice
8612 case TARGET_NR_splice:
8614 loff_t loff_in, loff_out;
8615 loff_t *ploff_in = NULL, *ploff_out = NULL;
8616 if(arg2) {
8617 get_user_u64(loff_in, arg2);
8618 ploff_in = &loff_in;
8620 if(arg4) {
8621 get_user_u64(loff_out, arg4);
8622 ploff_out = &loff_out;
8624 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8626 break;
8627 #endif
8628 #ifdef TARGET_NR_vmsplice
8629 case TARGET_NR_vmsplice:
8631 int count = arg3;
8632 struct iovec *vec;
8634 vec = alloca(count * sizeof(struct iovec));
8635 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
8636 goto efault;
8637 ret = get_errno(vmsplice(arg1, vec, count, arg4));
8638 unlock_iovec(vec, arg2, count, 0);
8640 break;
8641 #endif
8642 #endif /* CONFIG_SPLICE */
8643 #ifdef CONFIG_EVENTFD
8644 #if defined(TARGET_NR_eventfd)
8645 case TARGET_NR_eventfd:
8646 ret = get_errno(eventfd(arg1, 0));
8647 break;
8648 #endif
8649 #if defined(TARGET_NR_eventfd2)
8650 case TARGET_NR_eventfd2:
8651 ret = get_errno(eventfd(arg1, arg2));
8652 break;
8653 #endif
8654 #endif /* CONFIG_EVENTFD */
8655 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8656 case TARGET_NR_fallocate:
8657 #if TARGET_ABI_BITS == 32
8658 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8659 target_offset64(arg5, arg6)));
8660 #else
8661 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8662 #endif
8663 break;
8664 #endif
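/* sync_file_range: on 32-bit ABIs the two 64-bit values are rebuilt from
 * register pairs with target_offset64(); MIPS starts them at arg3 because
 * its 32-bit calling convention aligns 64-bit arguments to even register
 * pairs, leaving a padding slot after the fd. */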
8665 #if defined(CONFIG_SYNC_FILE_RANGE)
8666 #if defined(TARGET_NR_sync_file_range)
8667 case TARGET_NR_sync_file_range:
8668 #if TARGET_ABI_BITS == 32
8669 #if defined(TARGET_MIPS)
8670 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8671 target_offset64(arg5, arg6), arg7));
8672 #else
8673 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8674 target_offset64(arg4, arg5), arg6));
8675 #endif /* !TARGET_MIPS */
8676 #else
8677 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8678 #endif
8679 break;
8680 #endif
8681 #if defined(TARGET_NR_sync_file_range2)
8682 case TARGET_NR_sync_file_range2:
8683 /* This is like sync_file_range but the arguments are reordered */
8684 #if TARGET_ABI_BITS == 32
8685 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8686 target_offset64(arg5, arg6), arg2));
8687 #else
8688 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8689 #endif
8690 break;
8691 #endif
8692 #endif
8693 #if defined(CONFIG_EPOLL)
8694 #if defined(TARGET_NR_epoll_create)
8695 case TARGET_NR_epoll_create:
8696 ret = get_errno(epoll_create(arg1));
8697 break;
8698 #endif
8699 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8700 case TARGET_NR_epoll_create1:
8701 ret = get_errno(epoll_create1(arg1));
8702 break;
8703 #endif
8704 #if defined(TARGET_NR_epoll_ctl)
8705 case TARGET_NR_epoll_ctl:
8707 struct epoll_event ep;
8708 struct epoll_event *epp = 0;
8709 if (arg4) {
8710 struct target_epoll_event *target_ep;
8711 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8712 goto efault;
8714 ep.events = tswap32(target_ep->events);
8715 /* The epoll_data_t union is just opaque data to the kernel,
8716 * so we transfer all 64 bits across and need not worry what
8717 * actual data type it is.
8718 */
8719 ep.data.u64 = tswap64(target_ep->data.u64);
8720 unlock_user_struct(target_ep, arg4, 0);
8721 epp = &ep;
8723 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8724 break;
8726 #endif
8728 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8729 #define IMPLEMENT_EPOLL_PWAIT
8730 #endif
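/* epoll_wait and epoll_pwait share one body: the guest event array is
 * locked for writing, a host-side array is built on the stack, the actual
 * call is selected by switching on "num", and the resulting events are
 * byte-swapped back into guest memory. */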
8731 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8732 #if defined(TARGET_NR_epoll_wait)
8733 case TARGET_NR_epoll_wait:
8734 #endif
8735 #if defined(IMPLEMENT_EPOLL_PWAIT)
8736 case TARGET_NR_epoll_pwait:
8737 #endif
8739 struct target_epoll_event *target_ep;
8740 struct epoll_event *ep;
8741 int epfd = arg1;
8742 int maxevents = arg3;
8743 int timeout = arg4;
8745 target_ep = lock_user(VERIFY_WRITE, arg2,
8746 maxevents * sizeof(struct target_epoll_event), 1);
8747 if (!target_ep) {
8748 goto efault;
8751 ep = alloca(maxevents * sizeof(struct epoll_event));
8753 switch (num) {
8754 #if defined(IMPLEMENT_EPOLL_PWAIT)
8755 case TARGET_NR_epoll_pwait:
8757 target_sigset_t *target_set;
8758 sigset_t _set, *set = &_set;
8760 if (arg5) {
8761 target_set = lock_user(VERIFY_READ, arg5,
8762 sizeof(target_sigset_t), 1);
8763 if (!target_set) {
8764 unlock_user(target_ep, arg2, 0);
8765 goto efault;
8767 target_to_host_sigset(set, target_set);
8768 unlock_user(target_set, arg5, 0);
8769 } else {
8770 set = NULL;
8773 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8774 break;
8776 #endif
8777 #if defined(TARGET_NR_epoll_wait)
8778 case TARGET_NR_epoll_wait:
8779 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8780 break;
8781 #endif
8782 default:
8783 ret = -TARGET_ENOSYS;
8785 if (!is_error(ret)) {
8786 int i;
8787 for (i = 0; i < ret; i++) {
8788 target_ep[i].events = tswap32(ep[i].events);
8789 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8792 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8793 break;
8795 #endif
8796 #endif
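/* prlimit64: a new limit, if given, is converted from the target's
 * rlimit64 layout with tswap64(), sys_prlimit64() is called, and the old
 * limit is converted back when the guest asked for it via arg4. */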
8797 #ifdef TARGET_NR_prlimit64
8798 case TARGET_NR_prlimit64:
8800 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8801 struct target_rlimit64 *target_rnew, *target_rold;
8802 struct host_rlimit64 rnew, rold, *rnewp = 0;
8803 if (arg3) {
8804 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8805 goto efault;
8807 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8808 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8809 unlock_user_struct(target_rnew, arg3, 0);
8810 rnewp = &rnew;
8813 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8814 if (!is_error(ret) && arg4) {
8815 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8816 goto efault;
8818 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8819 target_rold->rlim_max = tswap64(rold.rlim_max);
8820 unlock_user_struct(target_rold, arg4, 1);
8822 break;
8824 #endif
8825 default:
8826 unimplemented:
8827 gemu_log("qemu: Unsupported syscall: %d\n", num);
8828 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8829 unimplemented_nowarn:
8830 #endif
8831 ret = -TARGET_ENOSYS;
8832 break;
8834 fail:
8835 #ifdef DEBUG
8836 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8837 #endif
8838 if(do_strace)
8839 print_syscall_ret(num, ret);
8840 return ret;
8841 efault:
8842 ret = -TARGET_EFAULT;
8843 goto fail;