1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include <qemu-common.h>
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/utsname.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #include <linux/vt.h>
95 #include "linux_loop.h"
96 #include "cpu-uname.h"
98 #include "qemu.h"
99 #include "qemu-common.h"
101 #if defined(CONFIG_USE_NPTL)
102 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
103 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
104 #else
105 /* XXX: Hardcode the above values. */
106 #define CLONE_NPTL_FLAGS2 0
107 #endif
109 //#define DEBUG
111 //#include <linux/msdos_fs.h>
112 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
113 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
116 #undef _syscall0
117 #undef _syscall1
118 #undef _syscall2
119 #undef _syscall3
120 #undef _syscall4
121 #undef _syscall5
122 #undef _syscall6
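/*
 * The _syscallN() macros defined below generate small static wrappers that
 * invoke the host syscall(2) directly, so syscalls that the host libc does
 * not wrap can still be issued.  For example, _syscall0(int, gettid) used
 * further down expands roughly to:
 *
 *     static int gettid(void)
 *     {
 *         return syscall(__NR_gettid);
 *     }
 *
 * On failure these wrappers return -1 with the *host* errno set; callers
 * convert that to a target errno with get_errno().
 */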
124 #define _syscall0(type,name) \
125 static type name (void) \
127 return syscall(__NR_##name); \
130 #define _syscall1(type,name,type1,arg1) \
131 static type name (type1 arg1) \
133 return syscall(__NR_##name, arg1); \
136 #define _syscall2(type,name,type1,arg1,type2,arg2) \
137 static type name (type1 arg1,type2 arg2) \
139 return syscall(__NR_##name, arg1, arg2); \
142 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
143 static type name (type1 arg1,type2 arg2,type3 arg3) \
145 return syscall(__NR_##name, arg1, arg2, arg3); \
148 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
149 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
151 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
154 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
155 type5,arg5) \
156 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
158 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
162 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5,type6,arg6) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
165 type6 arg6) \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
171 #define __NR_sys_uname __NR_uname
172 #define __NR_sys_faccessat __NR_faccessat
173 #define __NR_sys_fchmodat __NR_fchmodat
174 #define __NR_sys_fchownat __NR_fchownat
175 #define __NR_sys_fstatat64 __NR_fstatat64
176 #define __NR_sys_futimesat __NR_futimesat
177 #define __NR_sys_getcwd1 __NR_getcwd
178 #define __NR_sys_getdents __NR_getdents
179 #define __NR_sys_getdents64 __NR_getdents64
180 #define __NR_sys_getpriority __NR_getpriority
181 #define __NR_sys_linkat __NR_linkat
182 #define __NR_sys_mkdirat __NR_mkdirat
183 #define __NR_sys_mknodat __NR_mknodat
184 #define __NR_sys_newfstatat __NR_newfstatat
185 #define __NR_sys_openat __NR_openat
186 #define __NR_sys_readlinkat __NR_readlinkat
187 #define __NR_sys_renameat __NR_renameat
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_symlinkat __NR_symlinkat
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_unlinkat __NR_unlinkat
194 #define __NR_sys_utimensat __NR_utimensat
195 #define __NR_sys_futex __NR_futex
196 #define __NR_sys_inotify_init __NR_inotify_init
197 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
198 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
200 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
201 defined(__s390x__)
202 #define __NR__llseek __NR_lseek
203 #endif
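/*
 * The 64-bit hosts listed above have no _llseek syscall; their lseek
 * already takes a 64-bit offset, so the guest's llseek requests are routed
 * to the host lseek instead.
 */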
205 #ifdef __NR_gettid
206 _syscall0(int, gettid)
207 #else
208 /* This is a replacement for the host gettid() and must return a host
209 errno. */
210 static int gettid(void) {
211 return -ENOSYS;
213 #endif
214 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
215 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
216 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
217 #endif
218 _syscall2(int, sys_getpriority, int, which, int, who);
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
221 loff_t *, res, uint, wh);
222 #endif
223 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
224 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
225 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
226 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
227 #endif
228 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
229 _syscall2(int,sys_tkill,int,tid,int,sig)
230 #endif
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
233 #endif
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
236 #endif
237 #if defined(CONFIG_USE_NPTL)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
240 const struct timespec *,timeout,int *,uaddr2,int,val3)
241 #endif
242 #endif
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
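/*
 * fcntl_flags_tbl below maps open()/fcntl() flag bits between the target
 * ABI and the host: each entry holds a target mask/value pair and the
 * corresponding host mask/value pair, and helpers such as
 * target_to_host_bitmask() scan it entry by entry.  The all-zero entry
 * terminates the table.
 */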
250 static bitmask_transtbl fcntl_flags_tbl[] = {
251 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
252 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
253 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
254 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
255 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
256 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
257 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
258 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
259 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
260 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
261 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
262 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
263 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
264 #if defined(O_DIRECT)
265 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
266 #endif
267 { 0, 0, 0, 0 }
270 #define COPY_UTSNAME_FIELD(dest, src) \
271 do { \
272 /* __NEW_UTS_LEN doesn't include terminating null */ \
273 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
274 (dest)[__NEW_UTS_LEN] = '\0'; \
275 } while (0)
277 static int sys_uname(struct new_utsname *buf)
279 struct utsname uts_buf;
281 if (uname(&uts_buf) < 0)
282 return (-1);
285 * Just in case these have some differences, we
286 * translate utsname to new_utsname (which is the
287 * struct the Linux kernel uses).
290 memset(buf, 0, sizeof(*buf));
291 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
292 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
293 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
294 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
295 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
296 #ifdef _GNU_SOURCE
297 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
298 #endif
299 return (0);
301 #undef COPY_UTSNAME_FIELD
304 static int sys_getcwd1(char *buf, size_t size)
306 if (getcwd(buf, size) == NULL) {
307 /* getcwd() sets errno */
308 return (-1);
310 return strlen(buf)+1;
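/*
 * The getcwd syscall (unlike the libc wrapper, which returns a pointer)
 * reports the number of bytes stored in the buffer including the trailing
 * NUL, hence the strlen()+1 return value here.
 */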
313 #ifdef CONFIG_ATFILE
315 * The host system seems to have atfile syscall stubs available. We
316 * now enable them one by one, as specified by the target's syscall_nr.h.
319 #ifdef TARGET_NR_faccessat
320 static int sys_faccessat(int dirfd, const char *pathname, int mode)
322 return (faccessat(dirfd, pathname, mode, 0));
324 #endif
325 #ifdef TARGET_NR_fchmodat
326 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
328 return (fchmodat(dirfd, pathname, mode, 0));
330 #endif
331 #if defined(TARGET_NR_fchownat)
332 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
333 gid_t group, int flags)
335 return (fchownat(dirfd, pathname, owner, group, flags));
337 #endif
338 #ifdef __NR_fstatat64
339 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
340 int flags)
342 return (fstatat(dirfd, pathname, buf, flags));
344 #endif
345 #ifdef __NR_newfstatat
346 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
347 int flags)
349 return (fstatat(dirfd, pathname, buf, flags));
351 #endif
352 #ifdef TARGET_NR_futimesat
353 static int sys_futimesat(int dirfd, const char *pathname,
354 const struct timeval times[2])
356 return (futimesat(dirfd, pathname, times));
358 #endif
359 #ifdef TARGET_NR_linkat
360 static int sys_linkat(int olddirfd, const char *oldpath,
361 int newdirfd, const char *newpath, int flags)
363 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
365 #endif
366 #ifdef TARGET_NR_mkdirat
367 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
369 return (mkdirat(dirfd, pathname, mode));
371 #endif
372 #ifdef TARGET_NR_mknodat
373 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
374 dev_t dev)
376 return (mknodat(dirfd, pathname, mode, dev));
378 #endif
379 #ifdef TARGET_NR_openat
380 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
383 * open(2) takes an extra 'mode' parameter when called with
384 * the O_CREAT flag.
386 if ((flags & O_CREAT) != 0) {
387 va_list ap;
388 mode_t mode;
391 * Get the 'mode' parameter and translate it to
392 * host bits.
394 va_start(ap, flags);
395 mode = va_arg(ap, mode_t);
396 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
397 va_end(ap);
399 return (openat(dirfd, pathname, flags, mode));
401 return (openat(dirfd, pathname, flags));
403 #endif
404 #ifdef TARGET_NR_readlinkat
405 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
407 return (readlinkat(dirfd, pathname, buf, bufsiz));
409 #endif
410 #ifdef TARGET_NR_renameat
411 static int sys_renameat(int olddirfd, const char *oldpath,
412 int newdirfd, const char *newpath)
414 return (renameat(olddirfd, oldpath, newdirfd, newpath));
416 #endif
417 #ifdef TARGET_NR_symlinkat
418 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
420 return (symlinkat(oldpath, newdirfd, newpath));
422 #endif
423 #ifdef TARGET_NR_unlinkat
424 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
426 return (unlinkat(dirfd, pathname, flags));
428 #endif
429 #else /* !CONFIG_ATFILE */
432 * Try direct syscalls instead
434 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
435 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
436 #endif
437 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
438 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
439 #endif
440 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
441 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
442 uid_t,owner,gid_t,group,int,flags)
443 #endif
444 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
445 defined(__NR_fstatat64)
446 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
447 struct stat *,buf,int,flags)
448 #endif
449 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
450 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
451 const struct timeval *,times)
452 #endif
453 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
454 defined(__NR_newfstatat)
455 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
456 struct stat *,buf,int,flags)
457 #endif
458 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
459 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
460 int,newdirfd,const char *,newpath,int,flags)
461 #endif
462 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
463 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
464 #endif
465 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
466 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
467 mode_t,mode,dev_t,dev)
468 #endif
469 #if defined(TARGET_NR_openat) && defined(__NR_openat)
470 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
471 #endif
472 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
473 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
474 char *,buf,size_t,bufsize)
475 #endif
476 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
477 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
478 int,newdirfd,const char *,newpath)
479 #endif
480 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
481 _syscall3(int,sys_symlinkat,const char *,oldpath,
482 int,newdirfd,const char *,newpath)
483 #endif
484 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
485 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
486 #endif
488 #endif /* CONFIG_ATFILE */
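/*
 * Summary of the two paths above: when configure found working *at()
 * wrappers in the host libc (CONFIG_ATFILE), the sys_*at() helpers simply
 * call them; otherwise the raw host syscalls are issued via the _syscallN()
 * macros, guarded on both the target and the host defining the
 * corresponding syscall number.
 */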
490 #ifdef CONFIG_UTIMENSAT
491 static int sys_utimensat(int dirfd, const char *pathname,
492 const struct timespec times[2], int flags)
494 if (pathname == NULL)
495 return futimens(dirfd, times);
496 else
497 return utimensat(dirfd, pathname, times, flags);
499 #else
500 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
501 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
502 const struct timespec *,tsp,int,flags)
503 #endif
504 #endif /* CONFIG_UTIMENSAT */
506 #ifdef CONFIG_INOTIFY
507 #include <sys/inotify.h>
509 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
510 static int sys_inotify_init(void)
512 return (inotify_init());
514 #endif
515 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
516 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
518 return (inotify_add_watch(fd, pathname, mask));
520 #endif
521 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
522 static int sys_inotify_rm_watch(int fd, int32_t wd)
524 return (inotify_rm_watch(fd, wd));
526 #endif
527 #ifdef CONFIG_INOTIFY1
528 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
529 static int sys_inotify_init1(int flags)
531 return (inotify_init1(flags));
533 #endif
534 #endif
535 #else
536 /* Userspace can usually survive runtime without inotify */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY */
543 #if defined(TARGET_NR_ppoll)
544 #ifndef __NR_ppoll
545 # define __NR_ppoll -1
546 #endif
547 #define __NR_sys_ppoll __NR_ppoll
548 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
549 struct timespec *, timeout, const __sigset_t *, sigmask,
550 size_t, sigsetsize)
551 #endif
553 #if defined(TARGET_NR_pselect6)
554 #ifndef __NR_pselect6
555 # define __NR_pselect6 -1
556 #endif
557 #define __NR_sys_pselect6 __NR_pselect6
558 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
559 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
560 #endif
562 #if defined(TARGET_NR_prlimit64)
563 #ifndef __NR_prlimit64
564 # define __NR_prlimit64 -1
565 #endif
566 #define __NR_sys_prlimit64 __NR_prlimit64
567 /* The glibc rlimit structure may not be the one used by the underlying syscall */
568 struct host_rlimit64 {
569 uint64_t rlim_cur;
570 uint64_t rlim_max;
572 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
573 const struct host_rlimit64 *, new_limit,
574 struct host_rlimit64 *, old_limit)
575 #endif
577 extern int personality(int);
578 extern int flock(int, int);
579 extern int setfsuid(int);
580 extern int setfsgid(int);
581 extern int setgroups(int, gid_t *);
583 /* ARM EABI and MIPS expect 64-bit types to be aligned on even register pairs */
584 #ifdef TARGET_ARM
585 static inline int regpairs_aligned(void *cpu_env) {
586 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
588 #elif defined(TARGET_MIPS)
589 static inline int regpairs_aligned(void *cpu_env) { return 1; }
590 #else
591 static inline int regpairs_aligned(void *cpu_env) { return 0; }
592 #endif
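/*
 * regpairs_aligned() tells the syscall argument decoding later in this file
 * whether the target ABI passes 64-bit arguments in an aligned (even/odd)
 * register pair; when it returns 1, a padding argument slot is skipped so
 * that the two halves of the 64-bit value start in an even-numbered
 * register.
 */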
594 #define ERRNO_TABLE_SIZE 1200
596 /* target_to_host_errno_table[] is initialized from
597 * host_to_target_errno_table[] in syscall_init(). */
598 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
602 * This list is the union of errno values overridden in asm-<arch>/errno.h
603 * minus the errnos that are not actually generic to all archs.
605 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
606 [EIDRM] = TARGET_EIDRM,
607 [ECHRNG] = TARGET_ECHRNG,
608 [EL2NSYNC] = TARGET_EL2NSYNC,
609 [EL3HLT] = TARGET_EL3HLT,
610 [EL3RST] = TARGET_EL3RST,
611 [ELNRNG] = TARGET_ELNRNG,
612 [EUNATCH] = TARGET_EUNATCH,
613 [ENOCSI] = TARGET_ENOCSI,
614 [EL2HLT] = TARGET_EL2HLT,
615 [EDEADLK] = TARGET_EDEADLK,
616 [ENOLCK] = TARGET_ENOLCK,
617 [EBADE] = TARGET_EBADE,
618 [EBADR] = TARGET_EBADR,
619 [EXFULL] = TARGET_EXFULL,
620 [ENOANO] = TARGET_ENOANO,
621 [EBADRQC] = TARGET_EBADRQC,
622 [EBADSLT] = TARGET_EBADSLT,
623 [EBFONT] = TARGET_EBFONT,
624 [ENOSTR] = TARGET_ENOSTR,
625 [ENODATA] = TARGET_ENODATA,
626 [ETIME] = TARGET_ETIME,
627 [ENOSR] = TARGET_ENOSR,
628 [ENONET] = TARGET_ENONET,
629 [ENOPKG] = TARGET_ENOPKG,
630 [EREMOTE] = TARGET_EREMOTE,
631 [ENOLINK] = TARGET_ENOLINK,
632 [EADV] = TARGET_EADV,
633 [ESRMNT] = TARGET_ESRMNT,
634 [ECOMM] = TARGET_ECOMM,
635 [EPROTO] = TARGET_EPROTO,
636 [EDOTDOT] = TARGET_EDOTDOT,
637 [EMULTIHOP] = TARGET_EMULTIHOP,
638 [EBADMSG] = TARGET_EBADMSG,
639 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
640 [EOVERFLOW] = TARGET_EOVERFLOW,
641 [ENOTUNIQ] = TARGET_ENOTUNIQ,
642 [EBADFD] = TARGET_EBADFD,
643 [EREMCHG] = TARGET_EREMCHG,
644 [ELIBACC] = TARGET_ELIBACC,
645 [ELIBBAD] = TARGET_ELIBBAD,
646 [ELIBSCN] = TARGET_ELIBSCN,
647 [ELIBMAX] = TARGET_ELIBMAX,
648 [ELIBEXEC] = TARGET_ELIBEXEC,
649 [EILSEQ] = TARGET_EILSEQ,
650 [ENOSYS] = TARGET_ENOSYS,
651 [ELOOP] = TARGET_ELOOP,
652 [ERESTART] = TARGET_ERESTART,
653 [ESTRPIPE] = TARGET_ESTRPIPE,
654 [ENOTEMPTY] = TARGET_ENOTEMPTY,
655 [EUSERS] = TARGET_EUSERS,
656 [ENOTSOCK] = TARGET_ENOTSOCK,
657 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
658 [EMSGSIZE] = TARGET_EMSGSIZE,
659 [EPROTOTYPE] = TARGET_EPROTOTYPE,
660 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
661 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
662 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
663 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
664 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
665 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
666 [EADDRINUSE] = TARGET_EADDRINUSE,
667 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
668 [ENETDOWN] = TARGET_ENETDOWN,
669 [ENETUNREACH] = TARGET_ENETUNREACH,
670 [ENETRESET] = TARGET_ENETRESET,
671 [ECONNABORTED] = TARGET_ECONNABORTED,
672 [ECONNRESET] = TARGET_ECONNRESET,
673 [ENOBUFS] = TARGET_ENOBUFS,
674 [EISCONN] = TARGET_EISCONN,
675 [ENOTCONN] = TARGET_ENOTCONN,
676 [EUCLEAN] = TARGET_EUCLEAN,
677 [ENOTNAM] = TARGET_ENOTNAM,
678 [ENAVAIL] = TARGET_ENAVAIL,
679 [EISNAM] = TARGET_EISNAM,
680 [EREMOTEIO] = TARGET_EREMOTEIO,
681 [ESHUTDOWN] = TARGET_ESHUTDOWN,
682 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
683 [ETIMEDOUT] = TARGET_ETIMEDOUT,
684 [ECONNREFUSED] = TARGET_ECONNREFUSED,
685 [EHOSTDOWN] = TARGET_EHOSTDOWN,
686 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
687 [EALREADY] = TARGET_EALREADY,
688 [EINPROGRESS] = TARGET_EINPROGRESS,
689 [ESTALE] = TARGET_ESTALE,
690 [ECANCELED] = TARGET_ECANCELED,
691 [ENOMEDIUM] = TARGET_ENOMEDIUM,
692 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
693 #ifdef ENOKEY
694 [ENOKEY] = TARGET_ENOKEY,
695 #endif
696 #ifdef EKEYEXPIRED
697 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
698 #endif
699 #ifdef EKEYREVOKED
700 [EKEYREVOKED] = TARGET_EKEYREVOKED,
701 #endif
702 #ifdef EKEYREJECTED
703 [EKEYREJECTED] = TARGET_EKEYREJECTED,
704 #endif
705 #ifdef EOWNERDEAD
706 [EOWNERDEAD] = TARGET_EOWNERDEAD,
707 #endif
708 #ifdef ENOTRECOVERABLE
709 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
710 #endif
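/*
 * Only errnos whose numeric values differ between architectures need an
 * entry in the table above; the converters below fall back to returning
 * the value unchanged when the table slot is zero.
 */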
713 static inline int host_to_target_errno(int err)
715 if(host_to_target_errno_table[err])
716 return host_to_target_errno_table[err];
717 return err;
720 static inline int target_to_host_errno(int err)
722 if (target_to_host_errno_table[err])
723 return target_to_host_errno_table[err];
724 return err;
727 static inline abi_long get_errno(abi_long ret)
729 if (ret == -1)
730 return -host_to_target_errno(errno);
731 else
732 return ret;
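/*
 * Convention used throughout this file: helpers return either a valid
 * non-negative result or a negative *target* errno.  get_errno() converts a
 * failed host call (-1 with host errno set) into that form, and is_error()
 * below treats the topmost 4096 values of the unsigned range (i.e. small
 * negative numbers) as error codes, mirroring the kernel's own convention.
 */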
735 static inline int is_error(abi_long ret)
737 return (abi_ulong)ret >= (abi_ulong)(-4096);
740 char *target_strerror(int err)
742 return strerror(target_to_host_errno(err));
745 static abi_ulong target_brk;
746 static abi_ulong target_original_brk;
747 static abi_ulong brk_page;
749 void target_set_brk(abi_ulong new_brk)
751 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
752 brk_page = HOST_PAGE_ALIGN(target_brk);
755 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
756 #define DEBUGF_BRK(message, args...)
758 /* do_brk() must return target values and target errnos. */
759 abi_long do_brk(abi_ulong new_brk)
761 abi_long mapped_addr;
762 int new_alloc_size;
764 DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);
766 if (!new_brk) {
767 DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
768 return target_brk;
770 if (new_brk < target_original_brk) {
771 DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
772 return target_brk;
775 /* If the new brk is less than the highest page reserved to the
776 * target heap allocation, set it and we're almost done... */
777 if (new_brk <= brk_page) {
778 /* Heap contents are initialized to zero, as for anonymous
779 * mapped pages. */
780 if (new_brk > target_brk) {
781 memset(g2h(target_brk), 0, new_brk - target_brk);
783 target_brk = new_brk;
784 DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
785 return target_brk;
788 /* We need to allocate more memory after the brk... Note that
789 * we don't use MAP_FIXED because that will map over the top of
790 * any existing mapping (like the one with the host libc or qemu
791 * itself); instead we treat "mapped but at wrong address" as
792 * a failure and unmap again.
794 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
795 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
796 PROT_READ|PROT_WRITE,
797 MAP_ANON|MAP_PRIVATE, 0, 0));
799 if (mapped_addr == brk_page) {
800 target_brk = new_brk;
801 brk_page = HOST_PAGE_ALIGN(target_brk);
802 DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
803 return target_brk;
804 } else if (mapped_addr != -1) {
805 /* Mapped but at wrong address, meaning there wasn't actually
806 * enough space for this brk.
808 target_munmap(mapped_addr, new_alloc_size);
809 mapped_addr = -1;
810 DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
812 else {
813 DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
816 #if defined(TARGET_ALPHA)
817 /* We (partially) emulate OSF/1 on Alpha, which requires we
818 return a proper errno, not an unchanged brk value. */
819 return -TARGET_ENOMEM;
820 #endif
821 /* For everything else, return the previous break. */
822 return target_brk;
825 static inline abi_long copy_from_user_fdset(fd_set *fds,
826 abi_ulong target_fds_addr,
827 int n)
829 int i, nw, j, k;
830 abi_ulong b, *target_fds;
832 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
833 if (!(target_fds = lock_user(VERIFY_READ,
834 target_fds_addr,
835 sizeof(abi_ulong) * nw,
836 1)))
837 return -TARGET_EFAULT;
839 FD_ZERO(fds);
840 k = 0;
841 for (i = 0; i < nw; i++) {
842 /* grab the abi_ulong */
843 __get_user(b, &target_fds[i]);
844 for (j = 0; j < TARGET_ABI_BITS; j++) {
845 /* check the bit inside the abi_ulong */
846 if ((b >> j) & 1)
847 FD_SET(k, fds);
848 k++;
852 unlock_user(target_fds, target_fds_addr, 0);
854 return 0;
857 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
858 abi_ulong target_fds_addr,
859 int n)
861 if (target_fds_addr) {
862 if (copy_from_user_fdset(fds, target_fds_addr, n))
863 return -TARGET_EFAULT;
864 *fds_ptr = fds;
865 } else {
866 *fds_ptr = NULL;
868 return 0;
871 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
872 const fd_set *fds,
873 int n)
875 int i, nw, j, k;
876 abi_long v;
877 abi_ulong *target_fds;
879 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
880 if (!(target_fds = lock_user(VERIFY_WRITE,
881 target_fds_addr,
882 sizeof(abi_ulong) * nw,
883 0)))
884 return -TARGET_EFAULT;
886 k = 0;
887 for (i = 0; i < nw; i++) {
888 v = 0;
889 for (j = 0; j < TARGET_ABI_BITS; j++) {
890 v |= ((FD_ISSET(k, fds) != 0) << j);
891 k++;
893 __put_user(v, &target_fds[i]);
896 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
898 return 0;
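/*
 * The two fdset copy routines above repack the descriptor bitmap one bit at
 * a time: the target stores it as an array of abi_ulong words in guest byte
 * order (hence __get_user/__put_user), while the host side is an opaque
 * fd_set accessed through FD_SET/FD_ISSET.  Copying bit by bit sidesteps
 * any difference in word size or endianness.
 */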
901 #if defined(__alpha__)
902 #define HOST_HZ 1024
903 #else
904 #define HOST_HZ 100
905 #endif
907 static inline abi_long host_to_target_clock_t(long ticks)
909 #if HOST_HZ == TARGET_HZ
910 return ticks;
911 #else
912 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
913 #endif
916 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
917 const struct rusage *rusage)
919 struct target_rusage *target_rusage;
921 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
922 return -TARGET_EFAULT;
923 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
924 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
925 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
926 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
927 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
928 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
929 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
930 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
931 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
932 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
933 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
934 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
935 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
936 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
937 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
938 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
939 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
940 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
941 unlock_user_struct(target_rusage, target_addr, 1);
943 return 0;
946 static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
948 target_ulong target_rlim_swap;
949 rlim_t result;
951 target_rlim_swap = tswapl(target_rlim);
952 if (target_rlim_swap == TARGET_RLIM_INFINITY || target_rlim_swap != (rlim_t)target_rlim_swap)
953 result = RLIM_INFINITY;
954 else
955 result = target_rlim_swap;
957 return result;
960 static inline target_ulong host_to_target_rlim(rlim_t rlim)
962 target_ulong target_rlim_swap;
963 target_ulong result;
965 if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
966 target_rlim_swap = TARGET_RLIM_INFINITY;
967 else
968 target_rlim_swap = rlim;
969 result = tswapl(target_rlim_swap);
971 return result;
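/*
 * RLIM_INFINITY is not numerically the same on every target and the target
 * rlim_t may be narrower than the host's, so the two converters above map
 * infinity explicitly and treat any value that would not survive the
 * conversion as unlimited rather than silently truncating it.
 */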
974 static inline int target_to_host_resource(int code)
976 switch (code) {
977 case TARGET_RLIMIT_AS:
978 return RLIMIT_AS;
979 case TARGET_RLIMIT_CORE:
980 return RLIMIT_CORE;
981 case TARGET_RLIMIT_CPU:
982 return RLIMIT_CPU;
983 case TARGET_RLIMIT_DATA:
984 return RLIMIT_DATA;
985 case TARGET_RLIMIT_FSIZE:
986 return RLIMIT_FSIZE;
987 case TARGET_RLIMIT_LOCKS:
988 return RLIMIT_LOCKS;
989 case TARGET_RLIMIT_MEMLOCK:
990 return RLIMIT_MEMLOCK;
991 case TARGET_RLIMIT_MSGQUEUE:
992 return RLIMIT_MSGQUEUE;
993 case TARGET_RLIMIT_NICE:
994 return RLIMIT_NICE;
995 case TARGET_RLIMIT_NOFILE:
996 return RLIMIT_NOFILE;
997 case TARGET_RLIMIT_NPROC:
998 return RLIMIT_NPROC;
999 case TARGET_RLIMIT_RSS:
1000 return RLIMIT_RSS;
1001 case TARGET_RLIMIT_RTPRIO:
1002 return RLIMIT_RTPRIO;
1003 case TARGET_RLIMIT_SIGPENDING:
1004 return RLIMIT_SIGPENDING;
1005 case TARGET_RLIMIT_STACK:
1006 return RLIMIT_STACK;
1007 default:
1008 return code;
1012 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1013 abi_ulong target_tv_addr)
1015 struct target_timeval *target_tv;
1017 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1018 return -TARGET_EFAULT;
1020 __get_user(tv->tv_sec, &target_tv->tv_sec);
1021 __get_user(tv->tv_usec, &target_tv->tv_usec);
1023 unlock_user_struct(target_tv, target_tv_addr, 0);
1025 return 0;
1028 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1029 const struct timeval *tv)
1031 struct target_timeval *target_tv;
1033 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1034 return -TARGET_EFAULT;
1036 __put_user(tv->tv_sec, &target_tv->tv_sec);
1037 __put_user(tv->tv_usec, &target_tv->tv_usec);
1039 unlock_user_struct(target_tv, target_tv_addr, 1);
1041 return 0;
1044 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1045 #include <mqueue.h>
1047 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1048 abi_ulong target_mq_attr_addr)
1050 struct target_mq_attr *target_mq_attr;
1052 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1053 target_mq_attr_addr, 1))
1054 return -TARGET_EFAULT;
1056 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1057 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1058 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1059 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1061 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1063 return 0;
1066 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1067 const struct mq_attr *attr)
1069 struct target_mq_attr *target_mq_attr;
1071 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1072 target_mq_attr_addr, 0))
1073 return -TARGET_EFAULT;
1075 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1076 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1077 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1078 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1080 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1082 return 0;
1084 #endif
1086 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1087 /* do_select() must return target values and target errnos. */
1088 static abi_long do_select(int n,
1089 abi_ulong rfd_addr, abi_ulong wfd_addr,
1090 abi_ulong efd_addr, abi_ulong target_tv_addr)
1092 fd_set rfds, wfds, efds;
1093 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1094 struct timeval tv, *tv_ptr;
1095 abi_long ret;
1097 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1098 if (ret) {
1099 return ret;
1101 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1102 if (ret) {
1103 return ret;
1105 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1106 if (ret) {
1107 return ret;
1110 if (target_tv_addr) {
1111 if (copy_from_user_timeval(&tv, target_tv_addr))
1112 return -TARGET_EFAULT;
1113 tv_ptr = &tv;
1114 } else {
1115 tv_ptr = NULL;
1118 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1120 if (!is_error(ret)) {
1121 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1122 return -TARGET_EFAULT;
1123 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1124 return -TARGET_EFAULT;
1125 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1126 return -TARGET_EFAULT;
1128 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1129 return -TARGET_EFAULT;
1132 return ret;
1134 #endif
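/*
 * do_select() above follows the common pattern in this file: marshal the
 * guest arguments into host form (fd sets and timeval), run the host
 * syscall, and on success write the possibly-modified fd sets and remaining
 * timeout back to guest memory, returning -TARGET_EFAULT whenever a guest
 * pointer cannot be accessed.
 */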
1136 static abi_long do_pipe2(int host_pipe[], int flags)
1138 #ifdef CONFIG_PIPE2
1139 return pipe2(host_pipe, flags);
1140 #else
1141 return -ENOSYS;
1142 #endif
1145 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1146 int flags, int is_pipe2)
1148 int host_pipe[2];
1149 abi_long ret;
1150 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1152 if (is_error(ret))
1153 return get_errno(ret);
1155 /* Several targets have special calling conventions for the original
1156 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1157 if (!is_pipe2) {
1158 #if defined(TARGET_ALPHA)
1159 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1160 return host_pipe[0];
1161 #elif defined(TARGET_MIPS)
1162 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1163 return host_pipe[0];
1164 #elif defined(TARGET_SH4)
1165 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1166 return host_pipe[0];
1167 #endif
1170 if (put_user_s32(host_pipe[0], pipedes)
1171 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1172 return -TARGET_EFAULT;
1173 return get_errno(ret);
1176 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1177 abi_ulong target_addr,
1178 socklen_t len)
1180 struct target_ip_mreqn *target_smreqn;
1182 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1183 if (!target_smreqn)
1184 return -TARGET_EFAULT;
1185 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1186 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1187 if (len == sizeof(struct target_ip_mreqn))
1188 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
1189 unlock_user(target_smreqn, target_addr, 0);
1191 return 0;
1194 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1195 abi_ulong target_addr,
1196 socklen_t len)
1198 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1199 sa_family_t sa_family;
1200 struct target_sockaddr *target_saddr;
1202 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1203 if (!target_saddr)
1204 return -TARGET_EFAULT;
1206 sa_family = tswap16(target_saddr->sa_family);
1208 /* Oops. The caller might send an incomplete sun_path; sun_path
1209 * must be terminated by \0 (see the manual page), but
1210 * unfortunately it is quite common to specify sockaddr_un
1211 * length as "strlen(x->sun_path)" while it should be
1212 * "strlen(...) + 1". We'll fix that here if needed.
1213 * Linux kernel has a similar feature.
1216 if (sa_family == AF_UNIX) {
1217 if (len < unix_maxlen && len > 0) {
1218 char *cp = (char*)target_saddr;
1220 if ( cp[len-1] && !cp[len] )
1221 len++;
1223 if (len > unix_maxlen)
1224 len = unix_maxlen;
1227 memcpy(addr, target_saddr, len);
1228 addr->sa_family = sa_family;
1229 unlock_user(target_saddr, target_addr, 0);
1231 return 0;
1234 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1235 struct sockaddr *addr,
1236 socklen_t len)
1238 struct target_sockaddr *target_saddr;
1240 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1241 if (!target_saddr)
1242 return -TARGET_EFAULT;
1243 memcpy(target_saddr, addr, len);
1244 target_saddr->sa_family = tswap16(addr->sa_family);
1245 unlock_user(target_saddr, target_addr, len);
1247 return 0;
1250 /* ??? Should this also swap msgh->name? */
1251 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1252 struct target_msghdr *target_msgh)
1254 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1255 abi_long msg_controllen;
1256 abi_ulong target_cmsg_addr;
1257 struct target_cmsghdr *target_cmsg;
1258 socklen_t space = 0;
1260 msg_controllen = tswapl(target_msgh->msg_controllen);
1261 if (msg_controllen < sizeof (struct target_cmsghdr))
1262 goto the_end;
1263 target_cmsg_addr = tswapl(target_msgh->msg_control);
1264 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1265 if (!target_cmsg)
1266 return -TARGET_EFAULT;
1268 while (cmsg && target_cmsg) {
1269 void *data = CMSG_DATA(cmsg);
1270 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1272 int len = tswapl(target_cmsg->cmsg_len)
1273 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1275 space += CMSG_SPACE(len);
1276 if (space > msgh->msg_controllen) {
1277 space -= CMSG_SPACE(len);
1278 gemu_log("Host cmsg overflow\n");
1279 break;
1282 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1283 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1284 cmsg->cmsg_len = CMSG_LEN(len);
1286 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1287 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1288 memcpy(data, target_data, len);
1289 } else {
1290 int *fd = (int *)data;
1291 int *target_fd = (int *)target_data;
1292 int i, numfds = len / sizeof(int);
1294 for (i = 0; i < numfds; i++)
1295 fd[i] = tswap32(target_fd[i]);
1298 cmsg = CMSG_NXTHDR(msgh, cmsg);
1299 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1301 unlock_user(target_cmsg, target_cmsg_addr, 0);
1302 the_end:
1303 msgh->msg_controllen = space;
1304 return 0;
1307 /* ??? Should this also swap msgh->name? */
1308 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1309 struct msghdr *msgh)
1311 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1312 abi_long msg_controllen;
1313 abi_ulong target_cmsg_addr;
1314 struct target_cmsghdr *target_cmsg;
1315 socklen_t space = 0;
1317 msg_controllen = tswapl(target_msgh->msg_controllen);
1318 if (msg_controllen < sizeof (struct target_cmsghdr))
1319 goto the_end;
1320 target_cmsg_addr = tswapl(target_msgh->msg_control);
1321 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1322 if (!target_cmsg)
1323 return -TARGET_EFAULT;
1325 while (cmsg && target_cmsg) {
1326 void *data = CMSG_DATA(cmsg);
1327 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1329 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1331 space += TARGET_CMSG_SPACE(len);
1332 if (space > msg_controllen) {
1333 space -= TARGET_CMSG_SPACE(len);
1334 gemu_log("Target cmsg overflow\n");
1335 break;
1338 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1339 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1340 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1342 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1343 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1344 memcpy(target_data, data, len);
1345 } else {
1346 int *fd = (int *)data;
1347 int *target_fd = (int *)target_data;
1348 int i, numfds = len / sizeof(int);
1350 for (i = 0; i < numfds; i++)
1351 target_fd[i] = tswap32(fd[i]);
1354 cmsg = CMSG_NXTHDR(msgh, cmsg);
1355 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1357 unlock_user(target_cmsg, target_cmsg_addr, space);
1358 the_end:
1359 target_msgh->msg_controllen = tswapl(space);
1360 return 0;
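/*
 * Ancillary (control) data needs the same treatment in both directions: the
 * cmsg headers are re-laid-out with the other side's alignment and the
 * level/type/len fields byte-swapped.  Only SCM_RIGHTS payloads (arrays of
 * file descriptors) are converted element by element; any other ancillary
 * type is copied verbatim after logging a warning, since its layout is not
 * known here.
 */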
1363 /* do_setsockopt() Must return target values and target errnos. */
1364 static abi_long do_setsockopt(int sockfd, int level, int optname,
1365 abi_ulong optval_addr, socklen_t optlen)
1367 abi_long ret;
1368 int val;
1369 struct ip_mreqn *ip_mreq;
1370 struct ip_mreq_source *ip_mreq_source;
1372 switch(level) {
1373 case SOL_TCP:
1374 /* TCP options all take an 'int' value. */
1375 if (optlen < sizeof(uint32_t))
1376 return -TARGET_EINVAL;
1378 if (get_user_u32(val, optval_addr))
1379 return -TARGET_EFAULT;
1380 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1381 break;
1382 case SOL_IP:
1383 switch(optname) {
1384 case IP_TOS:
1385 case IP_TTL:
1386 case IP_HDRINCL:
1387 case IP_ROUTER_ALERT:
1388 case IP_RECVOPTS:
1389 case IP_RETOPTS:
1390 case IP_PKTINFO:
1391 case IP_MTU_DISCOVER:
1392 case IP_RECVERR:
1393 case IP_RECVTOS:
1394 #ifdef IP_FREEBIND
1395 case IP_FREEBIND:
1396 #endif
1397 case IP_MULTICAST_TTL:
1398 case IP_MULTICAST_LOOP:
1399 val = 0;
1400 if (optlen >= sizeof(uint32_t)) {
1401 if (get_user_u32(val, optval_addr))
1402 return -TARGET_EFAULT;
1403 } else if (optlen >= 1) {
1404 if (get_user_u8(val, optval_addr))
1405 return -TARGET_EFAULT;
1407 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1408 break;
1409 case IP_ADD_MEMBERSHIP:
1410 case IP_DROP_MEMBERSHIP:
1411 if (optlen < sizeof (struct target_ip_mreq) ||
1412 optlen > sizeof (struct target_ip_mreqn))
1413 return -TARGET_EINVAL;
1415 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1416 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1417 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1418 break;
1420 case IP_BLOCK_SOURCE:
1421 case IP_UNBLOCK_SOURCE:
1422 case IP_ADD_SOURCE_MEMBERSHIP:
1423 case IP_DROP_SOURCE_MEMBERSHIP:
1424 if (optlen != sizeof (struct target_ip_mreq_source))
1425 return -TARGET_EINVAL;
1427 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1428 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1429 unlock_user (ip_mreq_source, optval_addr, 0);
1430 break;
1432 default:
1433 goto unimplemented;
1435 break;
1436 case TARGET_SOL_SOCKET:
1437 switch (optname) {
1438 /* Options with 'int' argument. */
1439 case TARGET_SO_DEBUG:
1440 optname = SO_DEBUG;
1441 break;
1442 case TARGET_SO_REUSEADDR:
1443 optname = SO_REUSEADDR;
1444 break;
1445 case TARGET_SO_TYPE:
1446 optname = SO_TYPE;
1447 break;
1448 case TARGET_SO_ERROR:
1449 optname = SO_ERROR;
1450 break;
1451 case TARGET_SO_DONTROUTE:
1452 optname = SO_DONTROUTE;
1453 break;
1454 case TARGET_SO_BROADCAST:
1455 optname = SO_BROADCAST;
1456 break;
1457 case TARGET_SO_SNDBUF:
1458 optname = SO_SNDBUF;
1459 break;
1460 case TARGET_SO_RCVBUF:
1461 optname = SO_RCVBUF;
1462 break;
1463 case TARGET_SO_KEEPALIVE:
1464 optname = SO_KEEPALIVE;
1465 break;
1466 case TARGET_SO_OOBINLINE:
1467 optname = SO_OOBINLINE;
1468 break;
1469 case TARGET_SO_NO_CHECK:
1470 optname = SO_NO_CHECK;
1471 break;
1472 case TARGET_SO_PRIORITY:
1473 optname = SO_PRIORITY;
1474 break;
1475 #ifdef SO_BSDCOMPAT
1476 case TARGET_SO_BSDCOMPAT:
1477 optname = SO_BSDCOMPAT;
1478 break;
1479 #endif
1480 case TARGET_SO_PASSCRED:
1481 optname = SO_PASSCRED;
1482 break;
1483 case TARGET_SO_TIMESTAMP:
1484 optname = SO_TIMESTAMP;
1485 break;
1486 case TARGET_SO_RCVLOWAT:
1487 optname = SO_RCVLOWAT;
1488 break;
1489 case TARGET_SO_RCVTIMEO:
1490 optname = SO_RCVTIMEO;
1491 break;
1492 case TARGET_SO_SNDTIMEO:
1493 optname = SO_SNDTIMEO;
1494 break;
1496 default:
1497 goto unimplemented;
1499 if (optlen < sizeof(uint32_t))
1500 return -TARGET_EINVAL;
1502 if (get_user_u32(val, optval_addr))
1503 return -TARGET_EFAULT;
1504 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1505 break;
1506 default:
1507 unimplemented:
1508 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1509 ret = -TARGET_ENOPROTOOPT;
1511 return ret;
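/*
 * Note on the TARGET_SOL_SOCKET case above: the SO_* option numbers differ
 * between architectures, so the target constant is mapped to the host
 * optname before calling setsockopt() with the host SOL_SOCKET level;
 * unrecognised options take the "unimplemented" path and return
 * -TARGET_ENOPROTOOPT.
 */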
1514 /* do_getsockopt() Must return target values and target errnos. */
1515 static abi_long do_getsockopt(int sockfd, int level, int optname,
1516 abi_ulong optval_addr, abi_ulong optlen)
1518 abi_long ret;
1519 int len, val;
1520 socklen_t lv;
1522 switch(level) {
1523 case TARGET_SOL_SOCKET:
1524 level = SOL_SOCKET;
1525 switch (optname) {
1526 /* These don't just return a single integer */
1527 case TARGET_SO_LINGER:
1528 case TARGET_SO_RCVTIMEO:
1529 case TARGET_SO_SNDTIMEO:
1530 case TARGET_SO_PEERCRED:
1531 case TARGET_SO_PEERNAME:
1532 goto unimplemented;
1533 /* Options with 'int' argument. */
1534 case TARGET_SO_DEBUG:
1535 optname = SO_DEBUG;
1536 goto int_case;
1537 case TARGET_SO_REUSEADDR:
1538 optname = SO_REUSEADDR;
1539 goto int_case;
1540 case TARGET_SO_TYPE:
1541 optname = SO_TYPE;
1542 goto int_case;
1543 case TARGET_SO_ERROR:
1544 optname = SO_ERROR;
1545 goto int_case;
1546 case TARGET_SO_DONTROUTE:
1547 optname = SO_DONTROUTE;
1548 goto int_case;
1549 case TARGET_SO_BROADCAST:
1550 optname = SO_BROADCAST;
1551 goto int_case;
1552 case TARGET_SO_SNDBUF:
1553 optname = SO_SNDBUF;
1554 goto int_case;
1555 case TARGET_SO_RCVBUF:
1556 optname = SO_RCVBUF;
1557 goto int_case;
1558 case TARGET_SO_KEEPALIVE:
1559 optname = SO_KEEPALIVE;
1560 goto int_case;
1561 case TARGET_SO_OOBINLINE:
1562 optname = SO_OOBINLINE;
1563 goto int_case;
1564 case TARGET_SO_NO_CHECK:
1565 optname = SO_NO_CHECK;
1566 goto int_case;
1567 case TARGET_SO_PRIORITY:
1568 optname = SO_PRIORITY;
1569 goto int_case;
1570 #ifdef SO_BSDCOMPAT
1571 case TARGET_SO_BSDCOMPAT:
1572 optname = SO_BSDCOMPAT;
1573 goto int_case;
1574 #endif
1575 case TARGET_SO_PASSCRED:
1576 optname = SO_PASSCRED;
1577 goto int_case;
1578 case TARGET_SO_TIMESTAMP:
1579 optname = SO_TIMESTAMP;
1580 goto int_case;
1581 case TARGET_SO_RCVLOWAT:
1582 optname = SO_RCVLOWAT;
1583 goto int_case;
1584 default:
1585 goto int_case;
1587 break;
1588 case SOL_TCP:
1589 /* TCP options all take an 'int' value. */
1590 int_case:
1591 if (get_user_u32(len, optlen))
1592 return -TARGET_EFAULT;
1593 if (len < 0)
1594 return -TARGET_EINVAL;
1595 lv = sizeof(lv);
1596 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1597 if (ret < 0)
1598 return ret;
1599 if (len > lv)
1600 len = lv;
1601 if (len == 4) {
1602 if (put_user_u32(val, optval_addr))
1603 return -TARGET_EFAULT;
1604 } else {
1605 if (put_user_u8(val, optval_addr))
1606 return -TARGET_EFAULT;
1608 if (put_user_u32(len, optlen))
1609 return -TARGET_EFAULT;
1610 break;
1611 case SOL_IP:
1612 switch(optname) {
1613 case IP_TOS:
1614 case IP_TTL:
1615 case IP_HDRINCL:
1616 case IP_ROUTER_ALERT:
1617 case IP_RECVOPTS:
1618 case IP_RETOPTS:
1619 case IP_PKTINFO:
1620 case IP_MTU_DISCOVER:
1621 case IP_RECVERR:
1622 case IP_RECVTOS:
1623 #ifdef IP_FREEBIND
1624 case IP_FREEBIND:
1625 #endif
1626 case IP_MULTICAST_TTL:
1627 case IP_MULTICAST_LOOP:
1628 if (get_user_u32(len, optlen))
1629 return -TARGET_EFAULT;
1630 if (len < 0)
1631 return -TARGET_EINVAL;
1632 lv = sizeof(lv);
1633 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1634 if (ret < 0)
1635 return ret;
1636 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1637 len = 1;
1638 if (put_user_u32(len, optlen)
1639 || put_user_u8(val, optval_addr))
1640 return -TARGET_EFAULT;
1641 } else {
1642 if (len > sizeof(int))
1643 len = sizeof(int);
1644 if (put_user_u32(len, optlen)
1645 || put_user_u32(val, optval_addr))
1646 return -TARGET_EFAULT;
1648 break;
1649 default:
1650 ret = -TARGET_ENOPROTOOPT;
1651 break;
1653 break;
1654 default:
1655 unimplemented:
1656 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1657 level, optname);
1658 ret = -TARGET_EOPNOTSUPP;
1659 break;
1661 return ret;
1664 /* FIXME
1665 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1666 * other lock functions have a return code of 0 for failure.
1668 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1669 int count, int copy)
1671 struct target_iovec *target_vec;
1672 abi_ulong base;
1673 int i;
1675 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1676 if (!target_vec)
1677 return -TARGET_EFAULT;
1678 for(i = 0;i < count; i++) {
1679 base = tswapl(target_vec[i].iov_base);
1680 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1681 if (vec[i].iov_len != 0) {
1682 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1683 /* Don't check lock_user return value. We must call writev even
1684 if an element has an invalid base address. */
1685 } else {
1686 /* zero length pointer is ignored */
1687 vec[i].iov_base = NULL;
1690 unlock_user (target_vec, target_addr, 0);
1691 return 0;
1694 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1695 int count, int copy)
1697 struct target_iovec *target_vec;
1698 abi_ulong base;
1699 int i;
1701 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1702 if (!target_vec)
1703 return -TARGET_EFAULT;
1704 for(i = 0;i < count; i++) {
1705 if (target_vec[i].iov_base) {
1706 base = tswapl(target_vec[i].iov_base);
1707 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1710 unlock_user (target_vec, target_addr, 0);
1712 return 0;
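/*
 * lock_iovec()/unlock_iovec() convert a guest iovec array into host struct
 * iovec entries, locking each buffer into host-accessible memory; zero
 * length entries become NULL pointers.  As the FIXME above notes, these two
 * helpers return 0 on success and a negative target errno on failure,
 * unlike the other lock_user-style helpers.
 */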
1715 /* do_socket() Must return target values and target errnos. */
1716 static abi_long do_socket(int domain, int type, int protocol)
1718 #if defined(TARGET_MIPS)
1719 switch(type) {
1720 case TARGET_SOCK_DGRAM:
1721 type = SOCK_DGRAM;
1722 break;
1723 case TARGET_SOCK_STREAM:
1724 type = SOCK_STREAM;
1725 break;
1726 case TARGET_SOCK_RAW:
1727 type = SOCK_RAW;
1728 break;
1729 case TARGET_SOCK_RDM:
1730 type = SOCK_RDM;
1731 break;
1732 case TARGET_SOCK_SEQPACKET:
1733 type = SOCK_SEQPACKET;
1734 break;
1735 case TARGET_SOCK_PACKET:
1736 type = SOCK_PACKET;
1737 break;
1739 #endif
1740 if (domain == PF_NETLINK)
1741 return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1742 return get_errno(socket(domain, type, protocol));
1745 /* do_bind() Must return target values and target errnos. */
1746 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1747 socklen_t addrlen)
1749 void *addr;
1750 abi_long ret;
1752 if ((int)addrlen < 0) {
1753 return -TARGET_EINVAL;
1756 addr = alloca(addrlen+1);
1758 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1759 if (ret)
1760 return ret;
1762 return get_errno(bind(sockfd, addr, addrlen));
1765 /* do_connect() Must return target values and target errnos. */
1766 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1767 socklen_t addrlen)
1769 void *addr;
1770 abi_long ret;
1772 if ((int)addrlen < 0) {
1773 return -TARGET_EINVAL;
1776 addr = alloca(addrlen);
1778 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1779 if (ret)
1780 return ret;
1782 return get_errno(connect(sockfd, addr, addrlen));
1785 /* do_sendrecvmsg() Must return target values and target errnos. */
1786 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1787 int flags, int send)
1789 abi_long ret, len;
1790 struct target_msghdr *msgp;
1791 struct msghdr msg;
1792 int count;
1793 struct iovec *vec;
1794 abi_ulong target_vec;
1796 /* FIXME */
1797 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1798 msgp,
1799 target_msg,
1800 send ? 1 : 0))
1801 return -TARGET_EFAULT;
1802 if (msgp->msg_name) {
1803 msg.msg_namelen = tswap32(msgp->msg_namelen);
1804 msg.msg_name = alloca(msg.msg_namelen);
1805 ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1806 msg.msg_namelen);
1807 if (ret) {
1808 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1809 return ret;
1811 } else {
1812 msg.msg_name = NULL;
1813 msg.msg_namelen = 0;
1815 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1816 msg.msg_control = alloca(msg.msg_controllen);
1817 msg.msg_flags = tswap32(msgp->msg_flags);
1819 count = tswapl(msgp->msg_iovlen);
1820 vec = alloca(count * sizeof(struct iovec));
1821 target_vec = tswapl(msgp->msg_iov);
1822 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1823 msg.msg_iovlen = count;
1824 msg.msg_iov = vec;
1826 if (send) {
1827 ret = target_to_host_cmsg(&msg, msgp);
1828 if (ret == 0)
1829 ret = get_errno(sendmsg(fd, &msg, flags));
1830 } else {
1831 ret = get_errno(recvmsg(fd, &msg, flags));
1832 if (!is_error(ret)) {
1833 len = ret;
1834 ret = host_to_target_cmsg(msgp, &msg);
1835 if (!is_error(ret))
1836 ret = len;
1839 unlock_iovec(vec, target_vec, count, !send);
1840 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1841 return ret;
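/*
 * do_sendrecvmsg() handles both sendmsg and recvmsg: for a send the control
 * data is converted target-to-host before the call; for a receive the
 * control data and the locked iovec buffers are converted back afterwards,
 * and the byte count returned by recvmsg() is preserved as the final return
 * value.
 */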
1844 /* do_accept() Must return target values and target errnos. */
1845 static abi_long do_accept(int fd, abi_ulong target_addr,
1846 abi_ulong target_addrlen_addr)
1848 socklen_t addrlen;
1849 void *addr;
1850 abi_long ret;
1852 if (target_addr == 0)
1853 return get_errno(accept(fd, NULL, NULL));
1855 /* Linux returns EINVAL if the addrlen pointer is invalid */
1856 if (get_user_u32(addrlen, target_addrlen_addr))
1857 return -TARGET_EINVAL;
1859 if ((int)addrlen < 0) {
1860 return -TARGET_EINVAL;
1863 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1864 return -TARGET_EINVAL;
1866 addr = alloca(addrlen);
1868 ret = get_errno(accept(fd, addr, &addrlen));
1869 if (!is_error(ret)) {
1870 host_to_target_sockaddr(target_addr, addr, addrlen);
1871 if (put_user_u32(addrlen, target_addrlen_addr))
1872 ret = -TARGET_EFAULT;
1874 return ret;
1877 /* do_getpeername() Must return target values and target errnos. */
1878 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1879 abi_ulong target_addrlen_addr)
1881 socklen_t addrlen;
1882 void *addr;
1883 abi_long ret;
1885 if (get_user_u32(addrlen, target_addrlen_addr))
1886 return -TARGET_EFAULT;
1888 if ((int)addrlen < 0) {
1889 return -TARGET_EINVAL;
1892 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1893 return -TARGET_EFAULT;
1895 addr = alloca(addrlen);
1897 ret = get_errno(getpeername(fd, addr, &addrlen));
1898 if (!is_error(ret)) {
1899 host_to_target_sockaddr(target_addr, addr, addrlen);
1900 if (put_user_u32(addrlen, target_addrlen_addr))
1901 ret = -TARGET_EFAULT;
1903 return ret;
1906 /* do_getsockname() Must return target values and target errnos. */
1907 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1908 abi_ulong target_addrlen_addr)
1910 socklen_t addrlen;
1911 void *addr;
1912 abi_long ret;
1914 if (get_user_u32(addrlen, target_addrlen_addr))
1915 return -TARGET_EFAULT;
1917 if ((int)addrlen < 0) {
1918 return -TARGET_EINVAL;
1921 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1922 return -TARGET_EFAULT;
1924 addr = alloca(addrlen);
1926 ret = get_errno(getsockname(fd, addr, &addrlen));
1927 if (!is_error(ret)) {
1928 host_to_target_sockaddr(target_addr, addr, addrlen);
1929 if (put_user_u32(addrlen, target_addrlen_addr))
1930 ret = -TARGET_EFAULT;
1932 return ret;
1935 /* do_socketpair() Must return target values and target errnos. */
1936 static abi_long do_socketpair(int domain, int type, int protocol,
1937 abi_ulong target_tab_addr)
1939 int tab[2];
1940 abi_long ret;
1942 ret = get_errno(socketpair(domain, type, protocol, tab));
1943 if (!is_error(ret)) {
1944 if (put_user_s32(tab[0], target_tab_addr)
1945 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1946 ret = -TARGET_EFAULT;
1948 return ret;
1951 /* do_sendto() Must return target values and target errnos. */
1952 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1953 abi_ulong target_addr, socklen_t addrlen)
1955 void *addr;
1956 void *host_msg;
1957 abi_long ret;
1959 if ((int)addrlen < 0) {
1960 return -TARGET_EINVAL;
1963 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1964 if (!host_msg)
1965 return -TARGET_EFAULT;
1966 if (target_addr) {
1967 addr = alloca(addrlen);
1968 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1969 if (ret) {
1970 unlock_user(host_msg, msg, 0);
1971 return ret;
1973 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1974 } else {
1975 ret = get_errno(send(fd, host_msg, len, flags));
1977 unlock_user(host_msg, msg, 0);
1978 return ret;
1981 /* do_recvfrom() Must return target values and target errnos. */
1982 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1983 abi_ulong target_addr,
1984 abi_ulong target_addrlen)
1986 socklen_t addrlen;
1987 void *addr;
1988 void *host_msg;
1989 abi_long ret;
1991 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1992 if (!host_msg)
1993 return -TARGET_EFAULT;
1994 if (target_addr) {
1995 if (get_user_u32(addrlen, target_addrlen)) {
1996 ret = -TARGET_EFAULT;
1997 goto fail;
1999 if ((int)addrlen < 0) {
2000 ret = -TARGET_EINVAL;
2001 goto fail;
2003 addr = alloca(addrlen);
2004 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2005 } else {
2006 addr = NULL; /* To keep compiler quiet. */
2007 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2009 if (!is_error(ret)) {
2010 if (target_addr) {
2011 host_to_target_sockaddr(target_addr, addr, addrlen);
2012 if (put_user_u32(addrlen, target_addrlen)) {
2013 ret = -TARGET_EFAULT;
2014 goto fail;
2017 unlock_user(host_msg, msg, len);
2018 } else {
2019 fail:
2020 unlock_user(host_msg, msg, 0);
2022 return ret;
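/* On targets that multiplex the BSD socket API through sys_socketcall(),
 * the arguments arrive as an array of abi_ulong words at vptr.  For
 * example, a guest send(fd, buf, len, 0) reaches do_socketcall() below
 * roughly as num == SOCKOP_send with vptr pointing at { fd, buf, len, 0 };
 * each case reads its words with get_user_ual() and dispatches to the
 * do_*() helpers above.
 */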
2025 #ifdef TARGET_NR_socketcall
2026 /* do_socketcall() Must return target values and target errnos. */
2027 static abi_long do_socketcall(int num, abi_ulong vptr)
2029 abi_long ret;
2030 const int n = sizeof(abi_ulong);
2032 switch(num) {
2033 case SOCKOP_socket:
2035 abi_ulong domain, type, protocol;
2037 if (get_user_ual(domain, vptr)
2038 || get_user_ual(type, vptr + n)
2039 || get_user_ual(protocol, vptr + 2 * n))
2040 return -TARGET_EFAULT;
2042 ret = do_socket(domain, type, protocol);
2044 break;
2045 case SOCKOP_bind:
2047 abi_ulong sockfd;
2048 abi_ulong target_addr;
2049 socklen_t addrlen;
2051 if (get_user_ual(sockfd, vptr)
2052 || get_user_ual(target_addr, vptr + n)
2053 || get_user_ual(addrlen, vptr + 2 * n))
2054 return -TARGET_EFAULT;
2056 ret = do_bind(sockfd, target_addr, addrlen);
2058 break;
2059 case SOCKOP_connect:
2061 abi_ulong sockfd;
2062 abi_ulong target_addr;
2063 socklen_t addrlen;
2065 if (get_user_ual(sockfd, vptr)
2066 || get_user_ual(target_addr, vptr + n)
2067 || get_user_ual(addrlen, vptr + 2 * n))
2068 return -TARGET_EFAULT;
2070 ret = do_connect(sockfd, target_addr, addrlen);
2072 break;
2073 case SOCKOP_listen:
2075 abi_ulong sockfd, backlog;
2077 if (get_user_ual(sockfd, vptr)
2078 || get_user_ual(backlog, vptr + n))
2079 return -TARGET_EFAULT;
2081 ret = get_errno(listen(sockfd, backlog));
2083 break;
2084 case SOCKOP_accept:
2086 abi_ulong sockfd;
2087 abi_ulong target_addr, target_addrlen;
2089 if (get_user_ual(sockfd, vptr)
2090 || get_user_ual(target_addr, vptr + n)
2091 || get_user_ual(target_addrlen, vptr + 2 * n))
2092 return -TARGET_EFAULT;
2094 ret = do_accept(sockfd, target_addr, target_addrlen);
2096 break;
2097 case SOCKOP_getsockname:
2099 abi_ulong sockfd;
2100 abi_ulong target_addr, target_addrlen;
2102 if (get_user_ual(sockfd, vptr)
2103 || get_user_ual(target_addr, vptr + n)
2104 || get_user_ual(target_addrlen, vptr + 2 * n))
2105 return -TARGET_EFAULT;
2107 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2109 break;
2110 case SOCKOP_getpeername:
2112 abi_ulong sockfd;
2113 abi_ulong target_addr, target_addrlen;
2115 if (get_user_ual(sockfd, vptr)
2116 || get_user_ual(target_addr, vptr + n)
2117 || get_user_ual(target_addrlen, vptr + 2 * n))
2118 return -TARGET_EFAULT;
2120 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2122 break;
2123 case SOCKOP_socketpair:
2125 abi_ulong domain, type, protocol;
2126 abi_ulong tab;
2128 if (get_user_ual(domain, vptr)
2129 || get_user_ual(type, vptr + n)
2130 || get_user_ual(protocol, vptr + 2 * n)
2131 || get_user_ual(tab, vptr + 3 * n))
2132 return -TARGET_EFAULT;
2134 ret = do_socketpair(domain, type, protocol, tab);
2136 break;
2137 case SOCKOP_send:
2139 abi_ulong sockfd;
2140 abi_ulong msg;
2141 size_t len;
2142 abi_ulong flags;
2144 if (get_user_ual(sockfd, vptr)
2145 || get_user_ual(msg, vptr + n)
2146 || get_user_ual(len, vptr + 2 * n)
2147 || get_user_ual(flags, vptr + 3 * n))
2148 return -TARGET_EFAULT;
2150 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2152 break;
2153 case SOCKOP_recv:
2155 abi_ulong sockfd;
2156 abi_ulong msg;
2157 size_t len;
2158 abi_ulong flags;
2160 if (get_user_ual(sockfd, vptr)
2161 || get_user_ual(msg, vptr + n)
2162 || get_user_ual(len, vptr + 2 * n)
2163 || get_user_ual(flags, vptr + 3 * n))
2164 return -TARGET_EFAULT;
2166 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2168 break;
2169 case SOCKOP_sendto:
2171 abi_ulong sockfd;
2172 abi_ulong msg;
2173 size_t len;
2174 abi_ulong flags;
2175 abi_ulong addr;
2176 socklen_t addrlen;
2178 if (get_user_ual(sockfd, vptr)
2179 || get_user_ual(msg, vptr + n)
2180 || get_user_ual(len, vptr + 2 * n)
2181 || get_user_ual(flags, vptr + 3 * n)
2182 || get_user_ual(addr, vptr + 4 * n)
2183 || get_user_ual(addrlen, vptr + 5 * n))
2184 return -TARGET_EFAULT;
2186 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2188 break;
2189 case SOCKOP_recvfrom:
2191 abi_ulong sockfd;
2192 abi_ulong msg;
2193 size_t len;
2194 abi_ulong flags;
2195 abi_ulong addr;
2196 socklen_t addrlen;
2198 if (get_user_ual(sockfd, vptr)
2199 || get_user_ual(msg, vptr + n)
2200 || get_user_ual(len, vptr + 2 * n)
2201 || get_user_ual(flags, vptr + 3 * n)
2202 || get_user_ual(addr, vptr + 4 * n)
2203 || get_user_ual(addrlen, vptr + 5 * n))
2204 return -TARGET_EFAULT;
2206 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2208 break;
2209 case SOCKOP_shutdown:
2211 abi_ulong sockfd, how;
2213 if (get_user_ual(sockfd, vptr)
2214 || get_user_ual(how, vptr + n))
2215 return -TARGET_EFAULT;
2217 ret = get_errno(shutdown(sockfd, how));
2219 break;
2220 case SOCKOP_sendmsg:
2221 case SOCKOP_recvmsg:
2223 abi_ulong fd;
2224 abi_ulong target_msg;
2225 abi_ulong flags;
2227 if (get_user_ual(fd, vptr)
2228 || get_user_ual(target_msg, vptr + n)
2229 || get_user_ual(flags, vptr + 2 * n))
2230 return -TARGET_EFAULT;
2232 ret = do_sendrecvmsg(fd, target_msg, flags,
2233 (num == SOCKOP_sendmsg));
2235 break;
2236 case SOCKOP_setsockopt:
2238 abi_ulong sockfd;
2239 abi_ulong level;
2240 abi_ulong optname;
2241 abi_ulong optval;
2242 socklen_t optlen;
2244 if (get_user_ual(sockfd, vptr)
2245 || get_user_ual(level, vptr + n)
2246 || get_user_ual(optname, vptr + 2 * n)
2247 || get_user_ual(optval, vptr + 3 * n)
2248 || get_user_ual(optlen, vptr + 4 * n))
2249 return -TARGET_EFAULT;
2251 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2253 break;
2254 case SOCKOP_getsockopt:
2256 abi_ulong sockfd;
2257 abi_ulong level;
2258 abi_ulong optname;
2259 abi_ulong optval;
2260 socklen_t optlen;
2262 if (get_user_ual(sockfd, vptr)
2263 || get_user_ual(level, vptr + n)
2264 || get_user_ual(optname, vptr + 2 * n)
2265 || get_user_ual(optval, vptr + 3 * n)
2266 || get_user_ual(optlen, vptr + 4 * n))
2267 return -TARGET_EFAULT;
2269 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2271 break;
2272 default:
2273 gemu_log("Unsupported socketcall: %d\n", num);
2274 ret = -TARGET_ENOSYS;
2275 break;
2277 return ret;
2279 #endif
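/* SysV IPC emulation.  shm_regions[] remembers the guest address and size
 * of each segment attached by do_shmat() so that do_shmdt() can clear the
 * corresponding page flags again.
 */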
2281 #define N_SHM_REGIONS 32
2283 static struct shm_region {
2284 abi_ulong start;
2285 abi_ulong size;
2286 } shm_regions[N_SHM_REGIONS];
2288 struct target_ipc_perm
2290 abi_long __key;
2291 abi_ulong uid;
2292 abi_ulong gid;
2293 abi_ulong cuid;
2294 abi_ulong cgid;
2295 unsigned short int mode;
2296 unsigned short int __pad1;
2297 unsigned short int __seq;
2298 unsigned short int __pad2;
2299 abi_ulong __unused1;
2300 abi_ulong __unused2;
2303 struct target_semid_ds
2305 struct target_ipc_perm sem_perm;
2306 abi_ulong sem_otime;
2307 abi_ulong __unused1;
2308 abi_ulong sem_ctime;
2309 abi_ulong __unused2;
2310 abi_ulong sem_nsems;
2311 abi_ulong __unused3;
2312 abi_ulong __unused4;
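/* Convert struct ipc_perm between target and host layouts.  The permission
 * block sits at the start of the target semid_ds, so these helpers lock the
 * surrounding structure and only touch its sem_perm member; the msqid_ds
 * and shmid_ds converters below reuse them the same way.
 */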
2315 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2316 abi_ulong target_addr)
2318 struct target_ipc_perm *target_ip;
2319 struct target_semid_ds *target_sd;
2321 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2322 return -TARGET_EFAULT;
2323 target_ip = &(target_sd->sem_perm);
2324 host_ip->__key = tswapl(target_ip->__key);
2325 host_ip->uid = tswapl(target_ip->uid);
2326 host_ip->gid = tswapl(target_ip->gid);
2327 host_ip->cuid = tswapl(target_ip->cuid);
2328 host_ip->cgid = tswapl(target_ip->cgid);
2329 host_ip->mode = tswapl(target_ip->mode);
2330 unlock_user_struct(target_sd, target_addr, 0);
2331 return 0;
2334 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2335 struct ipc_perm *host_ip)
2337 struct target_ipc_perm *target_ip;
2338 struct target_semid_ds *target_sd;
2340 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2341 return -TARGET_EFAULT;
2342 target_ip = &(target_sd->sem_perm);
2343 target_ip->__key = tswapl(host_ip->__key);
2344 target_ip->uid = tswapl(host_ip->uid);
2345 target_ip->gid = tswapl(host_ip->gid);
2346 target_ip->cuid = tswapl(host_ip->cuid);
2347 target_ip->cgid = tswapl(host_ip->cgid);
2348 target_ip->mode = tswapl(host_ip->mode);
2349 unlock_user_struct(target_sd, target_addr, 1);
2350 return 0;
2353 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2354 abi_ulong target_addr)
2356 struct target_semid_ds *target_sd;
2358 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2359 return -TARGET_EFAULT;
2360 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2361 return -TARGET_EFAULT;
2362 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2363 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2364 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2365 unlock_user_struct(target_sd, target_addr, 0);
2366 return 0;
2369 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2370 struct semid_ds *host_sd)
2372 struct target_semid_ds *target_sd;
2374 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2375 return -TARGET_EFAULT;
2376 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2377 return -TARGET_EFAULT;
2378 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2379 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2380 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2381 unlock_user_struct(target_sd, target_addr, 1);
2382 return 0;
2385 struct target_seminfo {
2386 int semmap;
2387 int semmni;
2388 int semmns;
2389 int semmnu;
2390 int semmsl;
2391 int semopm;
2392 int semume;
2393 int semusz;
2394 int semvmx;
2395 int semaem;
2398 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2399 struct seminfo *host_seminfo)
2401 struct target_seminfo *target_seminfo;
2402 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2403 return -TARGET_EFAULT;
2404 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2405 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2406 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2407 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2408 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2409 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2410 __put_user(host_seminfo->semume, &target_seminfo->semume);
2411 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2412 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2413 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2414 unlock_user_struct(target_seminfo, target_addr, 1);
2415 return 0;
2418 union semun {
2419 int val;
2420 struct semid_ds *buf;
2421 unsigned short *array;
2422 struct seminfo *__buf;
2425 union target_semun {
2426 int val;
2427 abi_ulong buf;
2428 abi_ulong array;
2429 abi_ulong __buf;
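/* semctl()'s fourth argument is a union: depending on cmd it carries an
 * integer value or a guest pointer to an array, a semid_ds or a seminfo
 * structure.  do_semctl() below picks the member to convert from the
 * command code.
 */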
2432 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2433 abi_ulong target_addr)
2435 int nsems;
2436 unsigned short *array;
2437 union semun semun;
2438 struct semid_ds semid_ds;
2439 int i, ret;
2441 semun.buf = &semid_ds;
2443 ret = semctl(semid, 0, IPC_STAT, semun);
2444 if (ret == -1)
2445 return get_errno(ret);
2447 nsems = semid_ds.sem_nsems;
2449 *host_array = malloc(nsems*sizeof(unsigned short));
2450 array = lock_user(VERIFY_READ, target_addr,
2451 nsems*sizeof(unsigned short), 1);
2452 if (!array)
2453 return -TARGET_EFAULT;
2455 for(i=0; i<nsems; i++) {
2456 __get_user((*host_array)[i], &array[i]);
2458 unlock_user(array, target_addr, 0);
2460 return 0;
2463 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2464 unsigned short **host_array)
2466 int nsems;
2467 unsigned short *array;
2468 union semun semun;
2469 struct semid_ds semid_ds;
2470 int i, ret;
2472 semun.buf = &semid_ds;
2474 ret = semctl(semid, 0, IPC_STAT, semun);
2475 if (ret == -1)
2476 return get_errno(ret);
2478 nsems = semid_ds.sem_nsems;
2480 array = lock_user(VERIFY_WRITE, target_addr,
2481 nsems*sizeof(unsigned short), 0);
2482 if (!array)
2483 return -TARGET_EFAULT;
2485 for(i=0; i<nsems; i++) {
2486 __put_user((*host_array)[i], &array[i]);
2488 free(*host_array);
2489 unlock_user(array, target_addr, 1);
2491 return 0;
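/* GETALL/SETALL operate on the whole semaphore array, so both helpers first
 * query the set's size with IPC_STAT; host_to_target_semarray() also frees
 * the host buffer allocated by target_to_host_semarray().
 */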
2494 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2495 union target_semun target_su)
2497 union semun arg;
2498 struct semid_ds dsarg;
2499 unsigned short *array = NULL;
2500 struct seminfo seminfo;
2501 abi_long ret = -TARGET_EINVAL;
2502 abi_long err;
2503 cmd &= 0xff;
2505 switch( cmd ) {
2506 case GETVAL:
2507 case SETVAL:
2508 arg.val = tswapl(target_su.val);
2509 ret = get_errno(semctl(semid, semnum, cmd, arg));
2510 target_su.val = tswapl(arg.val);
2511 break;
2512 case GETALL:
2513 case SETALL:
2514 err = target_to_host_semarray(semid, &array, target_su.array);
2515 if (err)
2516 return err;
2517 arg.array = array;
2518 ret = get_errno(semctl(semid, semnum, cmd, arg));
2519 err = host_to_target_semarray(semid, target_su.array, &array);
2520 if (err)
2521 return err;
2522 break;
2523 case IPC_STAT:
2524 case IPC_SET:
2525 case SEM_STAT:
2526 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2527 if (err)
2528 return err;
2529 arg.buf = &dsarg;
2530 ret = get_errno(semctl(semid, semnum, cmd, arg));
2531 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2532 if (err)
2533 return err;
2534 break;
2535 case IPC_INFO:
2536 case SEM_INFO:
2537 arg.__buf = &seminfo;
2538 ret = get_errno(semctl(semid, semnum, cmd, arg));
2539 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2540 if (err)
2541 return err;
2542 break;
2543 case IPC_RMID:
2544 case GETPID:
2545 case GETNCNT:
2546 case GETZCNT:
2547 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2548 break;
2551 return ret;
2554 struct target_sembuf {
2555 unsigned short sem_num;
2556 short sem_op;
2557 short sem_flg;
2560 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2561 abi_ulong target_addr,
2562 unsigned nsops)
2564 struct target_sembuf *target_sembuf;
2565 int i;
2567 target_sembuf = lock_user(VERIFY_READ, target_addr,
2568 nsops*sizeof(struct target_sembuf), 1);
2569 if (!target_sembuf)
2570 return -TARGET_EFAULT;
2572 for(i=0; i<nsops; i++) {
2573 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2574 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2575 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2578 unlock_user(target_sembuf, target_addr, 0);
2580 return 0;
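/* do_semop(): convert the guest sembuf array and issue the operation on the
 * host semaphore set.
 */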
2583 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2585 struct sembuf sops[nsops];
2587 if (target_to_host_sembuf(sops, ptr, nsops))
2588 return -TARGET_EFAULT;
2590 return get_errno(semop(semid, sops, nsops));
2593 struct target_msqid_ds
2595 struct target_ipc_perm msg_perm;
2596 abi_ulong msg_stime;
2597 #if TARGET_ABI_BITS == 32
2598 abi_ulong __unused1;
2599 #endif
2600 abi_ulong msg_rtime;
2601 #if TARGET_ABI_BITS == 32
2602 abi_ulong __unused2;
2603 #endif
2604 abi_ulong msg_ctime;
2605 #if TARGET_ABI_BITS == 32
2606 abi_ulong __unused3;
2607 #endif
2608 abi_ulong __msg_cbytes;
2609 abi_ulong msg_qnum;
2610 abi_ulong msg_qbytes;
2611 abi_ulong msg_lspid;
2612 abi_ulong msg_lrpid;
2613 abi_ulong __unused4;
2614 abi_ulong __unused5;
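/* Converters for struct msqid_ds.  On 32-bit ABIs each time field above is
 * followed by a padding word (__unused1..3), as in the 32-bit kernel layout.
 */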
2617 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2618 abi_ulong target_addr)
2620 struct target_msqid_ds *target_md;
2622 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2623 return -TARGET_EFAULT;
2624 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2625 return -TARGET_EFAULT;
2626 host_md->msg_stime = tswapl(target_md->msg_stime);
2627 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2628 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2629 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2630 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2631 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2632 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2633 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2634 unlock_user_struct(target_md, target_addr, 0);
2635 return 0;
2638 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2639 struct msqid_ds *host_md)
2641 struct target_msqid_ds *target_md;
2643 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2644 return -TARGET_EFAULT;
2645 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2646 return -TARGET_EFAULT;
2647 target_md->msg_stime = tswapl(host_md->msg_stime);
2648 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2649 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2650 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2651 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2652 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2653 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2654 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2655 unlock_user_struct(target_md, target_addr, 1);
2656 return 0;
2659 struct target_msginfo {
2660 int msgpool;
2661 int msgmap;
2662 int msgmax;
2663 int msgmnb;
2664 int msgmni;
2665 int msgssz;
2666 int msgtql;
2667 unsigned short int msgseg;
2670 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2671 struct msginfo *host_msginfo)
2673 struct target_msginfo *target_msginfo;
2674 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2675 return -TARGET_EFAULT;
2676 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2677 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2678 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2679 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2680 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2681 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2682 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2683 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2684 unlock_user_struct(target_msginfo, target_addr, 1);
2685 return 0;
2688 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2690 struct msqid_ds dsarg;
2691 struct msginfo msginfo;
2692 abi_long ret = -TARGET_EINVAL;
2694 cmd &= 0xff;
2696 switch (cmd) {
2697 case IPC_STAT:
2698 case IPC_SET:
2699 case MSG_STAT:
2700 if (target_to_host_msqid_ds(&dsarg,ptr))
2701 return -TARGET_EFAULT;
2702 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2703 if (host_to_target_msqid_ds(ptr,&dsarg))
2704 return -TARGET_EFAULT;
2705 break;
2706 case IPC_RMID:
2707 ret = get_errno(msgctl(msgid, cmd, NULL));
2708 break;
2709 case IPC_INFO:
2710 case MSG_INFO:
2711 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2712 if (host_to_target_msginfo(ptr, &msginfo))
2713 return -TARGET_EFAULT;
2714 break;
2717 return ret;
2720 struct target_msgbuf {
2721 abi_long mtype;
2722 char mtext[1];
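/* do_msgsnd()/do_msgrcv() bounce the message through a temporary host
 * msgbuf: mtype has to be byte-swapped, and the host "long" used for mtype
 * may be wider than the target's abi_long, so the buffer cannot be passed
 * in place.
 */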
2725 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2726 unsigned int msgsz, int msgflg)
2728 struct target_msgbuf *target_mb;
2729 struct msgbuf *host_mb;
2730 abi_long ret = 0;
2732 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2733 return -TARGET_EFAULT;
2734 host_mb = malloc(msgsz+sizeof(long));
2735 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2736 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2737 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2738 free(host_mb);
2739 unlock_user_struct(target_mb, msgp, 0);
2741 return ret;
2744 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2745 unsigned int msgsz, abi_long msgtyp,
2746 int msgflg)
2748 struct target_msgbuf *target_mb;
2749 char *target_mtext;
2750 struct msgbuf *host_mb;
2751 abi_long ret = 0;
2753 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2754 return -TARGET_EFAULT;
2756 host_mb = malloc(msgsz+sizeof(long));
2757 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2759 if (ret > 0) {
2760 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2761 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2762 if (!target_mtext) {
2763 ret = -TARGET_EFAULT;
2764 goto end;
2766 memcpy(target_mb->mtext, host_mb->mtext, ret);
2767 unlock_user(target_mtext, target_mtext_addr, ret);
2770 target_mb->mtype = tswapl(host_mb->mtype);
2771 free(host_mb);
2773 end:
2774 if (target_mb)
2775 unlock_user_struct(target_mb, msgp, 1);
2776 return ret;
2779 struct target_shmid_ds
2781 struct target_ipc_perm shm_perm;
2782 abi_ulong shm_segsz;
2783 abi_ulong shm_atime;
2784 #if TARGET_ABI_BITS == 32
2785 abi_ulong __unused1;
2786 #endif
2787 abi_ulong shm_dtime;
2788 #if TARGET_ABI_BITS == 32
2789 abi_ulong __unused2;
2790 #endif
2791 abi_ulong shm_ctime;
2792 #if TARGET_ABI_BITS == 32
2793 abi_ulong __unused3;
2794 #endif
2795 int shm_cpid;
2796 int shm_lpid;
2797 abi_ulong shm_nattch;
2798 unsigned long int __unused4;
2799 unsigned long int __unused5;
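/* The shmid_ds converters use __get_user()/__put_user(), which take care of
 * byte order and of any width difference between the target and host
 * fields.
 */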
2802 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2803 abi_ulong target_addr)
2805 struct target_shmid_ds *target_sd;
2807 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2808 return -TARGET_EFAULT;
2809 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2810 return -TARGET_EFAULT;
2811 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2812 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2813 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2814 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2815 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2816 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2817 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2818 unlock_user_struct(target_sd, target_addr, 0);
2819 return 0;
2822 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2823 struct shmid_ds *host_sd)
2825 struct target_shmid_ds *target_sd;
2827 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2828 return -TARGET_EFAULT;
2829 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2830 return -TARGET_EFAULT;
2831 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2832 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2833 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2834 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2835 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2836 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2837 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2838 unlock_user_struct(target_sd, target_addr, 1);
2839 return 0;
2842 struct target_shminfo {
2843 abi_ulong shmmax;
2844 abi_ulong shmmin;
2845 abi_ulong shmmni;
2846 abi_ulong shmseg;
2847 abi_ulong shmall;
2850 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2851 struct shminfo *host_shminfo)
2853 struct target_shminfo *target_shminfo;
2854 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2855 return -TARGET_EFAULT;
2856 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2857 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2858 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2859 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2860 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2861 unlock_user_struct(target_shminfo, target_addr, 1);
2862 return 0;
2865 struct target_shm_info {
2866 int used_ids;
2867 abi_ulong shm_tot;
2868 abi_ulong shm_rss;
2869 abi_ulong shm_swp;
2870 abi_ulong swap_attempts;
2871 abi_ulong swap_successes;
2874 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2875 struct shm_info *host_shm_info)
2877 struct target_shm_info *target_shm_info;
2878 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2879 return -TARGET_EFAULT;
2880 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2881 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2882 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2883 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2884 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2885 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2886 unlock_user_struct(target_shm_info, target_addr, 1);
2887 return 0;
2890 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2892 struct shmid_ds dsarg;
2893 struct shminfo shminfo;
2894 struct shm_info shm_info;
2895 abi_long ret = -TARGET_EINVAL;
2897 cmd &= 0xff;
2899 switch(cmd) {
2900 case IPC_STAT:
2901 case IPC_SET:
2902 case SHM_STAT:
2903 if (target_to_host_shmid_ds(&dsarg, buf))
2904 return -TARGET_EFAULT;
2905 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2906 if (host_to_target_shmid_ds(buf, &dsarg))
2907 return -TARGET_EFAULT;
2908 break;
2909 case IPC_INFO:
2910 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2911 if (host_to_target_shminfo(buf, &shminfo))
2912 return -TARGET_EFAULT;
2913 break;
2914 case SHM_INFO:
2915 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2916 if (host_to_target_shm_info(buf, &shm_info))
2917 return -TARGET_EFAULT;
2918 break;
2919 case IPC_RMID:
2920 case SHM_LOCK:
2921 case SHM_UNLOCK:
2922 ret = get_errno(shmctl(shmid, cmd, NULL));
2923 break;
2926 return ret;
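/* do_shmat(): attach a segment into the guest address space.  If the guest
 * did not supply an address, one is picked with mmap_find_vma() and the
 * segment is attached there with SHM_REMAP; the new mapping is recorded in
 * shm_regions[] and its page flags are set so the translator sees it.
 */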
2929 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2931 abi_long raddr;
2932 void *host_raddr;
2933 struct shmid_ds shm_info;
2934 int i,ret;
2936 /* find out the length of the shared memory segment */
2937 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2938 if (is_error(ret)) {
2939 /* can't get length, bail out */
2940 return ret;
2943 mmap_lock();
2945 if (shmaddr)
2946 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2947 else {
2948 abi_ulong mmap_start;
2950 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2952 if (mmap_start == -1) {
2953 errno = ENOMEM;
2954 host_raddr = (void *)-1;
2955 } else
2956 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2959 if (host_raddr == (void *)-1) {
2960 mmap_unlock();
2961 return get_errno((long)host_raddr);
2963 raddr=h2g((unsigned long)host_raddr);
2965 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2966 PAGE_VALID | PAGE_READ |
2967 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2969 for (i = 0; i < N_SHM_REGIONS; i++) {
2970 if (shm_regions[i].start == 0) {
2971 shm_regions[i].start = raddr;
2972 shm_regions[i].size = shm_info.shm_segsz;
2973 break;
2977 mmap_unlock();
2978 return raddr;
2982 static inline abi_long do_shmdt(abi_ulong shmaddr)
2984 int i;
2986 for (i = 0; i < N_SHM_REGIONS; ++i) {
2987 if (shm_regions[i].start == shmaddr) {
2988 shm_regions[i].start = 0;
2989 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
2990 break;
2994 return get_errno(shmdt(g2h(shmaddr)));
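/* Some targets funnel every SysV IPC call through a single sys_ipc()
 * syscall; do_ipc() below demultiplexes it.  The upper 16 bits of the call
 * number carry a version field that only matters for IPCOP_msgrcv and
 * IPCOP_shmat.
 */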
2997 #ifdef TARGET_NR_ipc
2998 /* ??? This only works with linear mappings. */
2999 /* do_ipc() must return target values and target errnos. */
3000 static abi_long do_ipc(unsigned int call, int first,
3001 int second, int third,
3002 abi_long ptr, abi_long fifth)
3004 int version;
3005 abi_long ret = 0;
3007 version = call >> 16;
3008 call &= 0xffff;
3010 switch (call) {
3011 case IPCOP_semop:
3012 ret = do_semop(first, ptr, second);
3013 break;
3015 case IPCOP_semget:
3016 ret = get_errno(semget(first, second, third));
3017 break;
3019 case IPCOP_semctl:
3020 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3021 break;
3023 case IPCOP_msgget:
3024 ret = get_errno(msgget(first, second));
3025 break;
3027 case IPCOP_msgsnd:
3028 ret = do_msgsnd(first, ptr, second, third);
3029 break;
3031 case IPCOP_msgctl:
3032 ret = do_msgctl(first, second, ptr);
3033 break;
3035 case IPCOP_msgrcv:
3036 switch (version) {
3037 case 0:
3039 struct target_ipc_kludge {
3040 abi_long msgp;
3041 abi_long msgtyp;
3042 } *tmp;
3044 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3045 ret = -TARGET_EFAULT;
3046 break;
3049 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3051 unlock_user_struct(tmp, ptr, 0);
3052 break;
3054 default:
3055 ret = do_msgrcv(first, ptr, second, fifth, third);
3057 break;
3059 case IPCOP_shmat:
3060 switch (version) {
3061 default:
3063 abi_ulong raddr;
3064 raddr = do_shmat(first, ptr, second);
3065 if (is_error(raddr))
3066 return get_errno(raddr);
3067 if (put_user_ual(raddr, third))
3068 return -TARGET_EFAULT;
3069 break;
3071 case 1:
3072 ret = -TARGET_EINVAL;
3073 break;
3075 break;
3076 case IPCOP_shmdt:
3077 ret = do_shmdt(ptr);
3078 break;
3080 case IPCOP_shmget:
3081 /* IPC_* flag values are the same on all linux platforms */
3082 ret = get_errno(shmget(first, second, third));
3083 break;
3085 /* IPC_* and SHM_* command values are the same on all linux platforms */
3086 case IPCOP_shmctl:
3087 ret = do_shmctl(first, second, third);
3088 break;
3089 default:
3090 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3091 ret = -TARGET_ENOSYS;
3092 break;
3094 return ret;
3096 #endif
3098 /* kernel structure type definitions */
3100 #define STRUCT(name, ...) STRUCT_ ## name,
3101 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3102 enum {
3103 #include "syscall_types.h"
3105 #undef STRUCT
3106 #undef STRUCT_SPECIAL
3108 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3109 #define STRUCT_SPECIAL(name)
3110 #include "syscall_types.h"
3111 #undef STRUCT
3112 #undef STRUCT_SPECIAL
3114 typedef struct IOCTLEntry IOCTLEntry;
3116 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3117 int fd, abi_long cmd, abi_long arg);
3119 struct IOCTLEntry {
3120 unsigned int target_cmd;
3121 unsigned int host_cmd;
3122 const char *name;
3123 int access;
3124 do_ioctl_fn *do_ioctl;
3125 const argtype arg_type[5];
3128 #define IOC_R 0x0001
3129 #define IOC_W 0x0002
3130 #define IOC_RW (IOC_R | IOC_W)
3132 #define MAX_STRUCT_SIZE 4096
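/* buf_temp in do_ioctl() is a MAX_STRUCT_SIZE scratch buffer that holds the
 * host-converted ioctl argument.  Handlers whose converted argument can be
 * larger than this (fiemap and ifconf below) allocate their own buffer.
 */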
3134 #ifdef CONFIG_FIEMAP
3135 /* So fiemap access checks don't overflow on 32 bit systems.
3136 * This is very slightly smaller than the limit imposed by
3137 * the underlying kernel.
3139 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3140 / sizeof(struct fiemap_extent))
3142 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3143 int fd, abi_long cmd, abi_long arg)
3145 /* The parameter for this ioctl is a struct fiemap followed
3146 * by an array of struct fiemap_extent whose size is set
3147 * in fiemap->fm_extent_count. The array is filled in by the
3148 * ioctl.
3150 int target_size_in, target_size_out;
3151 struct fiemap *fm;
3152 const argtype *arg_type = ie->arg_type;
3153 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3154 void *argptr, *p;
3155 abi_long ret;
3156 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3157 uint32_t outbufsz;
3158 int free_fm = 0;
3160 assert(arg_type[0] == TYPE_PTR);
3161 assert(ie->access == IOC_RW);
3162 arg_type++;
3163 target_size_in = thunk_type_size(arg_type, 0);
3164 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3165 if (!argptr) {
3166 return -TARGET_EFAULT;
3168 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3169 unlock_user(argptr, arg, 0);
3170 fm = (struct fiemap *)buf_temp;
3171 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3172 return -TARGET_EINVAL;
3175 outbufsz = sizeof (*fm) +
3176 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3178 if (outbufsz > MAX_STRUCT_SIZE) {
3179 /* We can't fit all the extents into the fixed size buffer.
3180 * Allocate one that is large enough and use it instead.
3182 fm = malloc(outbufsz);
3183 if (!fm) {
3184 return -TARGET_ENOMEM;
3186 memcpy(fm, buf_temp, sizeof(struct fiemap));
3187 free_fm = 1;
3189 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3190 if (!is_error(ret)) {
3191 target_size_out = target_size_in;
3192 /* An extent_count of 0 means we were only counting the extents
3193 * so there are no structs to copy
3195 if (fm->fm_extent_count != 0) {
3196 target_size_out += fm->fm_mapped_extents * extent_size;
3198 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3199 if (!argptr) {
3200 ret = -TARGET_EFAULT;
3201 } else {
3202 /* Convert the struct fiemap */
3203 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3204 if (fm->fm_extent_count != 0) {
3205 p = argptr + target_size_in;
3206 /* ...and then all the struct fiemap_extents */
3207 for (i = 0; i < fm->fm_mapped_extents; i++) {
3208 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3209 THUNK_TARGET);
3210 p += extent_size;
3213 unlock_user(argptr, arg, target_size_out);
3216 if (free_fm) {
3217 free(fm);
3219 return ret;
3221 #endif
3223 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3224 int fd, abi_long cmd, abi_long arg)
3226 const argtype *arg_type = ie->arg_type;
3227 int target_size;
3228 void *argptr;
3229 int ret;
3230 struct ifconf *host_ifconf;
3231 uint32_t outbufsz;
3232 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3233 int target_ifreq_size;
3234 int nb_ifreq;
3235 int free_buf = 0;
3236 int i;
3237 int target_ifc_len;
3238 abi_long target_ifc_buf;
3239 int host_ifc_len;
3240 char *host_ifc_buf;
3242 assert(arg_type[0] == TYPE_PTR);
3243 assert(ie->access == IOC_RW);
3245 arg_type++;
3246 target_size = thunk_type_size(arg_type, 0);
3248 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3249 if (!argptr)
3250 return -TARGET_EFAULT;
3251 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3252 unlock_user(argptr, arg, 0);
3254 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3255 target_ifc_len = host_ifconf->ifc_len;
3256 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3258 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3259 nb_ifreq = target_ifc_len / target_ifreq_size;
3260 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3262 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3263 if (outbufsz > MAX_STRUCT_SIZE) {
3264 /* We can't fit all the ifreq entries into the fixed size buffer.
3265 * Allocate one that is large enough and use it instead.
3267 host_ifconf = malloc(outbufsz);
3268 if (!host_ifconf) {
3269 return -TARGET_ENOMEM;
3271 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3272 free_buf = 1;
3274 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3276 host_ifconf->ifc_len = host_ifc_len;
3277 host_ifconf->ifc_buf = host_ifc_buf;
3279 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3280 if (!is_error(ret)) {
3281 /* convert host ifc_len to target ifc_len */
3283 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3284 target_ifc_len = nb_ifreq * target_ifreq_size;
3285 host_ifconf->ifc_len = target_ifc_len;
3287 /* restore target ifc_buf */
3289 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3291 /* copy struct ifconf to target user */
3293 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3294 if (!argptr)
3295 return -TARGET_EFAULT;
3296 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3297 unlock_user(argptr, arg, target_size);
3299 /* copy ifreq[] to target user */
3301 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3302 for (i = 0; i < nb_ifreq ; i++) {
3303 thunk_convert(argptr + i * target_ifreq_size,
3304 host_ifc_buf + i * sizeof(struct ifreq),
3305 ifreq_arg_type, THUNK_TARGET);
3307 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3310 if (free_buf) {
3311 free(host_ifconf);
3314 return ret;
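/* The ioctl translation table is generated from ioctls.h: plain IOCTL()
 * entries are converted generically by do_ioctl() according to their
 * argtype description, while IOCTL_SPECIAL() entries supply a do_ioctl_fn
 * for requests that need custom marshalling.
 */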
3317 static IOCTLEntry ioctl_entries[] = {
3318 #define IOCTL(cmd, access, ...) \
3319 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3320 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3321 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3322 #include "ioctls.h"
3323 { 0, 0, },
3326 /* ??? Implement proper locking for ioctls. */
3327 /* do_ioctl() Must return target values and target errnos. */
3328 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3330 const IOCTLEntry *ie;
3331 const argtype *arg_type;
3332 abi_long ret;
3333 uint8_t buf_temp[MAX_STRUCT_SIZE];
3334 int target_size;
3335 void *argptr;
3337 ie = ioctl_entries;
3338 for(;;) {
3339 if (ie->target_cmd == 0) {
3340 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3341 return -TARGET_ENOSYS;
3343 if (ie->target_cmd == cmd)
3344 break;
3345 ie++;
3347 arg_type = ie->arg_type;
3348 #if defined(DEBUG)
3349 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3350 #endif
3351 if (ie->do_ioctl) {
3352 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3355 switch(arg_type[0]) {
3356 case TYPE_NULL:
3357 /* no argument */
3358 ret = get_errno(ioctl(fd, ie->host_cmd));
3359 break;
3360 case TYPE_PTRVOID:
3361 case TYPE_INT:
3362 /* int argument */
3363 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3364 break;
3365 case TYPE_PTR:
3366 arg_type++;
3367 target_size = thunk_type_size(arg_type, 0);
3368 switch(ie->access) {
3369 case IOC_R:
3370 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3371 if (!is_error(ret)) {
3372 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3373 if (!argptr)
3374 return -TARGET_EFAULT;
3375 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3376 unlock_user(argptr, arg, target_size);
3378 break;
3379 case IOC_W:
3380 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3381 if (!argptr)
3382 return -TARGET_EFAULT;
3383 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3384 unlock_user(argptr, arg, 0);
3385 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3386 break;
3387 default:
3388 case IOC_RW:
3389 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3390 if (!argptr)
3391 return -TARGET_EFAULT;
3392 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3393 unlock_user(argptr, arg, 0);
3394 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3395 if (!is_error(ret)) {
3396 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3397 if (!argptr)
3398 return -TARGET_EFAULT;
3399 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3400 unlock_user(argptr, arg, target_size);
3402 break;
3404 break;
3405 default:
3406 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3407 (long)cmd, arg_type[0]);
3408 ret = -TARGET_ENOSYS;
3409 break;
3411 return ret;
3414 static const bitmask_transtbl iflag_tbl[] = {
3415 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3416 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3417 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3418 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3419 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3420 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3421 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3422 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3423 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3424 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3425 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3426 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3427 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3428 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3429 { 0, 0, 0, 0 }
3432 static const bitmask_transtbl oflag_tbl[] = {
3433 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3434 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3435 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3436 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3437 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3438 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3439 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3440 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3441 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3442 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3443 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3444 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3445 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3446 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3447 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3448 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3449 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3450 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3451 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3452 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3453 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3454 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3455 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3456 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3457 { 0, 0, 0, 0 }
3460 static const bitmask_transtbl cflag_tbl[] = {
3461 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3462 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3463 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3464 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3465 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3466 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3467 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3468 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3469 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3470 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3471 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3472 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3473 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3474 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3475 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3476 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3477 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3478 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3479 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3480 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3481 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3482 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3483 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3484 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3485 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3486 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3487 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3488 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3489 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3490 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3491 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3492 { 0, 0, 0, 0 }
3495 static const bitmask_transtbl lflag_tbl[] = {
3496 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3497 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3498 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3499 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3500 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3501 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3502 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3503 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3504 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3505 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3506 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3507 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3508 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3509 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3510 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3511 { 0, 0, 0, 0 }
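/* termios flag values differ between targets and the host, so the
 * bitmask_transtbl tables above translate each flag (and multi-bit fields
 * such as CBAUD and CSIZE) in both directions; the converters below also
 * remap the c_cc control-character indices.
 */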
3514 static void target_to_host_termios (void *dst, const void *src)
3516 struct host_termios *host = dst;
3517 const struct target_termios *target = src;
3519 host->c_iflag =
3520 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3521 host->c_oflag =
3522 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3523 host->c_cflag =
3524 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3525 host->c_lflag =
3526 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3527 host->c_line = target->c_line;
3529 memset(host->c_cc, 0, sizeof(host->c_cc));
3530 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3531 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3532 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3533 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3534 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3535 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3536 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3537 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3538 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3539 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3540 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3541 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3542 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3543 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3544 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3545 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3546 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3549 static void host_to_target_termios (void *dst, const void *src)
3551 struct target_termios *target = dst;
3552 const struct host_termios *host = src;
3554 target->c_iflag =
3555 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3556 target->c_oflag =
3557 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3558 target->c_cflag =
3559 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3560 target->c_lflag =
3561 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3562 target->c_line = host->c_line;
3564 memset(target->c_cc, 0, sizeof(target->c_cc));
3565 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3566 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3567 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3568 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3569 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3570 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3571 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3572 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3573 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3574 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3575 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3576 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3577 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3578 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3579 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3580 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3581 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3584 static const StructEntry struct_termios_def = {
3585 .convert = { host_to_target_termios, target_to_host_termios },
3586 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3587 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3590 static bitmask_transtbl mmap_flags_tbl[] = {
3591 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3592 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3593 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3594 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3595 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3596 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3597 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3598 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3599 { 0, 0, 0, 0 }
3602 #if defined(TARGET_I386)
3604 /* NOTE: there is really only one LDT for all the threads */
3605 static uint8_t *ldt_table;
3607 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3609 int size;
3610 void *p;
3612 if (!ldt_table)
3613 return 0;
3614 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3615 if (size > bytecount)
3616 size = bytecount;
3617 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3618 if (!p)
3619 return -TARGET_EFAULT;
3620 /* ??? Should this be byteswapped? */
3621 memcpy(p, ldt_table, size);
3622 unlock_user(p, ptr, size);
3623 return size;
3626 /* XXX: add locking support */
3627 static abi_long write_ldt(CPUX86State *env,
3628 abi_ulong ptr, unsigned long bytecount, int oldmode)
3630 struct target_modify_ldt_ldt_s ldt_info;
3631 struct target_modify_ldt_ldt_s *target_ldt_info;
3632 int seg_32bit, contents, read_exec_only, limit_in_pages;
3633 int seg_not_present, useable, lm;
3634 uint32_t *lp, entry_1, entry_2;
3636 if (bytecount != sizeof(ldt_info))
3637 return -TARGET_EINVAL;
3638 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3639 return -TARGET_EFAULT;
3640 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3641 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3642 ldt_info.limit = tswap32(target_ldt_info->limit);
3643 ldt_info.flags = tswap32(target_ldt_info->flags);
3644 unlock_user_struct(target_ldt_info, ptr, 0);
3646 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3647 return -TARGET_EINVAL;
3648 seg_32bit = ldt_info.flags & 1;
3649 contents = (ldt_info.flags >> 1) & 3;
3650 read_exec_only = (ldt_info.flags >> 3) & 1;
3651 limit_in_pages = (ldt_info.flags >> 4) & 1;
3652 seg_not_present = (ldt_info.flags >> 5) & 1;
3653 useable = (ldt_info.flags >> 6) & 1;
3654 #ifdef TARGET_ABI32
3655 lm = 0;
3656 #else
3657 lm = (ldt_info.flags >> 7) & 1;
3658 #endif
3659 if (contents == 3) {
3660 if (oldmode)
3661 return -TARGET_EINVAL;
3662 if (seg_not_present == 0)
3663 return -TARGET_EINVAL;
3665 /* allocate the LDT */
3666 if (!ldt_table) {
3667 env->ldt.base = target_mmap(0,
3668 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3669 PROT_READ|PROT_WRITE,
3670 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3671 if (env->ldt.base == -1)
3672 return -TARGET_ENOMEM;
3673 memset(g2h(env->ldt.base), 0,
3674 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3675 env->ldt.limit = 0xffff;
3676 ldt_table = g2h(env->ldt.base);
3679 /* NOTE: same code as Linux kernel */
3680 /* Allow LDTs to be cleared by the user. */
3681 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3682 if (oldmode ||
3683 (contents == 0 &&
3684 read_exec_only == 1 &&
3685 seg_32bit == 0 &&
3686 limit_in_pages == 0 &&
3687 seg_not_present == 1 &&
3688 useable == 0 )) {
3689 entry_1 = 0;
3690 entry_2 = 0;
3691 goto install;
3695 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3696 (ldt_info.limit & 0x0ffff);
3697 entry_2 = (ldt_info.base_addr & 0xff000000) |
3698 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3699 (ldt_info.limit & 0xf0000) |
3700 ((read_exec_only ^ 1) << 9) |
3701 (contents << 10) |
3702 ((seg_not_present ^ 1) << 15) |
3703 (seg_32bit << 22) |
3704 (limit_in_pages << 23) |
3705 (lm << 21) |
3706 0x7000;
3707 if (!oldmode)
3708 entry_2 |= (useable << 20);
3710 /* Install the new entry ... */
3711 install:
3712 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3713 lp[0] = tswap32(entry_1);
3714 lp[1] = tswap32(entry_2);
3715 return 0;
3718 /* specific and weird i386 syscalls */
3719 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3720 unsigned long bytecount)
3722 abi_long ret;
3724 switch (func) {
3725 case 0:
3726 ret = read_ldt(ptr, bytecount);
3727 break;
3728 case 1:
3729 ret = write_ldt(env, ptr, bytecount, 1);
3730 break;
3731 case 0x11:
3732 ret = write_ldt(env, ptr, bytecount, 0);
3733 break;
3734 default:
3735 ret = -TARGET_ENOSYS;
3736 break;
3738 return ret;
3741 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3742 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3744 uint64_t *gdt_table = g2h(env->gdt.base);
3745 struct target_modify_ldt_ldt_s ldt_info;
3746 struct target_modify_ldt_ldt_s *target_ldt_info;
3747 int seg_32bit, contents, read_exec_only, limit_in_pages;
3748 int seg_not_present, useable, lm;
3749 uint32_t *lp, entry_1, entry_2;
3750 int i;
3752 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3753 if (!target_ldt_info)
3754 return -TARGET_EFAULT;
3755 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3756 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3757 ldt_info.limit = tswap32(target_ldt_info->limit);
3758 ldt_info.flags = tswap32(target_ldt_info->flags);
3759 if (ldt_info.entry_number == -1) {
3760 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3761 if (gdt_table[i] == 0) {
3762 ldt_info.entry_number = i;
3763 target_ldt_info->entry_number = tswap32(i);
3764 break;
3768 unlock_user_struct(target_ldt_info, ptr, 1);
3770 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3771 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3772 return -TARGET_EINVAL;
3773 seg_32bit = ldt_info.flags & 1;
3774 contents = (ldt_info.flags >> 1) & 3;
3775 read_exec_only = (ldt_info.flags >> 3) & 1;
3776 limit_in_pages = (ldt_info.flags >> 4) & 1;
3777 seg_not_present = (ldt_info.flags >> 5) & 1;
3778 useable = (ldt_info.flags >> 6) & 1;
3779 #ifdef TARGET_ABI32
3780 lm = 0;
3781 #else
3782 lm = (ldt_info.flags >> 7) & 1;
3783 #endif
3785 if (contents == 3) {
3786 if (seg_not_present == 0)
3787 return -TARGET_EINVAL;
3790 /* NOTE: same code as Linux kernel */
3791 /* Allow LDTs to be cleared by the user. */
3792 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3793 if ((contents == 0 &&
3794 read_exec_only == 1 &&
3795 seg_32bit == 0 &&
3796 limit_in_pages == 0 &&
3797 seg_not_present == 1 &&
3798 useable == 0 )) {
3799 entry_1 = 0;
3800 entry_2 = 0;
3801 goto install;
3805 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3806 (ldt_info.limit & 0x0ffff);
3807 entry_2 = (ldt_info.base_addr & 0xff000000) |
3808 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3809 (ldt_info.limit & 0xf0000) |
3810 ((read_exec_only ^ 1) << 9) |
3811 (contents << 10) |
3812 ((seg_not_present ^ 1) << 15) |
3813 (seg_32bit << 22) |
3814 (limit_in_pages << 23) |
3815 (useable << 20) |
3816 (lm << 21) |
3817 0x7000;
3819 /* Install the new entry ... */
3820 install:
3821 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3822 lp[0] = tswap32(entry_1);
3823 lp[1] = tswap32(entry_2);
3824 return 0;
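/* do_get_thread_area(): read a TLS entry back out of the GDT and convert it
 * into the guest's modify_ldt descriptor layout, the inverse of
 * do_set_thread_area() above.
 */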
3827 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3829 struct target_modify_ldt_ldt_s *target_ldt_info;
3830 uint64_t *gdt_table = g2h(env->gdt.base);
3831 uint32_t base_addr, limit, flags;
3832 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3833 int seg_not_present, useable, lm;
3834 uint32_t *lp, entry_1, entry_2;
3836 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3837 if (!target_ldt_info)
3838 return -TARGET_EFAULT;
3839 idx = tswap32(target_ldt_info->entry_number);
3840 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3841 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3842 unlock_user_struct(target_ldt_info, ptr, 1);
3843 return -TARGET_EINVAL;
3845 lp = (uint32_t *)(gdt_table + idx);
3846 entry_1 = tswap32(lp[0]);
3847 entry_2 = tswap32(lp[1]);
3849 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3850 contents = (entry_2 >> 10) & 3;
3851 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3852 seg_32bit = (entry_2 >> 22) & 1;
3853 limit_in_pages = (entry_2 >> 23) & 1;
3854 useable = (entry_2 >> 20) & 1;
3855 #ifdef TARGET_ABI32
3856 lm = 0;
3857 #else
3858 lm = (entry_2 >> 21) & 1;
3859 #endif
3860 flags = (seg_32bit << 0) | (contents << 1) |
3861 (read_exec_only << 3) | (limit_in_pages << 4) |
3862 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3863 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3864 base_addr = (entry_1 >> 16) |
3865 (entry_2 & 0xff000000) |
3866 ((entry_2 & 0xff) << 16);
3867 target_ldt_info->base_addr = tswapl(base_addr);
3868 target_ldt_info->limit = tswap32(limit);
3869 target_ldt_info->flags = tswap32(flags);
3870 unlock_user_struct(target_ldt_info, ptr, 1);
3871 return 0;
3873 #endif /* TARGET_I386 && TARGET_ABI32 */
3875 #ifndef TARGET_ABI32
3876 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3878 abi_long ret = 0;
3879 abi_ulong val;
3880 int idx;
3882 switch(code) {
3883 case TARGET_ARCH_SET_GS:
3884 case TARGET_ARCH_SET_FS:
3885 if (code == TARGET_ARCH_SET_GS)
3886 idx = R_GS;
3887 else
3888 idx = R_FS;
3889 cpu_x86_load_seg(env, idx, 0);
3890 env->segs[idx].base = addr;
3891 break;
3892 case TARGET_ARCH_GET_GS:
3893 case TARGET_ARCH_GET_FS:
3894 if (code == TARGET_ARCH_GET_GS)
3895 idx = R_GS;
3896 else
3897 idx = R_FS;
3898 val = env->segs[idx].base;
3899 if (put_user(val, addr, abi_ulong))
3900 ret = -TARGET_EFAULT;
3901 break;
3902 default:
3903 ret = -TARGET_EINVAL;
3904 break;
3906 return ret;
3908 #endif
3910 #endif /* defined(TARGET_I386) */
3912 #define NEW_STACK_SIZE 0x40000
3914 #if defined(CONFIG_USE_NPTL)
3916 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3917 typedef struct {
3918 CPUState *env;
3919 pthread_mutex_t mutex;
3920 pthread_cond_t cond;
3921 pthread_t thread;
3922 uint32_t tid;
3923 abi_ulong child_tidptr;
3924 abi_ulong parent_tidptr;
3925 sigset_t sigmask;
3926 } new_thread_info;
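/* Thread start routine for clone(): record the new TID, store it at the
   requested tidptr locations, restore the signal mask, wake the parent via
   info->cond, then wait on clone_lock so the parent can finish TLS setup
   before entering cpu_loop(). */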
3928 static void *clone_func(void *arg)
3930 new_thread_info *info = arg;
3931 CPUState *env;
3932 TaskState *ts;
3934 env = info->env;
3935 thread_env = env;
3936 ts = (TaskState *)thread_env->opaque;
3937 info->tid = gettid();
3938 env->host_tid = info->tid;
3939 task_settid(ts);
3940 if (info->child_tidptr)
3941 put_user_u32(info->tid, info->child_tidptr);
3942 if (info->parent_tidptr)
3943 put_user_u32(info->tid, info->parent_tidptr);
3944 /* Enable signals. */
3945 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3946 /* Signal to the parent that we're ready. */
3947 pthread_mutex_lock(&info->mutex);
3948 pthread_cond_broadcast(&info->cond);
3949 pthread_mutex_unlock(&info->mutex);
3950 /* Wait until the parent has finished initializing the tls state. */
3951 pthread_mutex_lock(&clone_lock);
3952 pthread_mutex_unlock(&clone_lock);
3953 cpu_loop(env);
3954 /* never exits */
3955 return NULL;
3957 #else
3959 static int clone_func(void *arg)
3961 CPUState *env = arg;
3962 cpu_loop(env);
3963 /* never exits */
3964 return 0;
3966 #endif
3968 /* do_fork() must return host values and target errnos (unlike most
3969 do_*() functions). */
3970 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3971 abi_ulong parent_tidptr, target_ulong newtls,
3972 abi_ulong child_tidptr)
3974 int ret;
3975 TaskState *ts;
3976 CPUState *new_env;
3977 #if defined(CONFIG_USE_NPTL)
3978 unsigned int nptl_flags;
3979 sigset_t sigmask;
3980 #else
3981 uint8_t *new_stack;
3982 #endif
3984 /* Emulate vfork() with fork() */
3985 if (flags & CLONE_VFORK)
3986 flags &= ~(CLONE_VFORK | CLONE_VM);
3988 if (flags & CLONE_VM) {
3989 TaskState *parent_ts = (TaskState *)env->opaque;
3990 #if defined(CONFIG_USE_NPTL)
3991 new_thread_info info;
3992 pthread_attr_t attr;
3993 #endif
3994 ts = qemu_mallocz(sizeof(TaskState));
3995 init_task_state(ts);
3996 /* we create a new CPU instance. */
3997 new_env = cpu_copy(env);
3998 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3999 cpu_reset(new_env);
4000 #endif
4001 /* Init regs that differ from the parent. */
4002 cpu_clone_regs(new_env, newsp);
4003 new_env->opaque = ts;
4004 ts->bprm = parent_ts->bprm;
4005 ts->info = parent_ts->info;
4006 #if defined(CONFIG_USE_NPTL)
4007 nptl_flags = flags;
4008 flags &= ~CLONE_NPTL_FLAGS2;
4010 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4011 ts->child_tidptr = child_tidptr;
4014 if (nptl_flags & CLONE_SETTLS)
4015 cpu_set_tls (new_env, newtls);
4017 /* Grab a mutex so that thread setup appears atomic. */
4018 pthread_mutex_lock(&clone_lock);
4020 memset(&info, 0, sizeof(info));
4021 pthread_mutex_init(&info.mutex, NULL);
4022 pthread_mutex_lock(&info.mutex);
4023 pthread_cond_init(&info.cond, NULL);
4024 info.env = new_env;
4025 if (nptl_flags & CLONE_CHILD_SETTID)
4026 info.child_tidptr = child_tidptr;
4027 if (nptl_flags & CLONE_PARENT_SETTID)
4028 info.parent_tidptr = parent_tidptr;
4030 ret = pthread_attr_init(&attr);
4031 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4032 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4033 /* It is not safe to deliver signals until the child has finished
4034 initializing, so temporarily block all signals. */
4035 sigfillset(&sigmask);
4036 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4038 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4039 /* TODO: Free new CPU state if thread creation failed. */
4041 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4042 pthread_attr_destroy(&attr);
4043 if (ret == 0) {
4044 /* Wait for the child to initialize. */
4045 pthread_cond_wait(&info.cond, &info.mutex);
4046 ret = info.tid;
4047 if (flags & CLONE_PARENT_SETTID)
4048 put_user_u32(ret, parent_tidptr);
4049 } else {
4050 ret = -1;
4052 pthread_mutex_unlock(&info.mutex);
4053 pthread_cond_destroy(&info.cond);
4054 pthread_mutex_destroy(&info.mutex);
4055 pthread_mutex_unlock(&clone_lock);
4056 #else
4057 if (flags & CLONE_NPTL_FLAGS2)
4058 return -EINVAL;
4059 /* This is probably going to die very quickly, but do it anyway. */
4060 new_stack = qemu_mallocz (NEW_STACK_SIZE);
4061 #ifdef __ia64__
4062 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4063 #else
4064 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4065 #endif
4066 #endif
4067 } else {
4068 /* if no CLONE_VM, we consider it a fork */
4069 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4070 return -EINVAL;
4071 fork_start();
4072 ret = fork();
4073 if (ret == 0) {
4074 /* Child Process. */
4075 cpu_clone_regs(env, newsp);
4076 fork_end(1);
4077 #if defined(CONFIG_USE_NPTL)
4078 /* There is a race condition here. The parent process could
4079 theoretically read the TID in the child process before the child
4080 tid is set. This would require using either ptrace
4081 (not implemented) or having *_tidptr point at a shared memory
4082 mapping. We can't repeat the spinlock hack used above because
4083 the child process gets its own copy of the lock. */
4084 if (flags & CLONE_CHILD_SETTID)
4085 put_user_u32(gettid(), child_tidptr);
4086 if (flags & CLONE_PARENT_SETTID)
4087 put_user_u32(gettid(), parent_tidptr);
4088 ts = (TaskState *)env->opaque;
4089 if (flags & CLONE_SETTLS)
4090 cpu_set_tls (env, newtls);
4091 if (flags & CLONE_CHILD_CLEARTID)
4092 ts->child_tidptr = child_tidptr;
4093 #endif
4094 } else {
4095 fork_end(0);
4098 return ret;
4101 /* warning: doesn't handle Linux-specific flags... */
4102 static int target_to_host_fcntl_cmd(int cmd)
4104 switch(cmd) {
4105 case TARGET_F_DUPFD:
4106 case TARGET_F_GETFD:
4107 case TARGET_F_SETFD:
4108 case TARGET_F_GETFL:
4109 case TARGET_F_SETFL:
4110 return cmd;
4111 case TARGET_F_GETLK:
4112 return F_GETLK;
4113 case TARGET_F_SETLK:
4114 return F_SETLK;
4115 case TARGET_F_SETLKW:
4116 return F_SETLKW;
4117 case TARGET_F_GETOWN:
4118 return F_GETOWN;
4119 case TARGET_F_SETOWN:
4120 return F_SETOWN;
4121 case TARGET_F_GETSIG:
4122 return F_GETSIG;
4123 case TARGET_F_SETSIG:
4124 return F_SETSIG;
4125 #if TARGET_ABI_BITS == 32
4126 case TARGET_F_GETLK64:
4127 return F_GETLK64;
4128 case TARGET_F_SETLK64:
4129 return F_SETLK64;
4130 case TARGET_F_SETLKW64:
4131 return F_SETLKW64;
4132 #endif
4133 case TARGET_F_SETLEASE:
4134 return F_SETLEASE;
4135 case TARGET_F_GETLEASE:
4136 return F_GETLEASE;
4137 #ifdef F_DUPFD_CLOEXEC
4138 case TARGET_F_DUPFD_CLOEXEC:
4139 return F_DUPFD_CLOEXEC;
4140 #endif
4141 case TARGET_F_NOTIFY:
4142 return F_NOTIFY;
4143 default:
4144 return -TARGET_EINVAL;
4146 return -TARGET_EINVAL;
4149 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4151 struct flock fl;
4152 struct target_flock *target_fl;
4153 struct flock64 fl64;
4154 struct target_flock64 *target_fl64;
4155 abi_long ret;
4156 int host_cmd = target_to_host_fcntl_cmd(cmd);
4158 if (host_cmd == -TARGET_EINVAL)
4159 return host_cmd;
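/* The lock commands copy struct flock between guest and host layouts
   around the host fcntl() call; F_GETFL/F_SETFL translate the open flags
   through fcntl_flags_tbl, and the remaining integer commands pass arg
   through unchanged. */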
4161 switch(cmd) {
4162 case TARGET_F_GETLK:
4163 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4164 return -TARGET_EFAULT;
4165 fl.l_type = tswap16(target_fl->l_type);
4166 fl.l_whence = tswap16(target_fl->l_whence);
4167 fl.l_start = tswapl(target_fl->l_start);
4168 fl.l_len = tswapl(target_fl->l_len);
4169 fl.l_pid = tswap32(target_fl->l_pid);
4170 unlock_user_struct(target_fl, arg, 0);
4171 ret = get_errno(fcntl(fd, host_cmd, &fl));
4172 if (ret == 0) {
4173 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4174 return -TARGET_EFAULT;
4175 target_fl->l_type = tswap16(fl.l_type);
4176 target_fl->l_whence = tswap16(fl.l_whence);
4177 target_fl->l_start = tswapl(fl.l_start);
4178 target_fl->l_len = tswapl(fl.l_len);
4179 target_fl->l_pid = tswap32(fl.l_pid);
4180 unlock_user_struct(target_fl, arg, 1);
4182 break;
4184 case TARGET_F_SETLK:
4185 case TARGET_F_SETLKW:
4186 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4187 return -TARGET_EFAULT;
4188 fl.l_type = tswap16(target_fl->l_type);
4189 fl.l_whence = tswap16(target_fl->l_whence);
4190 fl.l_start = tswapl(target_fl->l_start);
4191 fl.l_len = tswapl(target_fl->l_len);
4192 fl.l_pid = tswap32(target_fl->l_pid);
4193 unlock_user_struct(target_fl, arg, 0);
4194 ret = get_errno(fcntl(fd, host_cmd, &fl));
4195 break;
4197 case TARGET_F_GETLK64:
4198 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4199 return -TARGET_EFAULT;
4200 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4201 fl64.l_whence = tswap16(target_fl64->l_whence);
4202 fl64.l_start = tswapl(target_fl64->l_start);
4203 fl64.l_len = tswapl(target_fl64->l_len);
4204 fl64.l_pid = tswap32(target_fl64->l_pid);
4205 unlock_user_struct(target_fl64, arg, 0);
4206 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4207 if (ret == 0) {
4208 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4209 return -TARGET_EFAULT;
4210 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4211 target_fl64->l_whence = tswap16(fl64.l_whence);
4212 target_fl64->l_start = tswapl(fl64.l_start);
4213 target_fl64->l_len = tswapl(fl64.l_len);
4214 target_fl64->l_pid = tswap32(fl64.l_pid);
4215 unlock_user_struct(target_fl64, arg, 1);
4217 break;
4218 case TARGET_F_SETLK64:
4219 case TARGET_F_SETLKW64:
4220 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4221 return -TARGET_EFAULT;
4222 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4223 fl64.l_whence = tswap16(target_fl64->l_whence);
4224 fl64.l_start = tswapl(target_fl64->l_start);
4225 fl64.l_len = tswapl(target_fl64->l_len);
4226 fl64.l_pid = tswap32(target_fl64->l_pid);
4227 unlock_user_struct(target_fl64, arg, 0);
4228 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4229 break;
4231 case TARGET_F_GETFL:
4232 ret = get_errno(fcntl(fd, host_cmd, arg));
4233 if (ret >= 0) {
4234 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4236 break;
4238 case TARGET_F_SETFL:
4239 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4240 break;
4242 case TARGET_F_SETOWN:
4243 case TARGET_F_GETOWN:
4244 case TARGET_F_SETSIG:
4245 case TARGET_F_GETSIG:
4246 case TARGET_F_SETLEASE:
4247 case TARGET_F_GETLEASE:
4248 ret = get_errno(fcntl(fd, host_cmd, arg));
4249 break;
4251 default:
4252 ret = get_errno(fcntl(fd, cmd, arg));
4253 break;
4255 return ret;
4258 #ifdef USE_UID16
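/* With 16-bit uid_t/gid_t syscalls, host ids above 65535 are clamped to
   the overflow id 65534, and a 16-bit -1 is sign-extended so that the
   "leave unchanged" convention survives the conversion. */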
4260 static inline int high2lowuid(int uid)
4262 if (uid > 65535)
4263 return 65534;
4264 else
4265 return uid;
4268 static inline int high2lowgid(int gid)
4270 if (gid > 65535)
4271 return 65534;
4272 else
4273 return gid;
4276 static inline int low2highuid(int uid)
4278 if ((int16_t)uid == -1)
4279 return -1;
4280 else
4281 return uid;
4284 static inline int low2highgid(int gid)
4286 if ((int16_t)gid == -1)
4287 return -1;
4288 else
4289 return gid;
4291 static inline int tswapid(int id)
4293 return tswap16(id);
4295 #else /* !USE_UID16 */
4296 static inline int high2lowuid(int uid)
4298 return uid;
4300 static inline int high2lowgid(int gid)
4302 return gid;
4304 static inline int low2highuid(int uid)
4306 return uid;
4308 static inline int low2highgid(int gid)
4310 return gid;
4312 static inline int tswapid(int id)
4314 return tswap32(id);
4316 #endif /* USE_UID16 */
4318 void syscall_init(void)
4320 IOCTLEntry *ie;
4321 const argtype *arg_type;
4322 int size;
4323 int i;
4325 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4326 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4327 #include "syscall_types.h"
4328 #undef STRUCT
4329 #undef STRUCT_SPECIAL
4331 /* we patch the ioctl size if necessary. We rely on the fact that
4332 no ioctl has all the bits at '1' in the size field */
4333 ie = ioctl_entries;
4334 while (ie->target_cmd != 0) {
4335 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4336 TARGET_IOC_SIZEMASK) {
4337 arg_type = ie->arg_type;
4338 if (arg_type[0] != TYPE_PTR) {
4339 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4340 ie->target_cmd);
4341 exit(1);
4343 arg_type++;
4344 size = thunk_type_size(arg_type, 0);
4345 ie->target_cmd = (ie->target_cmd &
4346 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4347 (size << TARGET_IOC_SIZESHIFT);
4350 /* Build target_to_host_errno_table[] table from
4351 * host_to_target_errno_table[]. */
4352 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4353 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4355 /* automatic consistency check if same arch */
4356 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4357 (defined(__x86_64__) && defined(TARGET_X86_64))
4358 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4359 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4360 ie->name, ie->target_cmd, ie->host_cmd);
4362 #endif
4363 ie++;
4367 #if TARGET_ABI_BITS == 32
4368 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4370 #ifdef TARGET_WORDS_BIGENDIAN
4371 return ((uint64_t)word0 << 32) | word1;
4372 #else
4373 return ((uint64_t)word1 << 32) | word0;
4374 #endif
4376 #else /* TARGET_ABI_BITS == 32 */
4377 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4379 return word0;
4381 #endif /* TARGET_ABI_BITS != 32 */
4383 #ifdef TARGET_NR_truncate64
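/* On ABIs where 64-bit syscall arguments must start in an even register
   (regpairs_aligned), the offset halves arrive one slot later, so shift
   them down before combining them with target_offset64(). */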
4384 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4385 abi_long arg2,
4386 abi_long arg3,
4387 abi_long arg4)
4389 if (regpairs_aligned(cpu_env)) {
4390 arg2 = arg3;
4391 arg3 = arg4;
4393 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4395 #endif
4397 #ifdef TARGET_NR_ftruncate64
4398 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4399 abi_long arg2,
4400 abi_long arg3,
4401 abi_long arg4)
4403 if (regpairs_aligned(cpu_env)) {
4404 arg2 = arg3;
4405 arg3 = arg4;
4407 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4409 #endif
4411 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4412 abi_ulong target_addr)
4414 struct target_timespec *target_ts;
4416 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4417 return -TARGET_EFAULT;
4418 host_ts->tv_sec = tswapl(target_ts->tv_sec);
4419 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
4420 unlock_user_struct(target_ts, target_addr, 0);
4421 return 0;
4424 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4425 struct timespec *host_ts)
4427 struct target_timespec *target_ts;
4429 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4430 return -TARGET_EFAULT;
4431 target_ts->tv_sec = tswapl(host_ts->tv_sec);
4432 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
4433 unlock_user_struct(target_ts, target_addr, 1);
4434 return 0;
4437 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
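/* Convert a host struct stat into the guest's stat64 layout; ARM EABI
   guests use their own target_eabi_stat64 variant of the structure. */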
4438 static inline abi_long host_to_target_stat64(void *cpu_env,
4439 abi_ulong target_addr,
4440 struct stat *host_st)
4442 #ifdef TARGET_ARM
4443 if (((CPUARMState *)cpu_env)->eabi) {
4444 struct target_eabi_stat64 *target_st;
4446 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4447 return -TARGET_EFAULT;
4448 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4449 __put_user(host_st->st_dev, &target_st->st_dev);
4450 __put_user(host_st->st_ino, &target_st->st_ino);
4451 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4452 __put_user(host_st->st_ino, &target_st->__st_ino);
4453 #endif
4454 __put_user(host_st->st_mode, &target_st->st_mode);
4455 __put_user(host_st->st_nlink, &target_st->st_nlink);
4456 __put_user(host_st->st_uid, &target_st->st_uid);
4457 __put_user(host_st->st_gid, &target_st->st_gid);
4458 __put_user(host_st->st_rdev, &target_st->st_rdev);
4459 __put_user(host_st->st_size, &target_st->st_size);
4460 __put_user(host_st->st_blksize, &target_st->st_blksize);
4461 __put_user(host_st->st_blocks, &target_st->st_blocks);
4462 __put_user(host_st->st_atime, &target_st->target_st_atime);
4463 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4464 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4465 unlock_user_struct(target_st, target_addr, 1);
4466 } else
4467 #endif
4469 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4470 struct target_stat *target_st;
4471 #else
4472 struct target_stat64 *target_st;
4473 #endif
4475 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4476 return -TARGET_EFAULT;
4477 memset(target_st, 0, sizeof(*target_st));
4478 __put_user(host_st->st_dev, &target_st->st_dev);
4479 __put_user(host_st->st_ino, &target_st->st_ino);
4480 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4481 __put_user(host_st->st_ino, &target_st->__st_ino);
4482 #endif
4483 __put_user(host_st->st_mode, &target_st->st_mode);
4484 __put_user(host_st->st_nlink, &target_st->st_nlink);
4485 __put_user(host_st->st_uid, &target_st->st_uid);
4486 __put_user(host_st->st_gid, &target_st->st_gid);
4487 __put_user(host_st->st_rdev, &target_st->st_rdev);
4488 /* XXX: better use of kernel struct */
4489 __put_user(host_st->st_size, &target_st->st_size);
4490 __put_user(host_st->st_blksize, &target_st->st_blksize);
4491 __put_user(host_st->st_blocks, &target_st->st_blocks);
4492 __put_user(host_st->st_atime, &target_st->target_st_atime);
4493 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4494 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4495 unlock_user_struct(target_st, target_addr, 1);
4498 return 0;
4500 #endif
4502 #if defined(CONFIG_USE_NPTL)
4503 /* ??? Using host futex calls even when target atomic operations
4504 are not really atomic probably breaks things. However, implementing
4505 futexes locally would make futexes shared between multiple processes
4506 tricky. Then again, they're probably useless anyway because guest
4507 atomic operations won't work either. */
4508 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4509 target_ulong uaddr2, int val3)
4511 struct timespec ts, *pts;
4512 int base_op;
4514 /* ??? We assume FUTEX_* constants are the same on both host
4515 and target. */
4516 #ifdef FUTEX_CMD_MASK
4517 base_op = op & FUTEX_CMD_MASK;
4518 #else
4519 base_op = op;
4520 #endif
4521 switch (base_op) {
4522 case FUTEX_WAIT:
4523 if (timeout) {
4524 pts = &ts;
4525 target_to_host_timespec(pts, timeout);
4526 } else {
4527 pts = NULL;
4529 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4530 pts, NULL, 0));
4531 case FUTEX_WAKE:
4532 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4533 case FUTEX_FD:
4534 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4535 case FUTEX_REQUEUE:
4536 case FUTEX_CMP_REQUEUE:
4537 case FUTEX_WAKE_OP:
4538 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4539 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4540 But the prototype takes a `struct timespec *'; insert casts
4541 to satisfy the compiler. We do not need to tswap TIMEOUT
4542 since it's not compared to guest memory. */
4543 pts = (struct timespec *)(uintptr_t) timeout;
4544 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4545 g2h(uaddr2),
4546 (base_op == FUTEX_CMP_REQUEUE
4547 ? tswap32(val3)
4548 : val3)));
4549 default:
4550 return -TARGET_ENOSYS;
4553 #endif
4555 /* Map host to target signal numbers for the wait family of syscalls.
4556 Assume all other status bits are the same. */
4557 static int host_to_target_waitstatus(int status)
4559 if (WIFSIGNALED(status)) {
4560 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4562 if (WIFSTOPPED(status)) {
4563 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4564 | (status & 0xff);
4566 return status;
4569 int get_osversion(void)
4571 static int osversion;
4572 struct new_utsname buf;
4573 const char *s;
4574 int i, n, tmp;
4575 if (osversion)
4576 return osversion;
4577 if (qemu_uname_release && *qemu_uname_release) {
4578 s = qemu_uname_release;
4579 } else {
4580 if (sys_uname(&buf))
4581 return 0;
4582 s = buf.release;
4584 tmp = 0;
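/* Parse up to three dot-separated components of the release string,
   packing them as (major << 16) | (minor << 8) | patch. */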
4585 for (i = 0; i < 3; i++) {
4586 n = 0;
4587 while (*s >= '0' && *s <= '9') {
4588 n *= 10;
4589 n += *s - '0';
4590 s++;
4592 tmp = (tmp << 8) + n;
4593 if (*s == '.')
4594 s++;
4596 osversion = tmp;
4597 return osversion;
4600 /* do_syscall() should always have a single exit point at the end so
4601 that actions, such as logging of syscall results, can be performed.
4602 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4603 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4604 abi_long arg2, abi_long arg3, abi_long arg4,
4605 abi_long arg5, abi_long arg6, abi_long arg7,
4606 abi_long arg8)
4608 abi_long ret;
4609 struct stat st;
4610 struct statfs stfs;
4611 void *p;
4613 #ifdef DEBUG
4614 gemu_log("syscall %d", num);
4615 #endif
4616 if(do_strace)
4617 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4619 switch(num) {
4620 case TARGET_NR_exit:
4621 #ifdef CONFIG_USE_NPTL
4622 /* In old applications this may be used to implement _exit(2).
4623 However, in threaded applications it is used for thread termination,
4624 and _exit_group is used for application termination.
4625 Do thread termination if we have more than one thread. */
4626 /* FIXME: This probably breaks if a signal arrives. We should probably
4627 be disabling signals. */
4628 if (first_cpu->next_cpu) {
4629 TaskState *ts;
4630 CPUState **lastp;
4631 CPUState *p;
4633 cpu_list_lock();
4634 lastp = &first_cpu;
4635 p = first_cpu;
4636 while (p && p != (CPUState *)cpu_env) {
4637 lastp = &p->next_cpu;
4638 p = p->next_cpu;
4640 /* If we didn't find the CPU for this thread then something is
4641 horribly wrong. */
4642 if (!p)
4643 abort();
4644 /* Remove the CPU from the list. */
4645 *lastp = p->next_cpu;
4646 cpu_list_unlock();
4647 ts = ((CPUState *)cpu_env)->opaque;
4648 if (ts->child_tidptr) {
4649 put_user_u32(0, ts->child_tidptr);
4650 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4651 NULL, NULL, 0);
4653 thread_env = NULL;
4654 qemu_free(cpu_env);
4655 qemu_free(ts);
4656 pthread_exit(NULL);
4658 #endif
4659 #ifdef TARGET_GPROF
4660 _mcleanup();
4661 #endif
4662 gdb_exit(cpu_env, arg1);
4663 _exit(arg1);
4664 ret = 0; /* avoid warning */
4665 break;
4666 case TARGET_NR_read:
4667 if (arg3 == 0)
4668 ret = 0;
4669 else {
4670 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4671 goto efault;
4672 ret = get_errno(read(arg1, p, arg3));
4673 unlock_user(p, arg2, ret);
4675 break;
4676 case TARGET_NR_write:
4677 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4678 goto efault;
4679 ret = get_errno(write(arg1, p, arg3));
4680 unlock_user(p, arg2, 0);
4681 break;
4682 case TARGET_NR_open:
4683 if (!(p = lock_user_string(arg1)))
4684 goto efault;
4685 ret = get_errno(open(path(p),
4686 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4687 arg3));
4688 unlock_user(p, arg1, 0);
4689 break;
4690 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4691 case TARGET_NR_openat:
4692 if (!(p = lock_user_string(arg2)))
4693 goto efault;
4694 ret = get_errno(sys_openat(arg1,
4695 path(p),
4696 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4697 arg4));
4698 unlock_user(p, arg2, 0);
4699 break;
4700 #endif
4701 case TARGET_NR_close:
4702 ret = get_errno(close(arg1));
4703 break;
4704 case TARGET_NR_brk:
4705 ret = do_brk(arg1);
4706 break;
4707 case TARGET_NR_fork:
4708 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4709 break;
4710 #ifdef TARGET_NR_waitpid
4711 case TARGET_NR_waitpid:
4713 int status;
4714 ret = get_errno(waitpid(arg1, &status, arg3));
4715 if (!is_error(ret) && arg2
4716 && put_user_s32(host_to_target_waitstatus(status), arg2))
4717 goto efault;
4719 break;
4720 #endif
4721 #ifdef TARGET_NR_waitid
4722 case TARGET_NR_waitid:
4724 siginfo_t info;
4725 info.si_pid = 0;
4726 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4727 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4728 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4729 goto efault;
4730 host_to_target_siginfo(p, &info);
4731 unlock_user(p, arg3, sizeof(target_siginfo_t));
4734 break;
4735 #endif
4736 #ifdef TARGET_NR_creat /* not on alpha */
4737 case TARGET_NR_creat:
4738 if (!(p = lock_user_string(arg1)))
4739 goto efault;
4740 ret = get_errno(creat(p, arg2));
4741 unlock_user(p, arg1, 0);
4742 break;
4743 #endif
4744 case TARGET_NR_link:
4746 void * p2;
4747 p = lock_user_string(arg1);
4748 p2 = lock_user_string(arg2);
4749 if (!p || !p2)
4750 ret = -TARGET_EFAULT;
4751 else
4752 ret = get_errno(link(p, p2));
4753 unlock_user(p2, arg2, 0);
4754 unlock_user(p, arg1, 0);
4756 break;
4757 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4758 case TARGET_NR_linkat:
4760 void * p2 = NULL;
4761 if (!arg2 || !arg4)
4762 goto efault;
4763 p = lock_user_string(arg2);
4764 p2 = lock_user_string(arg4);
4765 if (!p || !p2)
4766 ret = -TARGET_EFAULT;
4767 else
4768 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4769 unlock_user(p, arg2, 0);
4770 unlock_user(p2, arg4, 0);
4772 break;
4773 #endif
4774 case TARGET_NR_unlink:
4775 if (!(p = lock_user_string(arg1)))
4776 goto efault;
4777 ret = get_errno(unlink(p));
4778 unlock_user(p, arg1, 0);
4779 break;
4780 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4781 case TARGET_NR_unlinkat:
4782 if (!(p = lock_user_string(arg2)))
4783 goto efault;
4784 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4785 unlock_user(p, arg2, 0);
4786 break;
4787 #endif
4788 case TARGET_NR_execve:
4790 char **argp, **envp;
4791 int argc, envc;
4792 abi_ulong gp;
4793 abi_ulong guest_argp;
4794 abi_ulong guest_envp;
4795 abi_ulong addr;
4796 char **q;
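/* Walk the guest argv and envp arrays once to count the entries, then
   again to lock each string into host memory; everything is unlocked at
   execve_end whether or not execve() succeeds. */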
4798 argc = 0;
4799 guest_argp = arg2;
4800 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4801 if (get_user_ual(addr, gp))
4802 goto efault;
4803 if (!addr)
4804 break;
4805 argc++;
4807 envc = 0;
4808 guest_envp = arg3;
4809 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4810 if (get_user_ual(addr, gp))
4811 goto efault;
4812 if (!addr)
4813 break;
4814 envc++;
4817 argp = alloca((argc + 1) * sizeof(void *));
4818 envp = alloca((envc + 1) * sizeof(void *));
4820 for (gp = guest_argp, q = argp; gp;
4821 gp += sizeof(abi_ulong), q++) {
4822 if (get_user_ual(addr, gp))
4823 goto execve_efault;
4824 if (!addr)
4825 break;
4826 if (!(*q = lock_user_string(addr)))
4827 goto execve_efault;
4829 *q = NULL;
4831 for (gp = guest_envp, q = envp; gp;
4832 gp += sizeof(abi_ulong), q++) {
4833 if (get_user_ual(addr, gp))
4834 goto execve_efault;
4835 if (!addr)
4836 break;
4837 if (!(*q = lock_user_string(addr)))
4838 goto execve_efault;
4840 *q = NULL;
4842 if (!(p = lock_user_string(arg1)))
4843 goto execve_efault;
4844 ret = get_errno(execve(p, argp, envp));
4845 unlock_user(p, arg1, 0);
4847 goto execve_end;
4849 execve_efault:
4850 ret = -TARGET_EFAULT;
4852 execve_end:
4853 for (gp = guest_argp, q = argp; *q;
4854 gp += sizeof(abi_ulong), q++) {
4855 if (get_user_ual(addr, gp)
4856 || !addr)
4857 break;
4858 unlock_user(*q, addr, 0);
4860 for (gp = guest_envp, q = envp; *q;
4861 gp += sizeof(abi_ulong), q++) {
4862 if (get_user_ual(addr, gp)
4863 || !addr)
4864 break;
4865 unlock_user(*q, addr, 0);
4868 break;
4869 case TARGET_NR_chdir:
4870 if (!(p = lock_user_string(arg1)))
4871 goto efault;
4872 ret = get_errno(chdir(p));
4873 unlock_user(p, arg1, 0);
4874 break;
4875 #ifdef TARGET_NR_time
4876 case TARGET_NR_time:
4878 time_t host_time;
4879 ret = get_errno(time(&host_time));
4880 if (!is_error(ret)
4881 && arg1
4882 && put_user_sal(host_time, arg1))
4883 goto efault;
4885 break;
4886 #endif
4887 case TARGET_NR_mknod:
4888 if (!(p = lock_user_string(arg1)))
4889 goto efault;
4890 ret = get_errno(mknod(p, arg2, arg3));
4891 unlock_user(p, arg1, 0);
4892 break;
4893 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4894 case TARGET_NR_mknodat:
4895 if (!(p = lock_user_string(arg2)))
4896 goto efault;
4897 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4898 unlock_user(p, arg2, 0);
4899 break;
4900 #endif
4901 case TARGET_NR_chmod:
4902 if (!(p = lock_user_string(arg1)))
4903 goto efault;
4904 ret = get_errno(chmod(p, arg2));
4905 unlock_user(p, arg1, 0);
4906 break;
4907 #ifdef TARGET_NR_break
4908 case TARGET_NR_break:
4909 goto unimplemented;
4910 #endif
4911 #ifdef TARGET_NR_oldstat
4912 case TARGET_NR_oldstat:
4913 goto unimplemented;
4914 #endif
4915 case TARGET_NR_lseek:
4916 ret = get_errno(lseek(arg1, arg2, arg3));
4917 break;
4918 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4919 /* Alpha specific */
4920 case TARGET_NR_getxpid:
4921 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
4922 ret = get_errno(getpid());
4923 break;
4924 #endif
4925 #ifdef TARGET_NR_getpid
4926 case TARGET_NR_getpid:
4927 ret = get_errno(getpid());
4928 break;
4929 #endif
4930 case TARGET_NR_mount:
4932 /* need to look at the data field */
4933 void *p2, *p3;
4934 p = lock_user_string(arg1);
4935 p2 = lock_user_string(arg2);
4936 p3 = lock_user_string(arg3);
4937 if (!p || !p2 || !p3)
4938 ret = -TARGET_EFAULT;
4939 else {
4940 /* FIXME - arg5 should be locked, but it isn't clear how to
4941 * do that since it's not guaranteed to be a NULL-terminated
4942 * string.
4943 */
4944 if ( ! arg5 )
4945 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
4946 else
4947 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4949 unlock_user(p, arg1, 0);
4950 unlock_user(p2, arg2, 0);
4951 unlock_user(p3, arg3, 0);
4952 break;
4954 #ifdef TARGET_NR_umount
4955 case TARGET_NR_umount:
4956 if (!(p = lock_user_string(arg1)))
4957 goto efault;
4958 ret = get_errno(umount(p));
4959 unlock_user(p, arg1, 0);
4960 break;
4961 #endif
4962 #ifdef TARGET_NR_stime /* not on alpha */
4963 case TARGET_NR_stime:
4965 time_t host_time;
4966 if (get_user_sal(host_time, arg1))
4967 goto efault;
4968 ret = get_errno(stime(&host_time));
4970 break;
4971 #endif
4972 case TARGET_NR_ptrace:
4973 goto unimplemented;
4974 #ifdef TARGET_NR_alarm /* not on alpha */
4975 case TARGET_NR_alarm:
4976 ret = alarm(arg1);
4977 break;
4978 #endif
4979 #ifdef TARGET_NR_oldfstat
4980 case TARGET_NR_oldfstat:
4981 goto unimplemented;
4982 #endif
4983 #ifdef TARGET_NR_pause /* not on alpha */
4984 case TARGET_NR_pause:
4985 ret = get_errno(pause());
4986 break;
4987 #endif
4988 #ifdef TARGET_NR_utime
4989 case TARGET_NR_utime:
4991 struct utimbuf tbuf, *host_tbuf;
4992 struct target_utimbuf *target_tbuf;
4993 if (arg2) {
4994 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4995 goto efault;
4996 tbuf.actime = tswapl(target_tbuf->actime);
4997 tbuf.modtime = tswapl(target_tbuf->modtime);
4998 unlock_user_struct(target_tbuf, arg2, 0);
4999 host_tbuf = &tbuf;
5000 } else {
5001 host_tbuf = NULL;
5003 if (!(p = lock_user_string(arg1)))
5004 goto efault;
5005 ret = get_errno(utime(p, host_tbuf));
5006 unlock_user(p, arg1, 0);
5008 break;
5009 #endif
5010 case TARGET_NR_utimes:
5012 struct timeval *tvp, tv[2];
5013 if (arg2) {
5014 if (copy_from_user_timeval(&tv[0], arg2)
5015 || copy_from_user_timeval(&tv[1],
5016 arg2 + sizeof(struct target_timeval)))
5017 goto efault;
5018 tvp = tv;
5019 } else {
5020 tvp = NULL;
5022 if (!(p = lock_user_string(arg1)))
5023 goto efault;
5024 ret = get_errno(utimes(p, tvp));
5025 unlock_user(p, arg1, 0);
5027 break;
5028 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5029 case TARGET_NR_futimesat:
5031 struct timeval *tvp, tv[2];
5032 if (arg3) {
5033 if (copy_from_user_timeval(&tv[0], arg3)
5034 || copy_from_user_timeval(&tv[1],
5035 arg3 + sizeof(struct target_timeval)))
5036 goto efault;
5037 tvp = tv;
5038 } else {
5039 tvp = NULL;
5041 if (!(p = lock_user_string(arg2)))
5042 goto efault;
5043 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5044 unlock_user(p, arg2, 0);
5046 break;
5047 #endif
5048 #ifdef TARGET_NR_stty
5049 case TARGET_NR_stty:
5050 goto unimplemented;
5051 #endif
5052 #ifdef TARGET_NR_gtty
5053 case TARGET_NR_gtty:
5054 goto unimplemented;
5055 #endif
5056 case TARGET_NR_access:
5057 if (!(p = lock_user_string(arg1)))
5058 goto efault;
5059 ret = get_errno(access(path(p), arg2));
5060 unlock_user(p, arg1, 0);
5061 break;
5062 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5063 case TARGET_NR_faccessat:
5064 if (!(p = lock_user_string(arg2)))
5065 goto efault;
5066 ret = get_errno(sys_faccessat(arg1, p, arg3));
5067 unlock_user(p, arg2, 0);
5068 break;
5069 #endif
5070 #ifdef TARGET_NR_nice /* not on alpha */
5071 case TARGET_NR_nice:
5072 ret = get_errno(nice(arg1));
5073 break;
5074 #endif
5075 #ifdef TARGET_NR_ftime
5076 case TARGET_NR_ftime:
5077 goto unimplemented;
5078 #endif
5079 case TARGET_NR_sync:
5080 sync();
5081 ret = 0;
5082 break;
5083 case TARGET_NR_kill:
5084 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5085 break;
5086 case TARGET_NR_rename:
5088 void *p2;
5089 p = lock_user_string(arg1);
5090 p2 = lock_user_string(arg2);
5091 if (!p || !p2)
5092 ret = -TARGET_EFAULT;
5093 else
5094 ret = get_errno(rename(p, p2));
5095 unlock_user(p2, arg2, 0);
5096 unlock_user(p, arg1, 0);
5098 break;
5099 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5100 case TARGET_NR_renameat:
5102 void *p2;
5103 p = lock_user_string(arg2);
5104 p2 = lock_user_string(arg4);
5105 if (!p || !p2)
5106 ret = -TARGET_EFAULT;
5107 else
5108 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5109 unlock_user(p2, arg4, 0);
5110 unlock_user(p, arg2, 0);
5112 break;
5113 #endif
5114 case TARGET_NR_mkdir:
5115 if (!(p = lock_user_string(arg1)))
5116 goto efault;
5117 ret = get_errno(mkdir(p, arg2));
5118 unlock_user(p, arg1, 0);
5119 break;
5120 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5121 case TARGET_NR_mkdirat:
5122 if (!(p = lock_user_string(arg2)))
5123 goto efault;
5124 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5125 unlock_user(p, arg2, 0);
5126 break;
5127 #endif
5128 case TARGET_NR_rmdir:
5129 if (!(p = lock_user_string(arg1)))
5130 goto efault;
5131 ret = get_errno(rmdir(p));
5132 unlock_user(p, arg1, 0);
5133 break;
5134 case TARGET_NR_dup:
5135 ret = get_errno(dup(arg1));
5136 break;
5137 case TARGET_NR_pipe:
5138 ret = do_pipe(cpu_env, arg1, 0, 0);
5139 break;
5140 #ifdef TARGET_NR_pipe2
5141 case TARGET_NR_pipe2:
5142 ret = do_pipe(cpu_env, arg1, arg2, 1);
5143 break;
5144 #endif
5145 case TARGET_NR_times:
5147 struct target_tms *tmsp;
5148 struct tms tms;
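/* times() deals in clock ticks: convert both the tms fields and the
   (non-error) return value with host_to_target_clock_t(). */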
5149 ret = get_errno(times(&tms));
5150 if (arg1) {
5151 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5152 if (!tmsp)
5153 goto efault;
5154 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
5155 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
5156 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
5157 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
5159 if (!is_error(ret))
5160 ret = host_to_target_clock_t(ret);
5162 break;
5163 #ifdef TARGET_NR_prof
5164 case TARGET_NR_prof:
5165 goto unimplemented;
5166 #endif
5167 #ifdef TARGET_NR_signal
5168 case TARGET_NR_signal:
5169 goto unimplemented;
5170 #endif
5171 case TARGET_NR_acct:
5172 if (arg1 == 0) {
5173 ret = get_errno(acct(NULL));
5174 } else {
5175 if (!(p = lock_user_string(arg1)))
5176 goto efault;
5177 ret = get_errno(acct(path(p)));
5178 unlock_user(p, arg1, 0);
5180 break;
5181 #ifdef TARGET_NR_umount2 /* not on alpha */
5182 case TARGET_NR_umount2:
5183 if (!(p = lock_user_string(arg1)))
5184 goto efault;
5185 ret = get_errno(umount2(p, arg2));
5186 unlock_user(p, arg1, 0);
5187 break;
5188 #endif
5189 #ifdef TARGET_NR_lock
5190 case TARGET_NR_lock:
5191 goto unimplemented;
5192 #endif
5193 case TARGET_NR_ioctl:
5194 ret = do_ioctl(arg1, arg2, arg3);
5195 break;
5196 case TARGET_NR_fcntl:
5197 ret = do_fcntl(arg1, arg2, arg3);
5198 break;
5199 #ifdef TARGET_NR_mpx
5200 case TARGET_NR_mpx:
5201 goto unimplemented;
5202 #endif
5203 case TARGET_NR_setpgid:
5204 ret = get_errno(setpgid(arg1, arg2));
5205 break;
5206 #ifdef TARGET_NR_ulimit
5207 case TARGET_NR_ulimit:
5208 goto unimplemented;
5209 #endif
5210 #ifdef TARGET_NR_oldolduname
5211 case TARGET_NR_oldolduname:
5212 goto unimplemented;
5213 #endif
5214 case TARGET_NR_umask:
5215 ret = get_errno(umask(arg1));
5216 break;
5217 case TARGET_NR_chroot:
5218 if (!(p = lock_user_string(arg1)))
5219 goto efault;
5220 ret = get_errno(chroot(p));
5221 unlock_user(p, arg1, 0);
5222 break;
5223 case TARGET_NR_ustat:
5224 goto unimplemented;
5225 case TARGET_NR_dup2:
5226 ret = get_errno(dup2(arg1, arg2));
5227 break;
5228 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5229 case TARGET_NR_dup3:
5230 ret = get_errno(dup3(arg1, arg2, arg3));
5231 break;
5232 #endif
5233 #ifdef TARGET_NR_getppid /* not on alpha */
5234 case TARGET_NR_getppid:
5235 ret = get_errno(getppid());
5236 break;
5237 #endif
5238 case TARGET_NR_getpgrp:
5239 ret = get_errno(getpgrp());
5240 break;
5241 case TARGET_NR_setsid:
5242 ret = get_errno(setsid());
5243 break;
5244 #ifdef TARGET_NR_sigaction
5245 case TARGET_NR_sigaction:
5247 #if defined(TARGET_ALPHA)
5248 struct target_sigaction act, oact, *pact = 0;
5249 struct target_old_sigaction *old_act;
5250 if (arg2) {
5251 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5252 goto efault;
5253 act._sa_handler = old_act->_sa_handler;
5254 target_siginitset(&act.sa_mask, old_act->sa_mask);
5255 act.sa_flags = old_act->sa_flags;
5256 act.sa_restorer = 0;
5257 unlock_user_struct(old_act, arg2, 0);
5258 pact = &act;
5260 ret = get_errno(do_sigaction(arg1, pact, &oact));
5261 if (!is_error(ret) && arg3) {
5262 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5263 goto efault;
5264 old_act->_sa_handler = oact._sa_handler;
5265 old_act->sa_mask = oact.sa_mask.sig[0];
5266 old_act->sa_flags = oact.sa_flags;
5267 unlock_user_struct(old_act, arg3, 1);
5269 #elif defined(TARGET_MIPS)
5270 struct target_sigaction act, oact, *pact, *old_act;
5272 if (arg2) {
5273 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5274 goto efault;
5275 act._sa_handler = old_act->_sa_handler;
5276 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5277 act.sa_flags = old_act->sa_flags;
5278 unlock_user_struct(old_act, arg2, 0);
5279 pact = &act;
5280 } else {
5281 pact = NULL;
5284 ret = get_errno(do_sigaction(arg1, pact, &oact));
5286 if (!is_error(ret) && arg3) {
5287 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5288 goto efault;
5289 old_act->_sa_handler = oact._sa_handler;
5290 old_act->sa_flags = oact.sa_flags;
5291 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5292 old_act->sa_mask.sig[1] = 0;
5293 old_act->sa_mask.sig[2] = 0;
5294 old_act->sa_mask.sig[3] = 0;
5295 unlock_user_struct(old_act, arg3, 1);
5297 #else
5298 struct target_old_sigaction *old_act;
5299 struct target_sigaction act, oact, *pact;
5300 if (arg2) {
5301 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5302 goto efault;
5303 act._sa_handler = old_act->_sa_handler;
5304 target_siginitset(&act.sa_mask, old_act->sa_mask);
5305 act.sa_flags = old_act->sa_flags;
5306 act.sa_restorer = old_act->sa_restorer;
5307 unlock_user_struct(old_act, arg2, 0);
5308 pact = &act;
5309 } else {
5310 pact = NULL;
5312 ret = get_errno(do_sigaction(arg1, pact, &oact));
5313 if (!is_error(ret) && arg3) {
5314 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5315 goto efault;
5316 old_act->_sa_handler = oact._sa_handler;
5317 old_act->sa_mask = oact.sa_mask.sig[0];
5318 old_act->sa_flags = oact.sa_flags;
5319 old_act->sa_restorer = oact.sa_restorer;
5320 unlock_user_struct(old_act, arg3, 1);
5322 #endif
5324 break;
5325 #endif
5326 case TARGET_NR_rt_sigaction:
5328 #if defined(TARGET_ALPHA)
5329 struct target_sigaction act, oact, *pact = 0;
5330 struct target_rt_sigaction *rt_act;
5331 /* ??? arg4 == sizeof(sigset_t). */
5332 if (arg2) {
5333 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5334 goto efault;
5335 act._sa_handler = rt_act->_sa_handler;
5336 act.sa_mask = rt_act->sa_mask;
5337 act.sa_flags = rt_act->sa_flags;
5338 act.sa_restorer = arg5;
5339 unlock_user_struct(rt_act, arg2, 0);
5340 pact = &act;
5342 ret = get_errno(do_sigaction(arg1, pact, &oact));
5343 if (!is_error(ret) && arg3) {
5344 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5345 goto efault;
5346 rt_act->_sa_handler = oact._sa_handler;
5347 rt_act->sa_mask = oact.sa_mask;
5348 rt_act->sa_flags = oact.sa_flags;
5349 unlock_user_struct(rt_act, arg3, 1);
5351 #else
5352 struct target_sigaction *act;
5353 struct target_sigaction *oact;
5355 if (arg2) {
5356 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5357 goto efault;
5358 } else
5359 act = NULL;
5360 if (arg3) {
5361 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5362 ret = -TARGET_EFAULT;
5363 goto rt_sigaction_fail;
5365 } else
5366 oact = NULL;
5367 ret = get_errno(do_sigaction(arg1, act, oact));
5368 rt_sigaction_fail:
5369 if (act)
5370 unlock_user_struct(act, arg2, 0);
5371 if (oact)
5372 unlock_user_struct(oact, arg3, 1);
5373 #endif
5375 break;
5376 #ifdef TARGET_NR_sgetmask /* not on alpha */
5377 case TARGET_NR_sgetmask:
5379 sigset_t cur_set;
5380 abi_ulong target_set;
5381 sigprocmask(0, NULL, &cur_set);
5382 host_to_target_old_sigset(&target_set, &cur_set);
5383 ret = target_set;
5385 break;
5386 #endif
5387 #ifdef TARGET_NR_ssetmask /* not on alpha */
5388 case TARGET_NR_ssetmask:
5390 sigset_t set, oset, cur_set;
5391 abi_ulong target_set = arg1;
5392 sigprocmask(0, NULL, &cur_set);
5393 target_to_host_old_sigset(&set, &target_set);
5394 sigorset(&set, &set, &cur_set);
5395 sigprocmask(SIG_SETMASK, &set, &oset);
5396 host_to_target_old_sigset(&target_set, &oset);
5397 ret = target_set;
5399 break;
5400 #endif
5401 #ifdef TARGET_NR_sigprocmask
5402 case TARGET_NR_sigprocmask:
5404 #if defined(TARGET_ALPHA)
5405 sigset_t set, oldset;
5406 abi_ulong mask;
5407 int how;
5409 switch (arg1) {
5410 case TARGET_SIG_BLOCK:
5411 how = SIG_BLOCK;
5412 break;
5413 case TARGET_SIG_UNBLOCK:
5414 how = SIG_UNBLOCK;
5415 break;
5416 case TARGET_SIG_SETMASK:
5417 how = SIG_SETMASK;
5418 break;
5419 default:
5420 ret = -TARGET_EINVAL;
5421 goto fail;
5423 mask = arg2;
5424 target_to_host_old_sigset(&set, &mask);
5426 ret = get_errno(sigprocmask(how, &set, &oldset));
5428 if (!is_error(ret)) {
5429 host_to_target_old_sigset(&mask, &oldset);
5430 ret = mask;
5431 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5433 #else
5434 sigset_t set, oldset, *set_ptr;
5435 int how;
5437 if (arg2) {
5438 switch (arg1) {
5439 case TARGET_SIG_BLOCK:
5440 how = SIG_BLOCK;
5441 break;
5442 case TARGET_SIG_UNBLOCK:
5443 how = SIG_UNBLOCK;
5444 break;
5445 case TARGET_SIG_SETMASK:
5446 how = SIG_SETMASK;
5447 break;
5448 default:
5449 ret = -TARGET_EINVAL;
5450 goto fail;
5452 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5453 goto efault;
5454 target_to_host_old_sigset(&set, p);
5455 unlock_user(p, arg2, 0);
5456 set_ptr = &set;
5457 } else {
5458 how = 0;
5459 set_ptr = NULL;
5461 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5462 if (!is_error(ret) && arg3) {
5463 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5464 goto efault;
5465 host_to_target_old_sigset(p, &oldset);
5466 unlock_user(p, arg3, sizeof(target_sigset_t));
5468 #endif
5470 break;
5471 #endif
5472 case TARGET_NR_rt_sigprocmask:
5474 int how = arg1;
5475 sigset_t set, oldset, *set_ptr;
5477 if (arg2) {
5478 switch(how) {
5479 case TARGET_SIG_BLOCK:
5480 how = SIG_BLOCK;
5481 break;
5482 case TARGET_SIG_UNBLOCK:
5483 how = SIG_UNBLOCK;
5484 break;
5485 case TARGET_SIG_SETMASK:
5486 how = SIG_SETMASK;
5487 break;
5488 default:
5489 ret = -TARGET_EINVAL;
5490 goto fail;
5492 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5493 goto efault;
5494 target_to_host_sigset(&set, p);
5495 unlock_user(p, arg2, 0);
5496 set_ptr = &set;
5497 } else {
5498 how = 0;
5499 set_ptr = NULL;
5501 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5502 if (!is_error(ret) && arg3) {
5503 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5504 goto efault;
5505 host_to_target_sigset(p, &oldset);
5506 unlock_user(p, arg3, sizeof(target_sigset_t));
5509 break;
5510 #ifdef TARGET_NR_sigpending
5511 case TARGET_NR_sigpending:
5513 sigset_t set;
5514 ret = get_errno(sigpending(&set));
5515 if (!is_error(ret)) {
5516 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5517 goto efault;
5518 host_to_target_old_sigset(p, &set);
5519 unlock_user(p, arg1, sizeof(target_sigset_t));
5522 break;
5523 #endif
5524 case TARGET_NR_rt_sigpending:
5526 sigset_t set;
5527 ret = get_errno(sigpending(&set));
5528 if (!is_error(ret)) {
5529 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5530 goto efault;
5531 host_to_target_sigset(p, &set);
5532 unlock_user(p, arg1, sizeof(target_sigset_t));
5535 break;
5536 #ifdef TARGET_NR_sigsuspend
5537 case TARGET_NR_sigsuspend:
5539 sigset_t set;
5540 #if defined(TARGET_ALPHA)
5541 abi_ulong mask = arg1;
5542 target_to_host_old_sigset(&set, &mask);
5543 #else
5544 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5545 goto efault;
5546 target_to_host_old_sigset(&set, p);
5547 unlock_user(p, arg1, 0);
5548 #endif
5549 ret = get_errno(sigsuspend(&set));
5551 break;
5552 #endif
5553 case TARGET_NR_rt_sigsuspend:
5555 sigset_t set;
5556 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5557 goto efault;
5558 target_to_host_sigset(&set, p);
5559 unlock_user(p, arg1, 0);
5560 ret = get_errno(sigsuspend(&set));
5562 break;
5563 case TARGET_NR_rt_sigtimedwait:
5565 sigset_t set;
5566 struct timespec uts, *puts;
5567 siginfo_t uinfo;
5569 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5570 goto efault;
5571 target_to_host_sigset(&set, p);
5572 unlock_user(p, arg1, 0);
5573 if (arg3) {
5574 puts = &uts;
5575 target_to_host_timespec(puts, arg3);
5576 } else {
5577 puts = NULL;
5579 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5580 if (!is_error(ret) && arg2) {
5581 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5582 goto efault;
5583 host_to_target_siginfo(p, &uinfo);
5584 unlock_user(p, arg2, sizeof(target_siginfo_t));
5587 break;
5588 case TARGET_NR_rt_sigqueueinfo:
5590 siginfo_t uinfo;
5591 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
5592 goto efault;
5593 target_to_host_siginfo(&uinfo, p);
5594 unlock_user(p, arg1, 0);
5595 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5597 break;
5598 #ifdef TARGET_NR_sigreturn
5599 case TARGET_NR_sigreturn:
5600 /* NOTE: ret is eax, so no transcoding needs to be done */
5601 ret = do_sigreturn(cpu_env);
5602 break;
5603 #endif
5604 case TARGET_NR_rt_sigreturn:
5605 /* NOTE: ret is eax, so no transcoding needs to be done */
5606 ret = do_rt_sigreturn(cpu_env);
5607 break;
5608 case TARGET_NR_sethostname:
5609 if (!(p = lock_user_string(arg1)))
5610 goto efault;
5611 ret = get_errno(sethostname(p, arg2));
5612 unlock_user(p, arg1, 0);
5613 break;
5614 case TARGET_NR_setrlimit:
5616 int resource = target_to_host_resource(arg1);
5617 struct target_rlimit *target_rlim;
5618 struct rlimit rlim;
5619 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5620 goto efault;
5621 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5622 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5623 unlock_user_struct(target_rlim, arg2, 0);
5624 ret = get_errno(setrlimit(resource, &rlim));
5626 break;
5627 case TARGET_NR_getrlimit:
5629 int resource = target_to_host_resource(arg1);
5630 struct target_rlimit *target_rlim;
5631 struct rlimit rlim;
5633 ret = get_errno(getrlimit(resource, &rlim));
5634 if (!is_error(ret)) {
5635 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5636 goto efault;
5637 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5638 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5639 unlock_user_struct(target_rlim, arg2, 1);
5642 break;
5643 case TARGET_NR_getrusage:
5645 struct rusage rusage;
5646 ret = get_errno(getrusage(arg1, &rusage));
5647 if (!is_error(ret)) {
5648 host_to_target_rusage(arg2, &rusage);
5651 break;
5652 case TARGET_NR_gettimeofday:
5654 struct timeval tv;
5655 ret = get_errno(gettimeofday(&tv, NULL));
5656 if (!is_error(ret)) {
5657 if (copy_to_user_timeval(arg1, &tv))
5658 goto efault;
5661 break;
5662 case TARGET_NR_settimeofday:
5664 struct timeval tv;
5665 if (copy_from_user_timeval(&tv, arg1))
5666 goto efault;
5667 ret = get_errno(settimeofday(&tv, NULL));
5669 break;
5670 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5671 case TARGET_NR_select:
5673 struct target_sel_arg_struct *sel;
5674 abi_ulong inp, outp, exp, tvp;
5675 long nsel;
5677 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5678 goto efault;
5679 nsel = tswapl(sel->n);
5680 inp = tswapl(sel->inp);
5681 outp = tswapl(sel->outp);
5682 exp = tswapl(sel->exp);
5683 tvp = tswapl(sel->tvp);
5684 unlock_user_struct(sel, arg1, 0);
5685 ret = do_select(nsel, inp, outp, exp, tvp);
5687 break;
5688 #endif
5689 #ifdef TARGET_NR_pselect6
5690 case TARGET_NR_pselect6:
5692 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
5693 fd_set rfds, wfds, efds;
5694 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
5695 struct timespec ts, *ts_ptr;
5698 * The 6th arg is actually two args smashed together,
5699 * so we cannot use the C library.
5701 sigset_t set;
5702 struct {
5703 sigset_t *set;
5704 size_t size;
5705 } sig, *sig_ptr;
5707 abi_ulong arg_sigset, arg_sigsize, *arg7;
5708 target_sigset_t *target_sigset;
5710 n = arg1;
5711 rfd_addr = arg2;
5712 wfd_addr = arg3;
5713 efd_addr = arg4;
5714 ts_addr = arg5;
5716 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
5717 if (ret) {
5718 goto fail;
5720 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
5721 if (ret) {
5722 goto fail;
5724 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
5725 if (ret) {
5726 goto fail;
5730 * This takes a timespec, and not a timeval, so we cannot
5731 * use the do_select() helper ...
5733 if (ts_addr) {
5734 if (target_to_host_timespec(&ts, ts_addr)) {
5735 goto efault;
5737 ts_ptr = &ts;
5738 } else {
5739 ts_ptr = NULL;
5742 /* Extract the two packed args for the sigset */
5743 if (arg6) {
5744 sig_ptr = &sig;
5745 sig.size = _NSIG / 8;
5747 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
5748 if (!arg7) {
5749 goto efault;
5751 arg_sigset = tswapl(arg7[0]);
5752 arg_sigsize = tswapl(arg7[1]);
5753 unlock_user(arg7, arg6, 0);
5755 if (arg_sigset) {
5756 sig.set = &set;
5757 if (arg_sigsize != sizeof(*target_sigset)) {
5758 /* Like the kernel, we enforce correct size sigsets */
5759 ret = -TARGET_EINVAL;
5760 goto fail;
5762 target_sigset = lock_user(VERIFY_READ, arg_sigset,
5763 sizeof(*target_sigset), 1);
5764 if (!target_sigset) {
5765 goto efault;
5767 target_to_host_sigset(&set, target_sigset);
5768 unlock_user(target_sigset, arg_sigset, 0);
5769 } else {
5770 sig.set = NULL;
5772 } else {
5773 sig_ptr = NULL;
5776 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
5777 ts_ptr, sig_ptr));
5779 if (!is_error(ret)) {
5780 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
5781 goto efault;
5782 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
5783 goto efault;
5784 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
5785 goto efault;
5787 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
5788 goto efault;
5791 break;
5792 #endif
5793 case TARGET_NR_symlink:
5795 void *p2;
5796 p = lock_user_string(arg1);
5797 p2 = lock_user_string(arg2);
5798 if (!p || !p2)
5799 ret = -TARGET_EFAULT;
5800 else
5801 ret = get_errno(symlink(p, p2));
5802 unlock_user(p2, arg2, 0);
5803 unlock_user(p, arg1, 0);
5805 break;
5806 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5807 case TARGET_NR_symlinkat:
5809 void *p2;
5810 p = lock_user_string(arg1);
5811 p2 = lock_user_string(arg3);
5812 if (!p || !p2)
5813 ret = -TARGET_EFAULT;
5814 else
5815 ret = get_errno(sys_symlinkat(p, arg2, p2));
5816 unlock_user(p2, arg3, 0);
5817 unlock_user(p, arg1, 0);
5819 break;
5820 #endif
5821 #ifdef TARGET_NR_oldlstat
5822 case TARGET_NR_oldlstat:
5823 goto unimplemented;
5824 #endif
5825 case TARGET_NR_readlink:
5827 void *p2, *temp;
5828 p = lock_user_string(arg1);
5829 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5830 if (!p || !p2)
5831 ret = -TARGET_EFAULT;
5832 else {
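/* readlink("/proc/self/exe") must report the emulated binary rather than
   the qemu binary, so resolve exec_path on the host instead of forwarding
   the readlink. */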
5833 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5834 char real[PATH_MAX];
5835 temp = realpath(exec_path, real);
5836 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
5837 snprintf((char *)p2, arg3, "%s", real);
5839 else
5840 ret = get_errno(readlink(path(p), p2, arg3));
5842 unlock_user(p2, arg2, ret);
5843 unlock_user(p, arg1, 0);
5845 break;
5846 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5847 case TARGET_NR_readlinkat:
5849 void *p2;
5850 p = lock_user_string(arg2);
5851 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5852 if (!p || !p2)
5853 ret = -TARGET_EFAULT;
5854 else
5855 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5856 unlock_user(p2, arg3, ret);
5857 unlock_user(p, arg2, 0);
5859 break;
5860 #endif
5861 #ifdef TARGET_NR_uselib
5862 case TARGET_NR_uselib:
5863 goto unimplemented;
5864 #endif
5865 #ifdef TARGET_NR_swapon
5866 case TARGET_NR_swapon:
5867 if (!(p = lock_user_string(arg1)))
5868 goto efault;
5869 ret = get_errno(swapon(p, arg2));
5870 unlock_user(p, arg1, 0);
5871 break;
5872 #endif
5873 case TARGET_NR_reboot:
5874 goto unimplemented;
5875 #ifdef TARGET_NR_readdir
5876 case TARGET_NR_readdir:
5877 goto unimplemented;
5878 #endif
5879 #ifdef TARGET_NR_mmap
5880 case TARGET_NR_mmap:
5881 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5882 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5883 || defined(TARGET_S390X)
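/* On these targets the old-style mmap passes a single pointer to an
   array of six arguments; unpack them before calling target_mmap(). */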
5885 abi_ulong *v;
5886 abi_ulong v1, v2, v3, v4, v5, v6;
5887 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5888 goto efault;
5889 v1 = tswapl(v[0]);
5890 v2 = tswapl(v[1]);
5891 v3 = tswapl(v[2]);
5892 v4 = tswapl(v[3]);
5893 v5 = tswapl(v[4]);
5894 v6 = tswapl(v[5]);
5895 unlock_user(v, arg1, 0);
5896 ret = get_errno(target_mmap(v1, v2, v3,
5897 target_to_host_bitmask(v4, mmap_flags_tbl),
5898 v5, v6));
5900 #else
5901 ret = get_errno(target_mmap(arg1, arg2, arg3,
5902 target_to_host_bitmask(arg4, mmap_flags_tbl),
5903 arg5,
5904 arg6));
5905 #endif
5906 break;
5907 #endif
5908 #ifdef TARGET_NR_mmap2
5909 case TARGET_NR_mmap2:
5910 #ifndef MMAP_SHIFT
5911 #define MMAP_SHIFT 12
5912 #endif
5913 ret = get_errno(target_mmap(arg1, arg2, arg3,
5914 target_to_host_bitmask(arg4, mmap_flags_tbl),
5915 arg5,
5916 arg6 << MMAP_SHIFT));
5917 break;
5918 #endif
5919 case TARGET_NR_munmap:
5920 ret = get_errno(target_munmap(arg1, arg2));
5921 break;
5922 case TARGET_NR_mprotect:
5924 TaskState *ts = ((CPUState *)cpu_env)->opaque;
5925 /* Special hack to detect libc making the stack executable. */
5926 if ((arg3 & PROT_GROWSDOWN)
5927 && arg1 >= ts->info->stack_limit
5928 && arg1 <= ts->info->start_stack) {
5929 arg3 &= ~PROT_GROWSDOWN;
5930 arg2 = arg2 + arg1 - ts->info->stack_limit;
5931 arg1 = ts->info->stack_limit;
5934 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5935 break;
5936 #ifdef TARGET_NR_mremap
5937 case TARGET_NR_mremap:
5938 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5939 break;
5940 #endif
5941 /* ??? msync/mlock/munlock are broken for softmmu. */
5942 #ifdef TARGET_NR_msync
5943 case TARGET_NR_msync:
5944 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5945 break;
5946 #endif
5947 #ifdef TARGET_NR_mlock
5948 case TARGET_NR_mlock:
5949 ret = get_errno(mlock(g2h(arg1), arg2));
5950 break;
5951 #endif
5952 #ifdef TARGET_NR_munlock
5953 case TARGET_NR_munlock:
5954 ret = get_errno(munlock(g2h(arg1), arg2));
5955 break;
5956 #endif
5957 #ifdef TARGET_NR_mlockall
5958 case TARGET_NR_mlockall:
5959 ret = get_errno(mlockall(arg1));
5960 break;
5961 #endif
5962 #ifdef TARGET_NR_munlockall
5963 case TARGET_NR_munlockall:
5964 ret = get_errno(munlockall());
5965 break;
5966 #endif
5967 case TARGET_NR_truncate:
5968 if (!(p = lock_user_string(arg1)))
5969 goto efault;
5970 ret = get_errno(truncate(p, arg2));
5971 unlock_user(p, arg1, 0);
5972 break;
5973 case TARGET_NR_ftruncate:
5974 ret = get_errno(ftruncate(arg1, arg2));
5975 break;
5976 case TARGET_NR_fchmod:
5977 ret = get_errno(fchmod(arg1, arg2));
5978 break;
5979 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5980 case TARGET_NR_fchmodat:
5981 if (!(p = lock_user_string(arg2)))
5982 goto efault;
5983 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5984 unlock_user(p, arg2, 0);
5985 break;
5986 #endif
5987 case TARGET_NR_getpriority:
5988 /* libc does special remapping of the return value of
5989 * sys_getpriority() so it's just easiest to call
5990 * sys_getpriority() directly rather than through libc. */
5991 ret = get_errno(sys_getpriority(arg1, arg2));
5992 break;
5993 case TARGET_NR_setpriority:
5994 ret = get_errno(setpriority(arg1, arg2, arg3));
5995 break;
5996 #ifdef TARGET_NR_profil
5997 case TARGET_NR_profil:
5998 goto unimplemented;
5999 #endif
6000 case TARGET_NR_statfs:
6001 if (!(p = lock_user_string(arg1)))
6002 goto efault;
6003 ret = get_errno(statfs(path(p), &stfs));
6004 unlock_user(p, arg1, 0);
6005 convert_statfs:
6006 if (!is_error(ret)) {
6007 struct target_statfs *target_stfs;
6009 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6010 goto efault;
6011 __put_user(stfs.f_type, &target_stfs->f_type);
6012 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6013 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6014 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6015 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6016 __put_user(stfs.f_files, &target_stfs->f_files);
6017 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6018 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6019 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6020 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6021 unlock_user_struct(target_stfs, arg2, 1);
6023 break;
6024 case TARGET_NR_fstatfs:
6025 ret = get_errno(fstatfs(arg1, &stfs));
6026 goto convert_statfs;
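    /* convert_statfs and convert_statfs64 (below) are shared conversion
     * tails: the fstatfs variants only differ in how the host struct statfs
     * is obtained and then jump back into the same copy-out code that fills
     * in the guest structure field by field. */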
6027 #ifdef TARGET_NR_statfs64
6028 case TARGET_NR_statfs64:
6029 if (!(p = lock_user_string(arg1)))
6030 goto efault;
6031 ret = get_errno(statfs(path(p), &stfs));
6032 unlock_user(p, arg1, 0);
6033 convert_statfs64:
6034 if (!is_error(ret)) {
6035 struct target_statfs64 *target_stfs;
6037 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6038 goto efault;
6039 __put_user(stfs.f_type, &target_stfs->f_type);
6040 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6041 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6042 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6043 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6044 __put_user(stfs.f_files, &target_stfs->f_files);
6045 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6046 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6047 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6048 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6049 unlock_user_struct(target_stfs, arg3, 1);
6051 break;
6052 case TARGET_NR_fstatfs64:
6053 ret = get_errno(fstatfs(arg1, &stfs));
6054 goto convert_statfs64;
6055 #endif
6056 #ifdef TARGET_NR_ioperm
6057 case TARGET_NR_ioperm:
6058 goto unimplemented;
6059 #endif
6060 #ifdef TARGET_NR_socketcall
6061 case TARGET_NR_socketcall:
6062 ret = do_socketcall(arg1, arg2);
6063 break;
6064 #endif
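    /* Targets such as i386 funnel every socket operation through the single
     * socketcall syscall handled above; the individual TARGET_NR_socket,
     * TARGET_NR_bind, ... cases below are only compiled in for targets that
     * define a separate syscall number per operation. */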
6065 #ifdef TARGET_NR_accept
6066 case TARGET_NR_accept:
6067 ret = do_accept(arg1, arg2, arg3);
6068 break;
6069 #endif
6070 #ifdef TARGET_NR_bind
6071 case TARGET_NR_bind:
6072 ret = do_bind(arg1, arg2, arg3);
6073 break;
6074 #endif
6075 #ifdef TARGET_NR_connect
6076 case TARGET_NR_connect:
6077 ret = do_connect(arg1, arg2, arg3);
6078 break;
6079 #endif
6080 #ifdef TARGET_NR_getpeername
6081 case TARGET_NR_getpeername:
6082 ret = do_getpeername(arg1, arg2, arg3);
6083 break;
6084 #endif
6085 #ifdef TARGET_NR_getsockname
6086 case TARGET_NR_getsockname:
6087 ret = do_getsockname(arg1, arg2, arg3);
6088 break;
6089 #endif
6090 #ifdef TARGET_NR_getsockopt
6091 case TARGET_NR_getsockopt:
6092 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6093 break;
6094 #endif
6095 #ifdef TARGET_NR_listen
6096 case TARGET_NR_listen:
6097 ret = get_errno(listen(arg1, arg2));
6098 break;
6099 #endif
6100 #ifdef TARGET_NR_recv
6101 case TARGET_NR_recv:
6102 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6103 break;
6104 #endif
6105 #ifdef TARGET_NR_recvfrom
6106 case TARGET_NR_recvfrom:
6107 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6108 break;
6109 #endif
6110 #ifdef TARGET_NR_recvmsg
6111 case TARGET_NR_recvmsg:
6112 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6113 break;
6114 #endif
6115 #ifdef TARGET_NR_send
6116 case TARGET_NR_send:
6117 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6118 break;
6119 #endif
6120 #ifdef TARGET_NR_sendmsg
6121 case TARGET_NR_sendmsg:
6122 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6123 break;
6124 #endif
6125 #ifdef TARGET_NR_sendto
6126 case TARGET_NR_sendto:
6127 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6128 break;
6129 #endif
6130 #ifdef TARGET_NR_shutdown
6131 case TARGET_NR_shutdown:
6132 ret = get_errno(shutdown(arg1, arg2));
6133 break;
6134 #endif
6135 #ifdef TARGET_NR_socket
6136 case TARGET_NR_socket:
6137 ret = do_socket(arg1, arg2, arg3);
6138 break;
6139 #endif
6140 #ifdef TARGET_NR_socketpair
6141 case TARGET_NR_socketpair:
6142 ret = do_socketpair(arg1, arg2, arg3, arg4);
6143 break;
6144 #endif
6145 #ifdef TARGET_NR_setsockopt
6146 case TARGET_NR_setsockopt:
6147 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6148 break;
6149 #endif
6151 case TARGET_NR_syslog:
6152 if (!(p = lock_user_string(arg2)))
6153 goto efault;
6154 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6155 unlock_user(p, arg2, 0);
6156 break;
6158 case TARGET_NR_setitimer:
6160 struct itimerval value, ovalue, *pvalue;
6162 if (arg2) {
6163 pvalue = &value;
6164 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6165 || copy_from_user_timeval(&pvalue->it_value,
6166 arg2 + sizeof(struct target_timeval)))
6167 goto efault;
6168 } else {
6169 pvalue = NULL;
6171 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6172 if (!is_error(ret) && arg3) {
6173 if (copy_to_user_timeval(arg3,
6174 &ovalue.it_interval)
6175 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6176 &ovalue.it_value))
6177 goto efault;
6180 break;
6181 case TARGET_NR_getitimer:
6183 struct itimerval value;
6185 ret = get_errno(getitimer(arg1, &value));
6186 if (!is_error(ret) && arg2) {
6187 if (copy_to_user_timeval(arg2,
6188 &value.it_interval)
6189 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6190 &value.it_value))
6191 goto efault;
6194 break;
6195 case TARGET_NR_stat:
6196 if (!(p = lock_user_string(arg1)))
6197 goto efault;
6198 ret = get_errno(stat(path(p), &st));
6199 unlock_user(p, arg1, 0);
6200 goto do_stat;
6201 case TARGET_NR_lstat:
6202 if (!(p = lock_user_string(arg1)))
6203 goto efault;
6204 ret = get_errno(lstat(path(p), &st));
6205 unlock_user(p, arg1, 0);
6206 goto do_stat;
6207 case TARGET_NR_fstat:
6209 ret = get_errno(fstat(arg1, &st));
6210 do_stat:
6211 if (!is_error(ret)) {
6212 struct target_stat *target_st;
6214 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6215 goto efault;
6216 memset(target_st, 0, sizeof(*target_st));
6217 __put_user(st.st_dev, &target_st->st_dev);
6218 __put_user(st.st_ino, &target_st->st_ino);
6219 __put_user(st.st_mode, &target_st->st_mode);
6220 __put_user(st.st_uid, &target_st->st_uid);
6221 __put_user(st.st_gid, &target_st->st_gid);
6222 __put_user(st.st_nlink, &target_st->st_nlink);
6223 __put_user(st.st_rdev, &target_st->st_rdev);
6224 __put_user(st.st_size, &target_st->st_size);
6225 __put_user(st.st_blksize, &target_st->st_blksize);
6226 __put_user(st.st_blocks, &target_st->st_blocks);
6227 __put_user(st.st_atime, &target_st->target_st_atime);
6228 __put_user(st.st_mtime, &target_st->target_st_mtime);
6229 __put_user(st.st_ctime, &target_st->target_st_ctime);
6230 unlock_user_struct(target_st, arg2, 1);
6233 break;
6234 #ifdef TARGET_NR_olduname
6235 case TARGET_NR_olduname:
6236 goto unimplemented;
6237 #endif
6238 #ifdef TARGET_NR_iopl
6239 case TARGET_NR_iopl:
6240 goto unimplemented;
6241 #endif
6242 case TARGET_NR_vhangup:
6243 ret = get_errno(vhangup());
6244 break;
6245 #ifdef TARGET_NR_idle
6246 case TARGET_NR_idle:
6247 goto unimplemented;
6248 #endif
6249 #ifdef TARGET_NR_syscall
6250 case TARGET_NR_syscall:
6251 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6252 arg6, arg7, arg8, 0);
6253 break;
6254 #endif
6255 case TARGET_NR_wait4:
6257 int status;
6258 abi_long status_ptr = arg2;
6259 struct rusage rusage, *rusage_ptr;
6260 abi_ulong target_rusage = arg4;
6261 if (target_rusage)
6262 rusage_ptr = &rusage;
6263 else
6264 rusage_ptr = NULL;
6265 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6266 if (!is_error(ret)) {
6267 if (status_ptr) {
6268 status = host_to_target_waitstatus(status);
6269 if (put_user_s32(status, status_ptr))
6270 goto efault;
6272 if (target_rusage)
6273 host_to_target_rusage(target_rusage, &rusage);
6276 break;
6277 #ifdef TARGET_NR_swapoff
6278 case TARGET_NR_swapoff:
6279 if (!(p = lock_user_string(arg1)))
6280 goto efault;
6281 ret = get_errno(swapoff(p));
6282 unlock_user(p, arg1, 0);
6283 break;
6284 #endif
6285 case TARGET_NR_sysinfo:
6287 struct target_sysinfo *target_value;
6288 struct sysinfo value;
6289 ret = get_errno(sysinfo(&value));
6290 if (!is_error(ret) && arg1)
6292 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6293 goto efault;
6294 __put_user(value.uptime, &target_value->uptime);
6295 __put_user(value.loads[0], &target_value->loads[0]);
6296 __put_user(value.loads[1], &target_value->loads[1]);
6297 __put_user(value.loads[2], &target_value->loads[2]);
6298 __put_user(value.totalram, &target_value->totalram);
6299 __put_user(value.freeram, &target_value->freeram);
6300 __put_user(value.sharedram, &target_value->sharedram);
6301 __put_user(value.bufferram, &target_value->bufferram);
6302 __put_user(value.totalswap, &target_value->totalswap);
6303 __put_user(value.freeswap, &target_value->freeswap);
6304 __put_user(value.procs, &target_value->procs);
6305 __put_user(value.totalhigh, &target_value->totalhigh);
6306 __put_user(value.freehigh, &target_value->freehigh);
6307 __put_user(value.mem_unit, &target_value->mem_unit);
6308 unlock_user_struct(target_value, arg1, 1);
6311 break;
6312 #ifdef TARGET_NR_ipc
6313 case TARGET_NR_ipc:
6314 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6315 break;
6316 #endif
6317 #ifdef TARGET_NR_semget
6318 case TARGET_NR_semget:
6319 ret = get_errno(semget(arg1, arg2, arg3));
6320 break;
6321 #endif
6322 #ifdef TARGET_NR_semop
6323 case TARGET_NR_semop:
6324 ret = get_errno(do_semop(arg1, arg2, arg3));
6325 break;
6326 #endif
6327 #ifdef TARGET_NR_semctl
6328 case TARGET_NR_semctl:
6329 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6330 break;
6331 #endif
6332 #ifdef TARGET_NR_msgctl
6333 case TARGET_NR_msgctl:
6334 ret = do_msgctl(arg1, arg2, arg3);
6335 break;
6336 #endif
6337 #ifdef TARGET_NR_msgget
6338 case TARGET_NR_msgget:
6339 ret = get_errno(msgget(arg1, arg2));
6340 break;
6341 #endif
6342 #ifdef TARGET_NR_msgrcv
6343 case TARGET_NR_msgrcv:
6344 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6345 break;
6346 #endif
6347 #ifdef TARGET_NR_msgsnd
6348 case TARGET_NR_msgsnd:
6349 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6350 break;
6351 #endif
6352 #ifdef TARGET_NR_shmget
6353 case TARGET_NR_shmget:
6354 ret = get_errno(shmget(arg1, arg2, arg3));
6355 break;
6356 #endif
6357 #ifdef TARGET_NR_shmctl
6358 case TARGET_NR_shmctl:
6359 ret = do_shmctl(arg1, arg2, arg3);
6360 break;
6361 #endif
6362 #ifdef TARGET_NR_shmat
6363 case TARGET_NR_shmat:
6364 ret = do_shmat(arg1, arg2, arg3);
6365 break;
6366 #endif
6367 #ifdef TARGET_NR_shmdt
6368 case TARGET_NR_shmdt:
6369 ret = do_shmdt(arg1);
6370 break;
6371 #endif
6372 case TARGET_NR_fsync:
6373 ret = get_errno(fsync(arg1));
6374 break;
6375 case TARGET_NR_clone:
6376 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6377 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6378 #elif defined(TARGET_CRIS)
6379 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6380 #elif defined(TARGET_S390X)
6381 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6382 #else
6383 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6384 #endif
6385 break;
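    /* The per-target clone calls above reflect differing guest register
     * conventions: SH4, Alpha and S390X pass arg4 and arg5 (the TLS and
     * child-tid values) to do_fork() in swapped order, and CRIS additionally
     * swaps arg1 and arg2 (flags and new stack pointer). */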
6386 #ifdef __NR_exit_group
6387 /* new thread calls */
6388 case TARGET_NR_exit_group:
6389 #ifdef TARGET_GPROF
6390 _mcleanup();
6391 #endif
6392 gdb_exit(cpu_env, arg1);
6393 ret = get_errno(exit_group(arg1));
6394 break;
6395 #endif
6396 case TARGET_NR_setdomainname:
6397 if (!(p = lock_user_string(arg1)))
6398 goto efault;
6399 ret = get_errno(setdomainname(p, arg2));
6400 unlock_user(p, arg1, 0);
6401 break;
6402 case TARGET_NR_uname:
6403 /* no need to transcode because we use the linux syscall */
6405 struct new_utsname * buf;
6407 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6408 goto efault;
6409 ret = get_errno(sys_uname(buf));
6410 if (!is_error(ret)) {
6411 /* Overwrite the native machine name with whatever is being
6412 emulated. */
6413 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6414 /* Allow the user to override the reported release. */
6415 if (qemu_uname_release && *qemu_uname_release)
6416 strcpy (buf->release, qemu_uname_release);
6418 unlock_user_struct(buf, arg1, 1);
6420 break;
6421 #ifdef TARGET_I386
6422 case TARGET_NR_modify_ldt:
6423 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6424 break;
6425 #if !defined(TARGET_X86_64)
6426 case TARGET_NR_vm86old:
6427 goto unimplemented;
6428 case TARGET_NR_vm86:
6429 ret = do_vm86(cpu_env, arg1, arg2);
6430 break;
6431 #endif
6432 #endif
6433 case TARGET_NR_adjtimex:
6434 goto unimplemented;
6435 #ifdef TARGET_NR_create_module
6436 case TARGET_NR_create_module:
6437 #endif
6438 case TARGET_NR_init_module:
6439 case TARGET_NR_delete_module:
6440 #ifdef TARGET_NR_get_kernel_syms
6441 case TARGET_NR_get_kernel_syms:
6442 #endif
6443 goto unimplemented;
6444 case TARGET_NR_quotactl:
6445 goto unimplemented;
6446 case TARGET_NR_getpgid:
6447 ret = get_errno(getpgid(arg1));
6448 break;
6449 case TARGET_NR_fchdir:
6450 ret = get_errno(fchdir(arg1));
6451 break;
6452 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6453 case TARGET_NR_bdflush:
6454 goto unimplemented;
6455 #endif
6456 #ifdef TARGET_NR_sysfs
6457 case TARGET_NR_sysfs:
6458 goto unimplemented;
6459 #endif
6460 case TARGET_NR_personality:
6461 ret = get_errno(personality(arg1));
6462 break;
6463 #ifdef TARGET_NR_afs_syscall
6464 case TARGET_NR_afs_syscall:
6465 goto unimplemented;
6466 #endif
6467 #ifdef TARGET_NR__llseek /* Not on alpha */
6468 case TARGET_NR__llseek:
6470 int64_t res;
6471 #if !defined(__NR_llseek)
6472 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6473 if (res == -1) {
6474 ret = get_errno(res);
6475 } else {
6476 ret = 0;
6478 #else
6479 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6480 #endif
6481 if ((ret == 0) && put_user_s64(res, arg4)) {
6482 goto efault;
6485 break;
6486 #endif
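    /* llseek splits the 64-bit offset across two 32-bit arguments:
     * offset = ((uint64_t)arg2 << 32) | arg3, so arg2 carries the high half;
     * e.g. arg2 = 1, arg3 = 0 seeks to 4 GiB.  The resulting file position is
     * written back to the guest through the pointer in arg4. */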
6487 case TARGET_NR_getdents:
6488 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6490 struct target_dirent *target_dirp;
6491 struct linux_dirent *dirp;
6492 abi_long count = arg3;
6494 dirp = malloc(count);
6495 if (!dirp) {
6496 ret = -TARGET_ENOMEM;
6497 goto fail;
6500 ret = get_errno(sys_getdents(arg1, dirp, count));
6501 if (!is_error(ret)) {
6502 struct linux_dirent *de;
6503 struct target_dirent *tde;
6504 int len = ret;
6505 int reclen, treclen;
6506 int count1, tnamelen;
6508 count1 = 0;
6509 de = dirp;
6510 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6511 goto efault;
6512 tde = target_dirp;
6513 while (len > 0) {
6514 reclen = de->d_reclen;
6515 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6516 tde->d_reclen = tswap16(treclen);
6517 tde->d_ino = tswapl(de->d_ino);
6518 tde->d_off = tswapl(de->d_off);
6519 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6520 if (tnamelen > 256)
6521 tnamelen = 256;
6522 /* XXX: may not be correct */
6523 pstrcpy(tde->d_name, tnamelen, de->d_name);
6524 de = (struct linux_dirent *)((char *)de + reclen);
6525 len -= reclen;
6526 tde = (struct target_dirent *)((char *)tde + treclen);
6527 count1 += treclen;
6529 ret = count1;
6530 unlock_user(target_dirp, arg2, ret);
6532 free(dirp);
6534 #else
6536 struct linux_dirent *dirp;
6537 abi_long count = arg3;
6539 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6540 goto efault;
6541 ret = get_errno(sys_getdents(arg1, dirp, count));
6542 if (!is_error(ret)) {
6543 struct linux_dirent *de;
6544 int len = ret;
6545 int reclen;
6546 de = dirp;
6547 while (len > 0) {
6548 reclen = de->d_reclen;
6549 if (reclen > len)
6550 break;
6551 de->d_reclen = tswap16(reclen);
6552 tswapls(&de->d_ino);
6553 tswapls(&de->d_off);
6554 de = (struct linux_dirent *)((char *)de + reclen);
6555 len -= reclen;
6558 unlock_user(dirp, arg2, ret);
6560 #endif
6561 break;
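    /* For a 32-bit guest on a 64-bit host each host linux_dirent carries
     * 8-byte d_ino and d_off fields where the guest expects 4-byte ones, so
     * every record shrinks by 2 * (sizeof(long) - sizeof(abi_long)) = 8 bytes
     * while being repacked into the guest buffer above. */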
6562 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6563 case TARGET_NR_getdents64:
6565 struct linux_dirent64 *dirp;
6566 abi_long count = arg3;
6567 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6568 goto efault;
6569 ret = get_errno(sys_getdents64(arg1, dirp, count));
6570 if (!is_error(ret)) {
6571 struct linux_dirent64 *de;
6572 int len = ret;
6573 int reclen;
6574 de = dirp;
6575 while (len > 0) {
6576 reclen = de->d_reclen;
6577 if (reclen > len)
6578 break;
6579 de->d_reclen = tswap16(reclen);
6580 tswap64s((uint64_t *)&de->d_ino);
6581 tswap64s((uint64_t *)&de->d_off);
6582 de = (struct linux_dirent64 *)((char *)de + reclen);
6583 len -= reclen;
6586 unlock_user(dirp, arg2, ret);
6588 break;
6589 #endif /* TARGET_NR_getdents64 */
6590 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6591 #ifdef TARGET_S390X
6592 case TARGET_NR_select:
6593 #else
6594 case TARGET_NR__newselect:
6595 #endif
6596 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6597 break;
6598 #endif
6599 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6600 # ifdef TARGET_NR_poll
6601 case TARGET_NR_poll:
6602 # endif
6603 # ifdef TARGET_NR_ppoll
6604 case TARGET_NR_ppoll:
6605 # endif
6607 struct target_pollfd *target_pfd;
6608 unsigned int nfds = arg2;
6609 int timeout = arg3;
6610 struct pollfd *pfd;
6611 unsigned int i;
6613 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6614 if (!target_pfd)
6615 goto efault;
6617 pfd = alloca(sizeof(struct pollfd) * nfds);
6618 for(i = 0; i < nfds; i++) {
6619 pfd[i].fd = tswap32(target_pfd[i].fd);
6620 pfd[i].events = tswap16(target_pfd[i].events);
6623 # ifdef TARGET_NR_ppoll
6624 if (num == TARGET_NR_ppoll) {
6625 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6626 target_sigset_t *target_set;
6627 sigset_t _set, *set = &_set;
6629 if (arg3) {
6630 if (target_to_host_timespec(timeout_ts, arg3)) {
6631 unlock_user(target_pfd, arg1, 0);
6632 goto efault;
6634 } else {
6635 timeout_ts = NULL;
6638 if (arg4) {
6639 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6640 if (!target_set) {
6641 unlock_user(target_pfd, arg1, 0);
6642 goto efault;
6644 target_to_host_sigset(set, target_set);
6645 } else {
6646 set = NULL;
6649 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6651 if (!is_error(ret) && arg3) {
6652 host_to_target_timespec(arg3, timeout_ts);
6654 if (arg4) {
6655 unlock_user(target_set, arg4, 0);
6657 } else
6658 # endif
6659 ret = get_errno(poll(pfd, nfds, timeout));
6661 if (!is_error(ret)) {
6662 for(i = 0; i < nfds; i++) {
6663 target_pfd[i].revents = tswap16(pfd[i].revents);
6666 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6668 break;
6669 #endif
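    /* poll and ppoll share the conversion above: the guest pollfd array is
     * copied into a host array, ppoll additionally converts the optional
     * timespec and signal mask, and on return only the revents fields are
     * swapped back into guest memory. */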
6670 case TARGET_NR_flock:
6671 /* NOTE: the flock constant seems to be the same for every
6672 Linux platform */
6673 ret = get_errno(flock(arg1, arg2));
6674 break;
6675 case TARGET_NR_readv:
6677 int count = arg3;
6678 struct iovec *vec;
6680 vec = alloca(count * sizeof(struct iovec));
6681 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6682 goto efault;
6683 ret = get_errno(readv(arg1, vec, count));
6684 unlock_iovec(vec, arg2, count, 1);
6686 break;
6687 case TARGET_NR_writev:
6689 int count = arg3;
6690 struct iovec *vec;
6692 vec = alloca(count * sizeof(struct iovec));
6693 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6694 goto efault;
6695 ret = get_errno(writev(arg1, vec, count));
6696 unlock_iovec(vec, arg2, count, 0);
6698 break;
6699 case TARGET_NR_getsid:
6700 ret = get_errno(getsid(arg1));
6701 break;
6702 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6703 case TARGET_NR_fdatasync:
6704 ret = get_errno(fdatasync(arg1));
6705 break;
6706 #endif
6707 case TARGET_NR__sysctl:
6708 /* We don't implement this, but ENOTDIR is always a safe
6709 return value. */
6710 ret = -TARGET_ENOTDIR;
6711 break;
6712 case TARGET_NR_sched_getaffinity:
6714 unsigned int mask_size;
6715 unsigned long *mask;
6718 * sched_getaffinity needs multiples of ulong, so need to take
6719 * care of mismatches between target ulong and host ulong sizes.
6721 if (arg2 & (sizeof(abi_ulong) - 1)) {
6722 ret = -TARGET_EINVAL;
6723 break;
6725 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6727 mask = alloca(mask_size);
6728 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6730 if (!is_error(ret)) {
6731 if (copy_to_user(arg3, mask, ret)) {
6732 goto efault;
6736 break;
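    /* The affinity mask length must be a multiple of the guest word size and
     * is then rounded up to whole host unsigned longs, e.g. a 32-bit guest
     * passing arg2 = 4 on a 64-bit host ends up with mask_size = 8.  The same
     * check and rounding are applied to sched_setaffinity below. */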
6737 case TARGET_NR_sched_setaffinity:
6739 unsigned int mask_size;
6740 unsigned long *mask;
6743 * sched_setaffinity needs multiples of ulong, so need to take
6744 * care of mismatches between target ulong and host ulong sizes.
6746 if (arg2 & (sizeof(abi_ulong) - 1)) {
6747 ret = -TARGET_EINVAL;
6748 break;
6750 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6752 mask = alloca(mask_size);
6753 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6754 goto efault;
6756 memcpy(mask, p, arg2);
6757 unlock_user_struct(p, arg3, 0);
6759 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6761 break;
6762 case TARGET_NR_sched_setparam:
6764 struct sched_param *target_schp;
6765 struct sched_param schp;
6767 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6768 goto efault;
6769 schp.sched_priority = tswap32(target_schp->sched_priority);
6770 unlock_user_struct(target_schp, arg2, 0);
6771 ret = get_errno(sched_setparam(arg1, &schp));
6773 break;
6774 case TARGET_NR_sched_getparam:
6776 struct sched_param *target_schp;
6777 struct sched_param schp;
6778 ret = get_errno(sched_getparam(arg1, &schp));
6779 if (!is_error(ret)) {
6780 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6781 goto efault;
6782 target_schp->sched_priority = tswap32(schp.sched_priority);
6783 unlock_user_struct(target_schp, arg2, 1);
6786 break;
6787 case TARGET_NR_sched_setscheduler:
6789 struct sched_param *target_schp;
6790 struct sched_param schp;
6791 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6792 goto efault;
6793 schp.sched_priority = tswap32(target_schp->sched_priority);
6794 unlock_user_struct(target_schp, arg3, 0);
6795 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6797 break;
6798 case TARGET_NR_sched_getscheduler:
6799 ret = get_errno(sched_getscheduler(arg1));
6800 break;
6801 case TARGET_NR_sched_yield:
6802 ret = get_errno(sched_yield());
6803 break;
6804 case TARGET_NR_sched_get_priority_max:
6805 ret = get_errno(sched_get_priority_max(arg1));
6806 break;
6807 case TARGET_NR_sched_get_priority_min:
6808 ret = get_errno(sched_get_priority_min(arg1));
6809 break;
6810 case TARGET_NR_sched_rr_get_interval:
6812 struct timespec ts;
6813 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6814 if (!is_error(ret)) {
6815 host_to_target_timespec(arg2, &ts);
6818 break;
6819 case TARGET_NR_nanosleep:
6821 struct timespec req, rem;
6822 target_to_host_timespec(&req, arg1);
6823 ret = get_errno(nanosleep(&req, &rem));
6824 if (is_error(ret) && arg2) {
6825 host_to_target_timespec(arg2, &rem);
6828 break;
6829 #ifdef TARGET_NR_query_module
6830 case TARGET_NR_query_module:
6831 goto unimplemented;
6832 #endif
6833 #ifdef TARGET_NR_nfsservctl
6834 case TARGET_NR_nfsservctl:
6835 goto unimplemented;
6836 #endif
6837 case TARGET_NR_prctl:
6838 switch (arg1)
6840 case PR_GET_PDEATHSIG:
6842 int deathsig;
6843 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6844 if (!is_error(ret) && arg2
6845 && put_user_ual(deathsig, arg2))
6846 goto efault;
6848 break;
6849 default:
6850 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6851 break;
6853 break;
6854 #ifdef TARGET_NR_arch_prctl
6855 case TARGET_NR_arch_prctl:
6856 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6857 ret = do_arch_prctl(cpu_env, arg1, arg2);
6858 break;
6859 #else
6860 goto unimplemented;
6861 #endif
6862 #endif
6863 #ifdef TARGET_NR_pread
6864 case TARGET_NR_pread:
6865 if (regpairs_aligned(cpu_env))
6866 arg4 = arg5;
6867 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6868 goto efault;
6869 ret = get_errno(pread(arg1, p, arg3, arg4));
6870 unlock_user(p, arg2, ret);
6871 break;
6872 case TARGET_NR_pwrite:
6873 if (regpairs_aligned(cpu_env))
6874 arg4 = arg5;
6875 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6876 goto efault;
6877 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6878 unlock_user(p, arg2, 0);
6879 break;
6880 #endif
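    /* regpairs_aligned() is true for 32-bit ABIs that pass 64-bit syscall
     * arguments in aligned register pairs; those ABIs insert a padding slot
     * before the offset, hence it is taken from arg5 instead of arg4 above.
     * The pread64/pwrite64 variants below instead rebuild the 64-bit offset
     * from two registers with target_offset64(). */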
6881 #ifdef TARGET_NR_pread64
6882 case TARGET_NR_pread64:
6883 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6884 goto efault;
6885 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6886 unlock_user(p, arg2, ret);
6887 break;
6888 case TARGET_NR_pwrite64:
6889 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6890 goto efault;
6891 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6892 unlock_user(p, arg2, 0);
6893 break;
6894 #endif
6895 case TARGET_NR_getcwd:
6896 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6897 goto efault;
6898 ret = get_errno(sys_getcwd1(p, arg2));
6899 unlock_user(p, arg1, ret);
6900 break;
6901 case TARGET_NR_capget:
6902 goto unimplemented;
6903 case TARGET_NR_capset:
6904 goto unimplemented;
6905 case TARGET_NR_sigaltstack:
6906 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6907 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6908 defined(TARGET_M68K) || defined(TARGET_S390X)
6909 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6910 break;
6911 #else
6912 goto unimplemented;
6913 #endif
6914 case TARGET_NR_sendfile:
6915 goto unimplemented;
6916 #ifdef TARGET_NR_getpmsg
6917 case TARGET_NR_getpmsg:
6918 goto unimplemented;
6919 #endif
6920 #ifdef TARGET_NR_putpmsg
6921 case TARGET_NR_putpmsg:
6922 goto unimplemented;
6923 #endif
6924 #ifdef TARGET_NR_vfork
6925 case TARGET_NR_vfork:
6926 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6927 0, 0, 0, 0));
6928 break;
6929 #endif
6930 #ifdef TARGET_NR_ugetrlimit
6931 case TARGET_NR_ugetrlimit:
6933 struct rlimit rlim;
6934 int resource = target_to_host_resource(arg1);
6935 ret = get_errno(getrlimit(resource, &rlim));
6936 if (!is_error(ret)) {
6937 struct target_rlimit *target_rlim;
6938 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6939 goto efault;
6940 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6941 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6942 unlock_user_struct(target_rlim, arg2, 1);
6944 break;
6946 #endif
6947 #ifdef TARGET_NR_truncate64
6948 case TARGET_NR_truncate64:
6949 if (!(p = lock_user_string(arg1)))
6950 goto efault;
6951 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6952 unlock_user(p, arg1, 0);
6953 break;
6954 #endif
6955 #ifdef TARGET_NR_ftruncate64
6956 case TARGET_NR_ftruncate64:
6957 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6958 break;
6959 #endif
6960 #ifdef TARGET_NR_stat64
6961 case TARGET_NR_stat64:
6962 if (!(p = lock_user_string(arg1)))
6963 goto efault;
6964 ret = get_errno(stat(path(p), &st));
6965 unlock_user(p, arg1, 0);
6966 if (!is_error(ret))
6967 ret = host_to_target_stat64(cpu_env, arg2, &st);
6968 break;
6969 #endif
6970 #ifdef TARGET_NR_lstat64
6971 case TARGET_NR_lstat64:
6972 if (!(p = lock_user_string(arg1)))
6973 goto efault;
6974 ret = get_errno(lstat(path(p), &st));
6975 unlock_user(p, arg1, 0);
6976 if (!is_error(ret))
6977 ret = host_to_target_stat64(cpu_env, arg2, &st);
6978 break;
6979 #endif
6980 #ifdef TARGET_NR_fstat64
6981 case TARGET_NR_fstat64:
6982 ret = get_errno(fstat(arg1, &st));
6983 if (!is_error(ret))
6984 ret = host_to_target_stat64(cpu_env, arg2, &st);
6985 break;
6986 #endif
6987 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6988 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6989 #ifdef TARGET_NR_fstatat64
6990 case TARGET_NR_fstatat64:
6991 #endif
6992 #ifdef TARGET_NR_newfstatat
6993 case TARGET_NR_newfstatat:
6994 #endif
6995 if (!(p = lock_user_string(arg2)))
6996 goto efault;
6997 #ifdef __NR_fstatat64
6998 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6999 #else
7000 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7001 #endif
7002 if (!is_error(ret))
7003 ret = host_to_target_stat64(cpu_env, arg3, &st);
7004 break;
7005 #endif
7006 case TARGET_NR_lchown:
7007 if (!(p = lock_user_string(arg1)))
7008 goto efault;
7009 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7010 unlock_user(p, arg1, 0);
7011 break;
7012 #ifdef TARGET_NR_getuid
7013 case TARGET_NR_getuid:
7014 ret = get_errno(high2lowuid(getuid()));
7015 break;
7016 #endif
7017 #ifdef TARGET_NR_getgid
7018 case TARGET_NR_getgid:
7019 ret = get_errno(high2lowgid(getgid()));
7020 break;
7021 #endif
7022 #ifdef TARGET_NR_geteuid
7023 case TARGET_NR_geteuid:
7024 ret = get_errno(high2lowuid(geteuid()));
7025 break;
7026 #endif
7027 #ifdef TARGET_NR_getegid
7028 case TARGET_NR_getegid:
7029 ret = get_errno(high2lowgid(getegid()));
7030 break;
7031 #endif
7032 case TARGET_NR_setreuid:
7033 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7034 break;
7035 case TARGET_NR_setregid:
7036 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7037 break;
7038 case TARGET_NR_getgroups:
7040 int gidsetsize = arg1;
7041 target_id *target_grouplist;
7042 gid_t *grouplist;
7043 int i;
7045 grouplist = alloca(gidsetsize * sizeof(gid_t));
7046 ret = get_errno(getgroups(gidsetsize, grouplist));
7047 if (gidsetsize == 0)
7048 break;
7049 if (!is_error(ret)) {
7050 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7051 if (!target_grouplist)
7052 goto efault;
7053 for(i = 0;i < ret; i++)
7054 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7055 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7058 break;
7059 case TARGET_NR_setgroups:
7061 int gidsetsize = arg1;
7062 target_id *target_grouplist;
7063 gid_t *grouplist;
7064 int i;
7066 grouplist = alloca(gidsetsize * sizeof(gid_t));
7067 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7068 if (!target_grouplist) {
7069 ret = -TARGET_EFAULT;
7070 goto fail;
7072 for(i = 0;i < gidsetsize; i++)
7073 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7074 unlock_user(target_grouplist, arg2, 0);
7075 ret = get_errno(setgroups(gidsetsize, grouplist));
7077 break;
7078 case TARGET_NR_fchown:
7079 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7080 break;
7081 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7082 case TARGET_NR_fchownat:
7083 if (!(p = lock_user_string(arg2)))
7084 goto efault;
7085 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7086 unlock_user(p, arg2, 0);
7087 break;
7088 #endif
7089 #ifdef TARGET_NR_setresuid
7090 case TARGET_NR_setresuid:
7091 ret = get_errno(setresuid(low2highuid(arg1),
7092 low2highuid(arg2),
7093 low2highuid(arg3)));
7094 break;
7095 #endif
7096 #ifdef TARGET_NR_getresuid
7097 case TARGET_NR_getresuid:
7099 uid_t ruid, euid, suid;
7100 ret = get_errno(getresuid(&ruid, &euid, &suid));
7101 if (!is_error(ret)) {
7102 if (put_user_u16(high2lowuid(ruid), arg1)
7103 || put_user_u16(high2lowuid(euid), arg2)
7104 || put_user_u16(high2lowuid(suid), arg3))
7105 goto efault;
7108 break;
7109 #endif
7110 #ifdef TARGET_NR_getresgid
7111 case TARGET_NR_setresgid:
7112 ret = get_errno(setresgid(low2highgid(arg1),
7113 low2highgid(arg2),
7114 low2highgid(arg3)));
7115 break;
7116 #endif
7117 #ifdef TARGET_NR_getresgid
7118 case TARGET_NR_getresgid:
7120 gid_t rgid, egid, sgid;
7121 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7122 if (!is_error(ret)) {
7123 if (put_user_u16(high2lowgid(rgid), arg1)
7124 || put_user_u16(high2lowgid(egid), arg2)
7125 || put_user_u16(high2lowgid(sgid), arg3))
7126 goto efault;
7129 break;
7130 #endif
7131 case TARGET_NR_chown:
7132 if (!(p = lock_user_string(arg1)))
7133 goto efault;
7134 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7135 unlock_user(p, arg1, 0);
7136 break;
7137 case TARGET_NR_setuid:
7138 ret = get_errno(setuid(low2highuid(arg1)));
7139 break;
7140 case TARGET_NR_setgid:
7141 ret = get_errno(setgid(low2highgid(arg1)));
7142 break;
7143 case TARGET_NR_setfsuid:
7144 ret = get_errno(setfsuid(arg1));
7145 break;
7146 case TARGET_NR_setfsgid:
7147 ret = get_errno(setfsgid(arg1));
7148 break;
7150 #ifdef TARGET_NR_lchown32
7151 case TARGET_NR_lchown32:
7152 if (!(p = lock_user_string(arg1)))
7153 goto efault;
7154 ret = get_errno(lchown(p, arg2, arg3));
7155 unlock_user(p, arg1, 0);
7156 break;
7157 #endif
7158 #ifdef TARGET_NR_getuid32
7159 case TARGET_NR_getuid32:
7160 ret = get_errno(getuid());
7161 break;
7162 #endif
7164 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7165 /* Alpha specific */
7166 case TARGET_NR_getxuid:
7168 uid_t euid;
7169 euid=geteuid();
7170 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7172 ret = get_errno(getuid());
7173 break;
7174 #endif
7175 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7176 /* Alpha specific */
7177 case TARGET_NR_getxgid:
7179 gid_t egid;
7180 egid=getegid();
7181 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7183 ret = get_errno(getgid());
7184 break;
7185 #endif
7186 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7187 /* Alpha specific */
7188 case TARGET_NR_osf_getsysinfo:
7189 ret = -TARGET_EOPNOTSUPP;
7190 switch (arg1) {
7191 case TARGET_GSI_IEEE_FP_CONTROL:
7193 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7195 /* Copied from linux ieee_fpcr_to_swcr. */
7196 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7197 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7198 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7199 | SWCR_TRAP_ENABLE_DZE
7200 | SWCR_TRAP_ENABLE_OVF);
7201 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7202 | SWCR_TRAP_ENABLE_INE);
7203 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7204 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7206 if (put_user_u64 (swcr, arg2))
7207 goto efault;
7208 ret = 0;
7210 break;
7212 /* case GSI_IEEE_STATE_AT_SIGNAL:
7213 -- Not implemented in linux kernel.
7214 case GSI_UACPROC:
7215 -- Retrieves current unaligned access state; not much used.
7216 case GSI_PROC_TYPE:
7217 -- Retrieves implver information; surely not used.
7218 case GSI_GET_HWRPB:
7219 -- Grabs a copy of the HWRPB; surely not used.
7222 break;
7223 #endif
7224 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7225 /* Alpha specific */
7226 case TARGET_NR_osf_setsysinfo:
7227 ret = -TARGET_EOPNOTSUPP;
7228 switch (arg1) {
7229 case TARGET_SSI_IEEE_FP_CONTROL:
7230 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7232 uint64_t swcr, fpcr, orig_fpcr;
7234 if (get_user_u64 (swcr, arg2))
7235 goto efault;
7236 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7237 fpcr = orig_fpcr & FPCR_DYN_MASK;
7239 /* Copied from linux ieee_swcr_to_fpcr. */
7240 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7241 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7242 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7243 | SWCR_TRAP_ENABLE_DZE
7244 | SWCR_TRAP_ENABLE_OVF)) << 48;
7245 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7246 | SWCR_TRAP_ENABLE_INE)) << 57;
7247 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7248 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7250 cpu_alpha_store_fpcr (cpu_env, fpcr);
7251 ret = 0;
7253 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7254 /* Old exceptions are not signaled. */
7255 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7257 /* If any exceptions set by this call are unmasked,
7258 send a signal. */
7259 /* ??? FIXME */
7262 break;
7264 /* case SSI_NVPAIRS:
7265 -- Used with SSIN_UACPROC to enable unaligned accesses.
7266 case SSI_IEEE_STATE_AT_SIGNAL:
7267 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7268 -- Not implemented in linux kernel
7271 break;
7272 #endif
7273 #ifdef TARGET_NR_osf_sigprocmask
7274 /* Alpha specific. */
7275 case TARGET_NR_osf_sigprocmask:
7277 abi_ulong mask;
7278 int how;
7279 sigset_t set, oldset;
7281 switch(arg1) {
7282 case TARGET_SIG_BLOCK:
7283 how = SIG_BLOCK;
7284 break;
7285 case TARGET_SIG_UNBLOCK:
7286 how = SIG_UNBLOCK;
7287 break;
7288 case TARGET_SIG_SETMASK:
7289 how = SIG_SETMASK;
7290 break;
7291 default:
7292 ret = -TARGET_EINVAL;
7293 goto fail;
7295 mask = arg2;
7296 target_to_host_old_sigset(&set, &mask);
7297 sigprocmask(how, &set, &oldset);
7298 host_to_target_old_sigset(&mask, &oldset);
7299 ret = mask;
7301 break;
7302 #endif
7304 #ifdef TARGET_NR_getgid32
7305 case TARGET_NR_getgid32:
7306 ret = get_errno(getgid());
7307 break;
7308 #endif
7309 #ifdef TARGET_NR_geteuid32
7310 case TARGET_NR_geteuid32:
7311 ret = get_errno(geteuid());
7312 break;
7313 #endif
7314 #ifdef TARGET_NR_getegid32
7315 case TARGET_NR_getegid32:
7316 ret = get_errno(getegid());
7317 break;
7318 #endif
7319 #ifdef TARGET_NR_setreuid32
7320 case TARGET_NR_setreuid32:
7321 ret = get_errno(setreuid(arg1, arg2));
7322 break;
7323 #endif
7324 #ifdef TARGET_NR_setregid32
7325 case TARGET_NR_setregid32:
7326 ret = get_errno(setregid(arg1, arg2));
7327 break;
7328 #endif
7329 #ifdef TARGET_NR_getgroups32
7330 case TARGET_NR_getgroups32:
7332 int gidsetsize = arg1;
7333 uint32_t *target_grouplist;
7334 gid_t *grouplist;
7335 int i;
7337 grouplist = alloca(gidsetsize * sizeof(gid_t));
7338 ret = get_errno(getgroups(gidsetsize, grouplist));
7339 if (gidsetsize == 0)
7340 break;
7341 if (!is_error(ret)) {
7342 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7343 if (!target_grouplist) {
7344 ret = -TARGET_EFAULT;
7345 goto fail;
7347 for(i = 0;i < ret; i++)
7348 target_grouplist[i] = tswap32(grouplist[i]);
7349 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7352 break;
7353 #endif
7354 #ifdef TARGET_NR_setgroups32
7355 case TARGET_NR_setgroups32:
7357 int gidsetsize = arg1;
7358 uint32_t *target_grouplist;
7359 gid_t *grouplist;
7360 int i;
7362 grouplist = alloca(gidsetsize * sizeof(gid_t));
7363 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7364 if (!target_grouplist) {
7365 ret = -TARGET_EFAULT;
7366 goto fail;
7368 for(i = 0;i < gidsetsize; i++)
7369 grouplist[i] = tswap32(target_grouplist[i]);
7370 unlock_user(target_grouplist, arg2, 0);
7371 ret = get_errno(setgroups(gidsetsize, grouplist));
7373 break;
7374 #endif
7375 #ifdef TARGET_NR_fchown32
7376 case TARGET_NR_fchown32:
7377 ret = get_errno(fchown(arg1, arg2, arg3));
7378 break;
7379 #endif
7380 #ifdef TARGET_NR_setresuid32
7381 case TARGET_NR_setresuid32:
7382 ret = get_errno(setresuid(arg1, arg2, arg3));
7383 break;
7384 #endif
7385 #ifdef TARGET_NR_getresuid32
7386 case TARGET_NR_getresuid32:
7388 uid_t ruid, euid, suid;
7389 ret = get_errno(getresuid(&ruid, &euid, &suid));
7390 if (!is_error(ret)) {
7391 if (put_user_u32(ruid, arg1)
7392 || put_user_u32(euid, arg2)
7393 || put_user_u32(suid, arg3))
7394 goto efault;
7397 break;
7398 #endif
7399 #ifdef TARGET_NR_setresgid32
7400 case TARGET_NR_setresgid32:
7401 ret = get_errno(setresgid(arg1, arg2, arg3));
7402 break;
7403 #endif
7404 #ifdef TARGET_NR_getresgid32
7405 case TARGET_NR_getresgid32:
7407 gid_t rgid, egid, sgid;
7408 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7409 if (!is_error(ret)) {
7410 if (put_user_u32(rgid, arg1)
7411 || put_user_u32(egid, arg2)
7412 || put_user_u32(sgid, arg3))
7413 goto efault;
7416 break;
7417 #endif
7418 #ifdef TARGET_NR_chown32
7419 case TARGET_NR_chown32:
7420 if (!(p = lock_user_string(arg1)))
7421 goto efault;
7422 ret = get_errno(chown(p, arg2, arg3));
7423 unlock_user(p, arg1, 0);
7424 break;
7425 #endif
7426 #ifdef TARGET_NR_setuid32
7427 case TARGET_NR_setuid32:
7428 ret = get_errno(setuid(arg1));
7429 break;
7430 #endif
7431 #ifdef TARGET_NR_setgid32
7432 case TARGET_NR_setgid32:
7433 ret = get_errno(setgid(arg1));
7434 break;
7435 #endif
7436 #ifdef TARGET_NR_setfsuid32
7437 case TARGET_NR_setfsuid32:
7438 ret = get_errno(setfsuid(arg1));
7439 break;
7440 #endif
7441 #ifdef TARGET_NR_setfsgid32
7442 case TARGET_NR_setfsgid32:
7443 ret = get_errno(setfsgid(arg1));
7444 break;
7445 #endif
7447 case TARGET_NR_pivot_root:
7448 goto unimplemented;
7449 #ifdef TARGET_NR_mincore
7450 case TARGET_NR_mincore:
7452 void *a;
7453 ret = -TARGET_EFAULT;
7454 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7455 goto efault;
7456 if (!(p = lock_user_string(arg3)))
7457 goto mincore_fail;
7458 ret = get_errno(mincore(a, arg2, p));
7459 unlock_user(p, arg3, ret);
7460 mincore_fail:
7461 unlock_user(a, arg1, 0);
7463 break;
7464 #endif
7465 #ifdef TARGET_NR_arm_fadvise64_64
7466 case TARGET_NR_arm_fadvise64_64:
7469 * arm_fadvise64_64 looks like fadvise64_64 but
7470 * with different argument order
7472 abi_long temp;
7473 temp = arg3;
7474 arg3 = arg4;
7475 arg4 = temp;
7477 #endif
7478 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7479 #ifdef TARGET_NR_fadvise64_64
7480 case TARGET_NR_fadvise64_64:
7481 #endif
7482 #ifdef TARGET_NR_fadvise64
7483 case TARGET_NR_fadvise64:
7484 #endif
7485 #ifdef TARGET_S390X
7486 switch (arg4) {
7487 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7488 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7489 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7490 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7491 default: break;
7493 #endif
7494 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7495 break;
7496 #endif
7497 #ifdef TARGET_NR_madvise
7498 case TARGET_NR_madvise:
7499 /* A straight passthrough may not be safe because qemu sometimes
7500 turns private file-backed mappings into anonymous mappings.
7501 This will break MADV_DONTNEED.
7502 This is a hint, so ignoring and returning success is ok. */
7503 ret = get_errno(0);
7504 break;
7505 #endif
7506 #if TARGET_ABI_BITS == 32
7507 case TARGET_NR_fcntl64:
7509 int cmd;
7510 struct flock64 fl;
7511 struct target_flock64 *target_fl;
7512 #ifdef TARGET_ARM
7513 struct target_eabi_flock64 *target_efl;
7514 #endif
7516 cmd = target_to_host_fcntl_cmd(arg2);
7517 if (cmd == -TARGET_EINVAL)
7518 return cmd;
7520 switch(arg2) {
7521 case TARGET_F_GETLK64:
7522 #ifdef TARGET_ARM
7523 if (((CPUARMState *)cpu_env)->eabi) {
7524 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7525 goto efault;
7526 fl.l_type = tswap16(target_efl->l_type);
7527 fl.l_whence = tswap16(target_efl->l_whence);
7528 fl.l_start = tswap64(target_efl->l_start);
7529 fl.l_len = tswap64(target_efl->l_len);
7530 fl.l_pid = tswap32(target_efl->l_pid);
7531 unlock_user_struct(target_efl, arg3, 0);
7532 } else
7533 #endif
7535 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7536 goto efault;
7537 fl.l_type = tswap16(target_fl->l_type);
7538 fl.l_whence = tswap16(target_fl->l_whence);
7539 fl.l_start = tswap64(target_fl->l_start);
7540 fl.l_len = tswap64(target_fl->l_len);
7541 fl.l_pid = tswap32(target_fl->l_pid);
7542 unlock_user_struct(target_fl, arg3, 0);
7544 ret = get_errno(fcntl(arg1, cmd, &fl));
7545 if (ret == 0) {
7546 #ifdef TARGET_ARM
7547 if (((CPUARMState *)cpu_env)->eabi) {
7548 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7549 goto efault;
7550 target_efl->l_type = tswap16(fl.l_type);
7551 target_efl->l_whence = tswap16(fl.l_whence);
7552 target_efl->l_start = tswap64(fl.l_start);
7553 target_efl->l_len = tswap64(fl.l_len);
7554 target_efl->l_pid = tswap32(fl.l_pid);
7555 unlock_user_struct(target_efl, arg3, 1);
7556 } else
7557 #endif
7559 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7560 goto efault;
7561 target_fl->l_type = tswap16(fl.l_type);
7562 target_fl->l_whence = tswap16(fl.l_whence);
7563 target_fl->l_start = tswap64(fl.l_start);
7564 target_fl->l_len = tswap64(fl.l_len);
7565 target_fl->l_pid = tswap32(fl.l_pid);
7566 unlock_user_struct(target_fl, arg3, 1);
7569 break;
7571 case TARGET_F_SETLK64:
7572 case TARGET_F_SETLKW64:
7573 #ifdef TARGET_ARM
7574 if (((CPUARMState *)cpu_env)->eabi) {
7575 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7576 goto efault;
7577 fl.l_type = tswap16(target_efl->l_type);
7578 fl.l_whence = tswap16(target_efl->l_whence);
7579 fl.l_start = tswap64(target_efl->l_start);
7580 fl.l_len = tswap64(target_efl->l_len);
7581 fl.l_pid = tswap32(target_efl->l_pid);
7582 unlock_user_struct(target_efl, arg3, 0);
7583 } else
7584 #endif
7586 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7587 goto efault;
7588 fl.l_type = tswap16(target_fl->l_type);
7589 fl.l_whence = tswap16(target_fl->l_whence);
7590 fl.l_start = tswap64(target_fl->l_start);
7591 fl.l_len = tswap64(target_fl->l_len);
7592 fl.l_pid = tswap32(target_fl->l_pid);
7593 unlock_user_struct(target_fl, arg3, 0);
7595 ret = get_errno(fcntl(arg1, cmd, &fl));
7596 break;
7597 default:
7598 ret = do_fcntl(arg1, arg2, arg3);
7599 break;
7601 break;
7603 #endif
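    /* fcntl64 needs its own flock64 conversion because the guest layout does
     * not match the host's; ARM is special-cased further since the EABI
     * aligns the 64-bit l_start/l_len fields more strictly than the old ABI,
     * which is what target_eabi_flock64 describes. */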
7604 #ifdef TARGET_NR_cacheflush
7605 case TARGET_NR_cacheflush:
7606 /* self-modifying code is handled automatically, so nothing needed */
7607 ret = 0;
7608 break;
7609 #endif
7610 #ifdef TARGET_NR_security
7611 case TARGET_NR_security:
7612 goto unimplemented;
7613 #endif
7614 #ifdef TARGET_NR_getpagesize
7615 case TARGET_NR_getpagesize:
7616 ret = TARGET_PAGE_SIZE;
7617 break;
7618 #endif
7619 case TARGET_NR_gettid:
7620 ret = get_errno(gettid());
7621 break;
7622 #ifdef TARGET_NR_readahead
7623 case TARGET_NR_readahead:
7624 #if TARGET_ABI_BITS == 32
7625 if (regpairs_aligned(cpu_env)) {
7626 arg2 = arg3;
7627 arg3 = arg4;
7628 arg4 = arg5;
7630 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7631 #else
7632 ret = get_errno(readahead(arg1, arg2, arg3));
7633 #endif
7634 break;
7635 #endif
7636 #ifdef TARGET_NR_setxattr
7637 case TARGET_NR_setxattr:
7638 case TARGET_NR_lsetxattr:
7639 case TARGET_NR_fsetxattr:
7640 case TARGET_NR_getxattr:
7641 case TARGET_NR_lgetxattr:
7642 case TARGET_NR_fgetxattr:
7643 case TARGET_NR_listxattr:
7644 case TARGET_NR_llistxattr:
7645 case TARGET_NR_flistxattr:
7646 case TARGET_NR_removexattr:
7647 case TARGET_NR_lremovexattr:
7648 case TARGET_NR_fremovexattr:
7649 ret = -TARGET_EOPNOTSUPP;
7650 break;
7651 #endif
7652 #ifdef TARGET_NR_set_thread_area
7653 case TARGET_NR_set_thread_area:
7654 #if defined(TARGET_MIPS)
7655 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7656 ret = 0;
7657 break;
7658 #elif defined(TARGET_CRIS)
7659 if (arg1 & 0xff)
7660 ret = -TARGET_EINVAL;
7661 else {
7662 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7663 ret = 0;
7665 break;
7666 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7667 ret = do_set_thread_area(cpu_env, arg1);
7668 break;
7669 #else
7670 goto unimplemented_nowarn;
7671 #endif
7672 #endif
7673 #ifdef TARGET_NR_get_thread_area
7674 case TARGET_NR_get_thread_area:
7675 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7676 ret = do_get_thread_area(cpu_env, arg1);
7677 #else
7678 goto unimplemented_nowarn;
7679 #endif
7680 #endif
7681 #ifdef TARGET_NR_getdomainname
7682 case TARGET_NR_getdomainname:
7683 goto unimplemented_nowarn;
7684 #endif
7686 #ifdef TARGET_NR_clock_gettime
7687 case TARGET_NR_clock_gettime:
7689 struct timespec ts;
7690 ret = get_errno(clock_gettime(arg1, &ts));
7691 if (!is_error(ret)) {
7692 host_to_target_timespec(arg2, &ts);
7694 break;
7696 #endif
7697 #ifdef TARGET_NR_clock_getres
7698 case TARGET_NR_clock_getres:
7700 struct timespec ts;
7701 ret = get_errno(clock_getres(arg1, &ts));
7702 if (!is_error(ret)) {
7703 host_to_target_timespec(arg2, &ts);
7705 break;
7707 #endif
7708 #ifdef TARGET_NR_clock_nanosleep
7709 case TARGET_NR_clock_nanosleep:
7711 struct timespec ts;
7712 target_to_host_timespec(&ts, arg3);
7713 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7714 if (arg4)
7715 host_to_target_timespec(arg4, &ts);
7716 break;
7718 #endif
7720 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7721 case TARGET_NR_set_tid_address:
7722 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7723 break;
7724 #endif
7726 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7727 case TARGET_NR_tkill:
7728 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7729 break;
7730 #endif
7732 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7733 case TARGET_NR_tgkill:
7734 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7735 target_to_host_signal(arg3)));
7736 break;
7737 #endif
7739 #ifdef TARGET_NR_set_robust_list
7740 case TARGET_NR_set_robust_list:
7741 goto unimplemented_nowarn;
7742 #endif
7744 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7745 case TARGET_NR_utimensat:
7747 struct timespec *tsp, ts[2];
7748 if (!arg3) {
7749 tsp = NULL;
7750 } else {
7751 target_to_host_timespec(ts, arg3);
7752 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
7753 tsp = ts;
7755 if (!arg2)
7756 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7757 else {
7758 if (!(p = lock_user_string(arg2))) {
7759 ret = -TARGET_EFAULT;
7760 goto fail;
7762 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7763 unlock_user(p, arg2, 0);
7766 break;
7767 #endif
7768 #if defined(CONFIG_USE_NPTL)
7769 case TARGET_NR_futex:
7770 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7771 break;
7772 #endif
7773 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7774 case TARGET_NR_inotify_init:
7775 ret = get_errno(sys_inotify_init());
7776 break;
7777 #endif
7778 #ifdef CONFIG_INOTIFY1
7779 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7780 case TARGET_NR_inotify_init1:
7781 ret = get_errno(sys_inotify_init1(arg1));
7782 break;
7783 #endif
7784 #endif
7785 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7786 case TARGET_NR_inotify_add_watch:
7787 p = lock_user_string(arg2);
7788 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7789 unlock_user(p, arg2, 0);
7790 break;
7791 #endif
7792 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7793 case TARGET_NR_inotify_rm_watch:
7794 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7795 break;
7796 #endif
7798 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7799 case TARGET_NR_mq_open:
7801 struct mq_attr posix_mq_attr;
7803 p = lock_user_string(arg1 - 1);
7804 if (arg4 != 0)
7805 copy_from_user_mq_attr (&posix_mq_attr, arg4);
7806 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
7807 unlock_user (p, arg1, 0);
7809 break;
7811 case TARGET_NR_mq_unlink:
7812 p = lock_user_string(arg1 - 1);
7813 ret = get_errno(mq_unlink(p));
7814 unlock_user (p, arg1, 0);
7815 break;
7817 case TARGET_NR_mq_timedsend:
7819 struct timespec ts;
7821 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7822 if (arg5 != 0) {
7823 target_to_host_timespec(&ts, arg5);
7824 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7825 host_to_target_timespec(arg5, &ts);
7827 else
7828 ret = get_errno(mq_send(arg1, p, arg3, arg4));
7829 unlock_user (p, arg2, arg3);
7831 break;
7833 case TARGET_NR_mq_timedreceive:
7835 struct timespec ts;
7836 unsigned int prio;
7838 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7839 if (arg5 != 0) {
7840 target_to_host_timespec(&ts, arg5);
7841 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
7842 host_to_target_timespec(arg5, &ts);
7844 else
7845 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
7846 unlock_user (p, arg2, arg3);
7847 if (arg4 != 0)
7848 put_user_u32(prio, arg4);
7850 break;
7852 /* Not implemented for now... */
7853 /* case TARGET_NR_mq_notify: */
7854 /* break; */
7856 case TARGET_NR_mq_getsetattr:
7858 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
7859 ret = 0;
7860 if (arg3 != 0) {
7861 ret = mq_getattr(arg1, &posix_mq_attr_out);
7862 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
7864 if (arg2 != 0) {
7865 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
7866 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
7870 break;
7871 #endif
7873 #ifdef CONFIG_SPLICE
7874 #ifdef TARGET_NR_tee
7875 case TARGET_NR_tee:
7877 ret = get_errno(tee(arg1,arg2,arg3,arg4));
7879 break;
7880 #endif
7881 #ifdef TARGET_NR_splice
7882 case TARGET_NR_splice:
7884 loff_t loff_in, loff_out;
7885 loff_t *ploff_in = NULL, *ploff_out = NULL;
7886 if(arg2) {
7887 get_user_u64(loff_in, arg2);
7888 ploff_in = &loff_in;
7890 if(arg4) {
7891 get_user_u64(loff_out, arg4);
7892 ploff_out = &loff_out;
7894 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
7896 break;
7897 #endif
7898 #ifdef TARGET_NR_vmsplice
7899 case TARGET_NR_vmsplice:
7901 int count = arg3;
7902 struct iovec *vec;
7904 vec = alloca(count * sizeof(struct iovec));
7905 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7906 goto efault;
7907 ret = get_errno(vmsplice(arg1, vec, count, arg4));
7908 unlock_iovec(vec, arg2, count, 0);
7910 break;
7911 #endif
7912 #endif /* CONFIG_SPLICE */
7913 #ifdef CONFIG_EVENTFD
7914 #if defined(TARGET_NR_eventfd)
7915 case TARGET_NR_eventfd:
7916 ret = get_errno(eventfd(arg1, 0));
7917 break;
7918 #endif
7919 #if defined(TARGET_NR_eventfd2)
7920 case TARGET_NR_eventfd2:
7921 ret = get_errno(eventfd(arg1, arg2));
7922 break;
7923 #endif
7924 #endif /* CONFIG_EVENTFD */
7925 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7926 case TARGET_NR_fallocate:
7927 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
7928 break;
7929 #endif
7930 #if defined(CONFIG_SYNC_FILE_RANGE)
7931 #if defined(TARGET_NR_sync_file_range)
7932 case TARGET_NR_sync_file_range:
7933 #if TARGET_ABI_BITS == 32
7934 #if defined(TARGET_MIPS)
7935 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7936 target_offset64(arg5, arg6), arg7));
7937 #else
7938 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
7939 target_offset64(arg4, arg5), arg6));
7940 #endif /* !TARGET_MIPS */
7941 #else
7942 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
7943 #endif
7944 break;
7945 #endif
7946 #if defined(TARGET_NR_sync_file_range2)
7947 case TARGET_NR_sync_file_range2:
7948 /* This is like sync_file_range but the arguments are reordered */
7949 #if TARGET_ABI_BITS == 32
7950 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7951 target_offset64(arg5, arg6), arg2));
7952 #else
7953 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
7954 #endif
7955 break;
7956 #endif
7957 #endif
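    /* On 32-bit ABIs the 64-bit offset and length for sync_file_range are
     * reassembled from register pairs with target_offset64(); MIPS starts at
     * arg3 because its convention aligns 64-bit argument pairs (leaving a pad
     * slot after the fd), and sync_file_range2 carries the flags in arg2. */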
7958 #if defined(CONFIG_EPOLL)
7959 #if defined(TARGET_NR_epoll_create)
7960 case TARGET_NR_epoll_create:
7961 ret = get_errno(epoll_create(arg1));
7962 break;
7963 #endif
7964 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7965 case TARGET_NR_epoll_create1:
7966 ret = get_errno(epoll_create1(arg1));
7967 break;
7968 #endif
7969 #if defined(TARGET_NR_epoll_ctl)
7970 case TARGET_NR_epoll_ctl:
7972 struct epoll_event ep;
7973 struct epoll_event *epp = 0;
7974 if (arg4) {
7975 struct target_epoll_event *target_ep;
7976 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
7977 goto efault;
7979 ep.events = tswap32(target_ep->events);
7980 /* The epoll_data_t union is just opaque data to the kernel,
7981 * so we transfer all 64 bits across and need not worry what
7982 * actual data type it is.
7984 ep.data.u64 = tswap64(target_ep->data.u64);
7985 unlock_user_struct(target_ep, arg4, 0);
7986 epp = &ep;
7988 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
7989 break;
7991 #endif
7993 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7994 #define IMPLEMENT_EPOLL_PWAIT
7995 #endif
7996 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7997 #if defined(TARGET_NR_epoll_wait)
7998 case TARGET_NR_epoll_wait:
7999 #endif
8000 #if defined(IMPLEMENT_EPOLL_PWAIT)
8001 case TARGET_NR_epoll_pwait:
8002 #endif
8004 struct target_epoll_event *target_ep;
8005 struct epoll_event *ep;
8006 int epfd = arg1;
8007 int maxevents = arg3;
8008 int timeout = arg4;
8010 target_ep = lock_user(VERIFY_WRITE, arg2,
8011 maxevents * sizeof(struct target_epoll_event), 1);
8012 if (!target_ep) {
8013 goto efault;
8016 ep = alloca(maxevents * sizeof(struct epoll_event));
8018 switch (num) {
8019 #if defined(IMPLEMENT_EPOLL_PWAIT)
8020 case TARGET_NR_epoll_pwait:
8022 target_sigset_t *target_set;
8023 sigset_t _set, *set = &_set;
8025 if (arg5) {
8026 target_set = lock_user(VERIFY_READ, arg5,
8027 sizeof(target_sigset_t), 1);
8028 if (!target_set) {
8029 unlock_user(target_ep, arg2, 0);
8030 goto efault;
8032 target_to_host_sigset(set, target_set);
8033 unlock_user(target_set, arg5, 0);
8034 } else {
8035 set = NULL;
8038 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8039 break;
8041 #endif
8042 #if defined(TARGET_NR_epoll_wait)
8043 case TARGET_NR_epoll_wait:
8044 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8045 break;
8046 #endif
8047 default:
8048 ret = -TARGET_ENOSYS;
8050 if (!is_error(ret)) {
8051 int i;
8052 for (i = 0; i < ret; i++) {
8053 target_ep[i].events = tswap32(ep[i].events);
8054 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8057 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8058 break;
8060 #endif
8061 #endif
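    /* epoll_wait and epoll_pwait share the block above: the returned host
     * epoll_event array is converted entry by entry (events as 32-bit, the
     * opaque data field as a raw 64-bit value) before being copied back to
     * the guest buffer. */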
8062 #ifdef TARGET_NR_prlimit64
8063 case TARGET_NR_prlimit64:
8065 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8066 struct target_rlimit64 *target_rnew, *target_rold;
8067 struct host_rlimit64 rnew, rold, *rnewp = 0;
8068 if (arg3) {
8069 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8070 goto efault;
8072 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8073 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8074 unlock_user_struct(target_rnew, arg3, 0);
8075 rnewp = &rnew;
8078 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8079 if (!is_error(ret) && arg4) {
8080 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8081 goto efault;
8083 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8084 target_rold->rlim_max = tswap64(rold.rlim_max);
8085 unlock_user_struct(target_rold, arg4, 1);
8087 break;
8089 #endif
8090 default:
8091 unimplemented:
8092 gemu_log("qemu: Unsupported syscall: %d\n", num);
8093 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8094 unimplemented_nowarn:
8095 #endif
8096 ret = -TARGET_ENOSYS;
8097 break;
8099 fail:
8100 #ifdef DEBUG
8101 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8102 #endif
8103 if(do_strace)
8104 print_syscall_ret(num, ret);
8105 return ret;
8106 efault:
8107 ret = -TARGET_EFAULT;
8108 goto fail;