linux-user: fake /proc/self/auxv
[qemu-kvm.git] / linux-user / syscall.c
/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <signal.h>
#include <sched.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include "qemu-common.h"
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu-xattr.h"
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include "linux_loop.h"
#include "cpu-uname.h"

#include "qemu.h"

#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif

//#define DEBUG

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
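/*
 * Example expansion: _syscall0(int, gettid) defines
 *     static int gettid(void) { return syscall(__NR_gettid); }
 * i.e. a thin static wrapper that invokes the host syscall directly and
 * returns its raw result (host errno convention).
 */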
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
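/*
 * Translation table for open(2)/fcntl(2) file status flags: each entry
 * pairs a target flag (and its mask) with the corresponding host flag,
 * so guest O_* flag words can be converted to host values and back.
 */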
static bitmask_transtbl fcntl_flags_tbl[] = {
    { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
    { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
    { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
    { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
    { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
    { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
    { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
    { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
    { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
    { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
    { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
    { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
    { 0, 0, 0, 0 }
};
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
    struct utsname uts_buf;

    if (uname(&uts_buf) < 0)
        return (-1);

    /*
     * Just in case these have some differences, we
     * translate utsname to new_utsname (which is the
     * struct the Linux kernel uses).
     */

    memset(buf, 0, sizeof(*buf));
    COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
    COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
    COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
    COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
    COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
    COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
    return (0);
}
#undef COPY_UTSNAME_FIELD

static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
    return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
    return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
    gid_t group, int flags)
{
    return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
    const struct timeval times[2])
{
    return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath, int flags)
{
    return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
    return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
    dev_t dev)
{
    return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    /*
     * open(2) has extra parameter 'mode' when called with
     * flag O_CREAT.
     */
    if ((flags & O_CREAT) != 0) {
        return (openat(dirfd, pathname, flags, mode));
    }
    return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
    return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath)
{
    return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
    return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
    return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
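/*
 * Errno translation: some errno values differ between the target and host
 * ABIs.  host_to_target_errno_table[] below maps host values to target
 * values; its inverse, target_to_host_errno_table[], is filled in from it
 * at startup by syscall_init().
 */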
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
};
static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
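/*
 * Emulated program break: target_original_brk is the initial break of the
 * guest image, target_brk the current break as reported to the guest, and
 * brk_page the highest host-page-aligned address currently reserved for the
 * guest heap (see do_brk() below).
 */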
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
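/*
 * select(2) support: the helpers below repack fd_set bitmaps between the
 * guest layout (an array of abi_ulong words in target byte order) and the
 * host fd_set, one bit at a time, so word-size and endianness differences
 * are handled.
 */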
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
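/*
 * rlimit conversion: RLIM_INFINITY differs between target and host, and the
 * target abi_ulong may be narrower than the host rlim_t, so values that do
 * not round-trip are saturated to the respective infinity value.
 */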
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif
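/*
 * do_pipe2() relies on the host pipe2(2) when CONFIG_PIPE2 is set; without
 * it the flags argument cannot be honoured and -ENOSYS is returned, so
 * do_pipe() falls back to plain pipe(2) whenever flags is zero.
 */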
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
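/*
 * Control-message (ancillary data) translation for sendmsg/recvmsg: the cmsg
 * headers are walked in parallel in the target and host buffers.  SCM_RIGHTS
 * payloads (file descriptor arrays) are byte-swapped element by element; any
 * other cmsg type is copied verbatim with a warning, since its layout is not
 * translated.
 */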
/* ??? Should this also swap msgh->name?  */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

/* ??? Should this also swap msgh->name?  */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
            break;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            break;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERCRED:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
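/*
 * lock_iovec() converts a guest iovec array (struct target_iovec, with
 * byte-swapped base/len fields) into a host struct iovec array, locking each
 * buffer into host memory; unlock_iovec() releases the buffers again, copying
 * them back to guest memory when requested.
 */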
/* FIXME
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapal(target_vec[i].iov_base);
        vec[i].iov_len = tswapal(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if an element has an invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}

static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapal(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
    }
    unlock_user (target_vec, target_addr, 0);

    return 0;
}
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
    return get_errno(socket(domain, type, protocol));
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg() Must return target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    /* FIXME */
    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            unlock_user_struct(msgp, target_msg, send ? 0 : 1);
            return ret;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapal(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret))
                ret = len;
        }
    }
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        addr = alloca(addrlen);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}

/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
2028 #ifdef TARGET_NR_socketcall
2029 /* do_socketcall() Must return target values and target errnos. */
2030 static abi_long do_socketcall(int num, abi_ulong vptr)
2032 abi_long ret;
2033 const int n = sizeof(abi_ulong);
2035 switch(num) {
2036 case SOCKOP_socket:
2038 abi_ulong domain, type, protocol;
2040 if (get_user_ual(domain, vptr)
2041 || get_user_ual(type, vptr + n)
2042 || get_user_ual(protocol, vptr + 2 * n))
2043 return -TARGET_EFAULT;
2045 ret = do_socket(domain, type, protocol);
2047 break;
2048 case SOCKOP_bind:
2050 abi_ulong sockfd;
2051 abi_ulong target_addr;
2052 socklen_t addrlen;
2054 if (get_user_ual(sockfd, vptr)
2055 || get_user_ual(target_addr, vptr + n)
2056 || get_user_ual(addrlen, vptr + 2 * n))
2057 return -TARGET_EFAULT;
2059 ret = do_bind(sockfd, target_addr, addrlen);
2061 break;
2062 case SOCKOP_connect:
2064 abi_ulong sockfd;
2065 abi_ulong target_addr;
2066 socklen_t addrlen;
2068 if (get_user_ual(sockfd, vptr)
2069 || get_user_ual(target_addr, vptr + n)
2070 || get_user_ual(addrlen, vptr + 2 * n))
2071 return -TARGET_EFAULT;
2073 ret = do_connect(sockfd, target_addr, addrlen);
2075 break;
2076 case SOCKOP_listen:
2078 abi_ulong sockfd, backlog;
2080 if (get_user_ual(sockfd, vptr)
2081 || get_user_ual(backlog, vptr + n))
2082 return -TARGET_EFAULT;
2084 ret = get_errno(listen(sockfd, backlog));
2086 break;
2087 case SOCKOP_accept:
2089 abi_ulong sockfd;
2090 abi_ulong target_addr, target_addrlen;
2092 if (get_user_ual(sockfd, vptr)
2093 || get_user_ual(target_addr, vptr + n)
2094 || get_user_ual(target_addrlen, vptr + 2 * n))
2095 return -TARGET_EFAULT;
2097 ret = do_accept(sockfd, target_addr, target_addrlen);
2099 break;
2100 case SOCKOP_getsockname:
2102 abi_ulong sockfd;
2103 abi_ulong target_addr, target_addrlen;
2105 if (get_user_ual(sockfd, vptr)
2106 || get_user_ual(target_addr, vptr + n)
2107 || get_user_ual(target_addrlen, vptr + 2 * n))
2108 return -TARGET_EFAULT;
2110 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2112 break;
2113 case SOCKOP_getpeername:
2115 abi_ulong sockfd;
2116 abi_ulong target_addr, target_addrlen;
2118 if (get_user_ual(sockfd, vptr)
2119 || get_user_ual(target_addr, vptr + n)
2120 || get_user_ual(target_addrlen, vptr + 2 * n))
2121 return -TARGET_EFAULT;
2123 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2125 break;
2126 case SOCKOP_socketpair:
2128 abi_ulong domain, type, protocol;
2129 abi_ulong tab;
2131 if (get_user_ual(domain, vptr)
2132 || get_user_ual(type, vptr + n)
2133 || get_user_ual(protocol, vptr + 2 * n)
2134 || get_user_ual(tab, vptr + 3 * n))
2135 return -TARGET_EFAULT;
2137 ret = do_socketpair(domain, type, protocol, tab);
2139 break;
2140 case SOCKOP_send:
2142 abi_ulong sockfd;
2143 abi_ulong msg;
2144 size_t len;
2145 abi_ulong flags;
2147 if (get_user_ual(sockfd, vptr)
2148 || get_user_ual(msg, vptr + n)
2149 || get_user_ual(len, vptr + 2 * n)
2150 || get_user_ual(flags, vptr + 3 * n))
2151 return -TARGET_EFAULT;
2153 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2155 break;
2156 case SOCKOP_recv:
2158 abi_ulong sockfd;
2159 abi_ulong msg;
2160 size_t len;
2161 abi_ulong flags;
2163 if (get_user_ual(sockfd, vptr)
2164 || get_user_ual(msg, vptr + n)
2165 || get_user_ual(len, vptr + 2 * n)
2166 || get_user_ual(flags, vptr + 3 * n))
2167 return -TARGET_EFAULT;
2169 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2171 break;
2172 case SOCKOP_sendto:
2174 abi_ulong sockfd;
2175 abi_ulong msg;
2176 size_t len;
2177 abi_ulong flags;
2178 abi_ulong addr;
2179 socklen_t addrlen;
2181 if (get_user_ual(sockfd, vptr)
2182 || get_user_ual(msg, vptr + n)
2183 || get_user_ual(len, vptr + 2 * n)
2184 || get_user_ual(flags, vptr + 3 * n)
2185 || get_user_ual(addr, vptr + 4 * n)
2186 || get_user_ual(addrlen, vptr + 5 * n))
2187 return -TARGET_EFAULT;
2189 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2191 break;
2192 case SOCKOP_recvfrom:
2194 abi_ulong sockfd;
2195 abi_ulong msg;
2196 size_t len;
2197 abi_ulong flags;
2198 abi_ulong addr;
2199 socklen_t addrlen;
2201 if (get_user_ual(sockfd, vptr)
2202 || get_user_ual(msg, vptr + n)
2203 || get_user_ual(len, vptr + 2 * n)
2204 || get_user_ual(flags, vptr + 3 * n)
2205 || get_user_ual(addr, vptr + 4 * n)
2206 || get_user_ual(addrlen, vptr + 5 * n))
2207 return -TARGET_EFAULT;
2209 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2211 break;
2212 case SOCKOP_shutdown:
2214 abi_ulong sockfd, how;
2216 if (get_user_ual(sockfd, vptr)
2217 || get_user_ual(how, vptr + n))
2218 return -TARGET_EFAULT;
2220 ret = get_errno(shutdown(sockfd, how));
2222 break;
2223 case SOCKOP_sendmsg:
2224 case SOCKOP_recvmsg:
2226 abi_ulong fd;
2227 abi_ulong target_msg;
2228 abi_ulong flags;
2230 if (get_user_ual(fd, vptr)
2231 || get_user_ual(target_msg, vptr + n)
2232 || get_user_ual(flags, vptr + 2 * n))
2233 return -TARGET_EFAULT;
2235 ret = do_sendrecvmsg(fd, target_msg, flags,
2236 (num == SOCKOP_sendmsg));
2238 break;
2239 case SOCKOP_setsockopt:
2241 abi_ulong sockfd;
2242 abi_ulong level;
2243 abi_ulong optname;
2244 abi_ulong optval;
2245 socklen_t optlen;
2247 if (get_user_ual(sockfd, vptr)
2248 || get_user_ual(level, vptr + n)
2249 || get_user_ual(optname, vptr + 2 * n)
2250 || get_user_ual(optval, vptr + 3 * n)
2251 || get_user_ual(optlen, vptr + 4 * n))
2252 return -TARGET_EFAULT;
2254 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2256 break;
2257 case SOCKOP_getsockopt:
2259 abi_ulong sockfd;
2260 abi_ulong level;
2261 abi_ulong optname;
2262 abi_ulong optval;
2263 socklen_t optlen;
2265 if (get_user_ual(sockfd, vptr)
2266 || get_user_ual(level, vptr + n)
2267 || get_user_ual(optname, vptr + 2 * n)
2268 || get_user_ual(optval, vptr + 3 * n)
2269 || get_user_ual(optlen, vptr + 4 * n))
2270 return -TARGET_EFAULT;
2272 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2274 break;
2275 default:
2276 gemu_log("Unsupported socketcall: %d\n", num);
2277 ret = -TARGET_ENOSYS;
2278 break;
2280 return ret;
2282 #endif
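/*
 * Illustrative sketch (not in the original source): on targets that
 * multiplex socket operations through socketcall(2), the guest libc
 * packs the arguments into a contiguous array of abi_ulong words and
 * passes its address as 'vptr'.  For SOCKOP_connect, for example, the
 * guest-side layout that do_socketcall() unpacks with get_user_ual()
 * looks roughly like
 *
 *     abi_ulong args[3];
 *     args[0] = sockfd;                     // read at vptr
 *     args[1] = (abi_ulong)guest_addr_ptr;  // read at vptr + n
 *     args[2] = addrlen;                    // read at vptr + 2 * n
 *     socketcall(SOCKOP_connect, args);
 *
 * where n == sizeof(abi_ulong), matching the 'const int n' used above.
 */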
2284 #define N_SHM_REGIONS 32
2286 static struct shm_region {
2287 abi_ulong start;
2288 abi_ulong size;
2289 } shm_regions[N_SHM_REGIONS];
2291 struct target_ipc_perm
2293 abi_long __key;
2294 abi_ulong uid;
2295 abi_ulong gid;
2296 abi_ulong cuid;
2297 abi_ulong cgid;
2298 unsigned short int mode;
2299 unsigned short int __pad1;
2300 unsigned short int __seq;
2301 unsigned short int __pad2;
2302 abi_ulong __unused1;
2303 abi_ulong __unused2;
2306 struct target_semid_ds
2308 struct target_ipc_perm sem_perm;
2309 abi_ulong sem_otime;
2310 abi_ulong __unused1;
2311 abi_ulong sem_ctime;
2312 abi_ulong __unused2;
2313 abi_ulong sem_nsems;
2314 abi_ulong __unused3;
2315 abi_ulong __unused4;
2318 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2319 abi_ulong target_addr)
2321 struct target_ipc_perm *target_ip;
2322 struct target_semid_ds *target_sd;
2324 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2325 return -TARGET_EFAULT;
2326 target_ip = &(target_sd->sem_perm);
2327 host_ip->__key = tswapal(target_ip->__key);
2328 host_ip->uid = tswapal(target_ip->uid);
2329 host_ip->gid = tswapal(target_ip->gid);
2330 host_ip->cuid = tswapal(target_ip->cuid);
2331 host_ip->cgid = tswapal(target_ip->cgid);
2332 host_ip->mode = tswap16(target_ip->mode);
2333 unlock_user_struct(target_sd, target_addr, 0);
2334 return 0;
2337 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2338 struct ipc_perm *host_ip)
2340 struct target_ipc_perm *target_ip;
2341 struct target_semid_ds *target_sd;
2343 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2344 return -TARGET_EFAULT;
2345 target_ip = &(target_sd->sem_perm);
2346 target_ip->__key = tswapal(host_ip->__key);
2347 target_ip->uid = tswapal(host_ip->uid);
2348 target_ip->gid = tswapal(host_ip->gid);
2349 target_ip->cuid = tswapal(host_ip->cuid);
2350 target_ip->cgid = tswapal(host_ip->cgid);
2351 target_ip->mode = tswap16(host_ip->mode);
2352 unlock_user_struct(target_sd, target_addr, 1);
2353 return 0;
2356 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2357 abi_ulong target_addr)
2359 struct target_semid_ds *target_sd;
2361 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2362 return -TARGET_EFAULT;
2363 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2364 return -TARGET_EFAULT;
2365 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2366 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2367 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2368 unlock_user_struct(target_sd, target_addr, 0);
2369 return 0;
2372 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2373 struct semid_ds *host_sd)
2375 struct target_semid_ds *target_sd;
2377 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2378 return -TARGET_EFAULT;
2379 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2380 return -TARGET_EFAULT;
2381 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2382 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2383 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2384 unlock_user_struct(target_sd, target_addr, 1);
2385 return 0;
2388 struct target_seminfo {
2389 int semmap;
2390 int semmni;
2391 int semmns;
2392 int semmnu;
2393 int semmsl;
2394 int semopm;
2395 int semume;
2396 int semusz;
2397 int semvmx;
2398 int semaem;
2401 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2402 struct seminfo *host_seminfo)
2404 struct target_seminfo *target_seminfo;
2405 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2406 return -TARGET_EFAULT;
2407 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2408 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2409 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2410 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2411 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2412 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2413 __put_user(host_seminfo->semume, &target_seminfo->semume);
2414 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2415 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2416 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2417 unlock_user_struct(target_seminfo, target_addr, 1);
2418 return 0;
2421 union semun {
2422 int val;
2423 struct semid_ds *buf;
2424 unsigned short *array;
2425 struct seminfo *__buf;
2428 union target_semun {
2429 int val;
2430 abi_ulong buf;
2431 abi_ulong array;
2432 abi_ulong __buf;
2435 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2436 abi_ulong target_addr)
2438 int nsems;
2439 unsigned short *array;
2440 union semun semun;
2441 struct semid_ds semid_ds;
2442 int i, ret;
2444 semun.buf = &semid_ds;
2446 ret = semctl(semid, 0, IPC_STAT, semun);
2447 if (ret == -1)
2448 return get_errno(ret);
2450 nsems = semid_ds.sem_nsems;
2452 *host_array = malloc(nsems*sizeof(unsigned short));
2453 array = lock_user(VERIFY_READ, target_addr,
2454 nsems*sizeof(unsigned short), 1);
2455 if (!array)
2456 return -TARGET_EFAULT;
2458 for(i=0; i<nsems; i++) {
2459 __get_user((*host_array)[i], &array[i]);
2461 unlock_user(array, target_addr, 0);
2463 return 0;
2466 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2467 unsigned short **host_array)
2469 int nsems;
2470 unsigned short *array;
2471 union semun semun;
2472 struct semid_ds semid_ds;
2473 int i, ret;
2475 semun.buf = &semid_ds;
2477 ret = semctl(semid, 0, IPC_STAT, semun);
2478 if (ret == -1)
2479 return get_errno(ret);
2481 nsems = semid_ds.sem_nsems;
2483 array = lock_user(VERIFY_WRITE, target_addr,
2484 nsems*sizeof(unsigned short), 0);
2485 if (!array)
2486 return -TARGET_EFAULT;
2488 for(i=0; i<nsems; i++) {
2489 __put_user((*host_array)[i], &array[i]);
2491 free(*host_array);
2492 unlock_user(array, target_addr, 1);
2494 return 0;
2497 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2498 union target_semun target_su)
2500 union semun arg;
2501 struct semid_ds dsarg;
2502 unsigned short *array = NULL;
2503 struct seminfo seminfo;
2504 abi_long ret = -TARGET_EINVAL;
2505 abi_long err;
2506 cmd &= 0xff;
2508 switch( cmd ) {
2509 case GETVAL:
2510 case SETVAL:
2511 arg.val = tswap32(target_su.val);
2512 ret = get_errno(semctl(semid, semnum, cmd, arg));
2513 target_su.val = tswap32(arg.val);
2514 break;
2515 case GETALL:
2516 case SETALL:
2517 err = target_to_host_semarray(semid, &array, target_su.array);
2518 if (err)
2519 return err;
2520 arg.array = array;
2521 ret = get_errno(semctl(semid, semnum, cmd, arg));
2522 err = host_to_target_semarray(semid, target_su.array, &array);
2523 if (err)
2524 return err;
2525 break;
2526 case IPC_STAT:
2527 case IPC_SET:
2528 case SEM_STAT:
2529 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2530 if (err)
2531 return err;
2532 arg.buf = &dsarg;
2533 ret = get_errno(semctl(semid, semnum, cmd, arg));
2534 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2535 if (err)
2536 return err;
2537 break;
2538 case IPC_INFO:
2539 case SEM_INFO:
2540 arg.__buf = &seminfo;
2541 ret = get_errno(semctl(semid, semnum, cmd, arg));
2542 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2543 if (err)
2544 return err;
2545 break;
2546 case IPC_RMID:
2547 case GETPID:
2548 case GETNCNT:
2549 case GETZCNT:
2550 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2551 break;
2554 return ret;
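/*
 * Note on the calling convention emulated above (sketch, not original
 * code): the guest passes a union semun by value, but its pointer
 * members arrive here as guest addresses packed into union target_semun.
 * A guest GETALL call looks roughly like
 *
 *     unsigned short vals[nsems];
 *     union semun arg;
 *     arg.array = vals;            // becomes target_su.array (abi_ulong)
 *     semctl(semid, 0, GETALL, arg);
 *
 * so do_semctl() has to translate target_su.array / .buf / .__buf back
 * into host pointers via target_to_host_semarray(),
 * target_to_host_semid_ds() and host_to_target_seminfo().
 */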
2557 struct target_sembuf {
2558 unsigned short sem_num;
2559 short sem_op;
2560 short sem_flg;
2563 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2564 abi_ulong target_addr,
2565 unsigned nsops)
2567 struct target_sembuf *target_sembuf;
2568 int i;
2570 target_sembuf = lock_user(VERIFY_READ, target_addr,
2571 nsops*sizeof(struct target_sembuf), 1);
2572 if (!target_sembuf)
2573 return -TARGET_EFAULT;
2575 for(i=0; i<nsops; i++) {
2576 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2577 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2578 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2581 unlock_user(target_sembuf, target_addr, 0);
2583 return 0;
2586 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2588 struct sembuf sops[nsops];
2590 if (target_to_host_sembuf(sops, ptr, nsops))
2591 return -TARGET_EFAULT;
2593 return get_errno(semop(semid, sops, nsops));
2596 struct target_msqid_ds
2598 struct target_ipc_perm msg_perm;
2599 abi_ulong msg_stime;
2600 #if TARGET_ABI_BITS == 32
2601 abi_ulong __unused1;
2602 #endif
2603 abi_ulong msg_rtime;
2604 #if TARGET_ABI_BITS == 32
2605 abi_ulong __unused2;
2606 #endif
2607 abi_ulong msg_ctime;
2608 #if TARGET_ABI_BITS == 32
2609 abi_ulong __unused3;
2610 #endif
2611 abi_ulong __msg_cbytes;
2612 abi_ulong msg_qnum;
2613 abi_ulong msg_qbytes;
2614 abi_ulong msg_lspid;
2615 abi_ulong msg_lrpid;
2616 abi_ulong __unused4;
2617 abi_ulong __unused5;
2620 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2621 abi_ulong target_addr)
2623 struct target_msqid_ds *target_md;
2625 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2626 return -TARGET_EFAULT;
2627 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2628 return -TARGET_EFAULT;
2629 host_md->msg_stime = tswapal(target_md->msg_stime);
2630 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2631 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2632 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2633 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2634 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2635 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2636 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2637 unlock_user_struct(target_md, target_addr, 0);
2638 return 0;
2641 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2642 struct msqid_ds *host_md)
2644 struct target_msqid_ds *target_md;
2646 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2647 return -TARGET_EFAULT;
2648 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2649 return -TARGET_EFAULT;
2650 target_md->msg_stime = tswapal(host_md->msg_stime);
2651 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2652 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2653 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2654 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2655 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2656 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2657 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2658 unlock_user_struct(target_md, target_addr, 1);
2659 return 0;
2662 struct target_msginfo {
2663 int msgpool;
2664 int msgmap;
2665 int msgmax;
2666 int msgmnb;
2667 int msgmni;
2668 int msgssz;
2669 int msgtql;
2670 unsigned short int msgseg;
2673 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2674 struct msginfo *host_msginfo)
2676 struct target_msginfo *target_msginfo;
2677 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2678 return -TARGET_EFAULT;
2679 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2680 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2681 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2682 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2683 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2684 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2685 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2686 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2687 unlock_user_struct(target_msginfo, target_addr, 1);
2688 return 0;
2691 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2693 struct msqid_ds dsarg;
2694 struct msginfo msginfo;
2695 abi_long ret = -TARGET_EINVAL;
2697 cmd &= 0xff;
2699 switch (cmd) {
2700 case IPC_STAT:
2701 case IPC_SET:
2702 case MSG_STAT:
2703 if (target_to_host_msqid_ds(&dsarg,ptr))
2704 return -TARGET_EFAULT;
2705 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2706 if (host_to_target_msqid_ds(ptr,&dsarg))
2707 return -TARGET_EFAULT;
2708 break;
2709 case IPC_RMID:
2710 ret = get_errno(msgctl(msgid, cmd, NULL));
2711 break;
2712 case IPC_INFO:
2713 case MSG_INFO:
2714 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2715 if (host_to_target_msginfo(ptr, &msginfo))
2716 return -TARGET_EFAULT;
2717 break;
2720 return ret;
2723 struct target_msgbuf {
2724 abi_long mtype;
2725 char mtext[1];
2728 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2729 unsigned int msgsz, int msgflg)
2731 struct target_msgbuf *target_mb;
2732 struct msgbuf *host_mb;
2733 abi_long ret = 0;
2735 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2736 return -TARGET_EFAULT;
2737 host_mb = malloc(msgsz+sizeof(long));
if (!host_mb) {
    unlock_user_struct(target_mb, msgp, 0);
    return -TARGET_ENOMEM;
}
2738 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2739 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2740 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2741 free(host_mb);
2742 unlock_user_struct(target_mb, msgp, 0);
2744 return ret;
2747 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2748 unsigned int msgsz, abi_long msgtyp,
2749 int msgflg)
2751 struct target_msgbuf *target_mb;
2752 char *target_mtext;
2753 struct msgbuf *host_mb;
2754 abi_long ret = 0;
2756 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2757 return -TARGET_EFAULT;
2759 host_mb = malloc(msgsz+sizeof(long));
if (!host_mb) {
    ret = -TARGET_ENOMEM;
    goto end;
}
2760 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
2762 if (ret > 0) {
2763 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2764 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2765 if (!target_mtext) {
2766 ret = -TARGET_EFAULT;
2767 goto end;
2769 memcpy(target_mb->mtext, host_mb->mtext, ret);
2770 unlock_user(target_mtext, target_mtext_addr, ret);
2773 target_mb->mtype = tswapal(host_mb->mtype);
2774 free(host_mb);
2776 end:
2777 if (target_mb)
2778 unlock_user_struct(target_mb, msgp, 1);
2779 return ret;
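/*
 * Layout note (descriptive, not original code): struct target_msgbuf is
 * the guest view of the System V message buffer, i.e. an abi_long mtype
 * immediately followed by the payload:
 *
 *     struct target_msgbuf {            // at guest address msgp
 *         abi_long mtype;               // message type, byte-swapped here
 *         char     mtext[msgsz];        // copied to/from host_mb->mtext
 *     };
 *
 * which is why do_msgrcv() writes the received text at
 * msgp + sizeof(abi_ulong), directly past the mtype field.
 */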
2782 struct target_shmid_ds
2784 struct target_ipc_perm shm_perm;
2785 abi_ulong shm_segsz;
2786 abi_ulong shm_atime;
2787 #if TARGET_ABI_BITS == 32
2788 abi_ulong __unused1;
2789 #endif
2790 abi_ulong shm_dtime;
2791 #if TARGET_ABI_BITS == 32
2792 abi_ulong __unused2;
2793 #endif
2794 abi_ulong shm_ctime;
2795 #if TARGET_ABI_BITS == 32
2796 abi_ulong __unused3;
2797 #endif
2798 int shm_cpid;
2799 int shm_lpid;
2800 abi_ulong shm_nattch;
2801 unsigned long int __unused4;
2802 unsigned long int __unused5;
2805 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2806 abi_ulong target_addr)
2808 struct target_shmid_ds *target_sd;
2810 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2811 return -TARGET_EFAULT;
2812 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2813 return -TARGET_EFAULT;
2814 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2815 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2816 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2817 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2818 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2819 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2820 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2821 unlock_user_struct(target_sd, target_addr, 0);
2822 return 0;
2825 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2826 struct shmid_ds *host_sd)
2828 struct target_shmid_ds *target_sd;
2830 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2831 return -TARGET_EFAULT;
2832 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2833 return -TARGET_EFAULT;
2834 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2835 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2836 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2837 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2838 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2839 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2840 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2841 unlock_user_struct(target_sd, target_addr, 1);
2842 return 0;
2845 struct target_shminfo {
2846 abi_ulong shmmax;
2847 abi_ulong shmmin;
2848 abi_ulong shmmni;
2849 abi_ulong shmseg;
2850 abi_ulong shmall;
2853 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2854 struct shminfo *host_shminfo)
2856 struct target_shminfo *target_shminfo;
2857 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2858 return -TARGET_EFAULT;
2859 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2860 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2861 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2862 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2863 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2864 unlock_user_struct(target_shminfo, target_addr, 1);
2865 return 0;
2868 struct target_shm_info {
2869 int used_ids;
2870 abi_ulong shm_tot;
2871 abi_ulong shm_rss;
2872 abi_ulong shm_swp;
2873 abi_ulong swap_attempts;
2874 abi_ulong swap_successes;
2877 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2878 struct shm_info *host_shm_info)
2880 struct target_shm_info *target_shm_info;
2881 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2882 return -TARGET_EFAULT;
2883 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2884 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2885 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2886 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2887 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2888 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2889 unlock_user_struct(target_shm_info, target_addr, 1);
2890 return 0;
2893 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2895 struct shmid_ds dsarg;
2896 struct shminfo shminfo;
2897 struct shm_info shm_info;
2898 abi_long ret = -TARGET_EINVAL;
2900 cmd &= 0xff;
2902 switch(cmd) {
2903 case IPC_STAT:
2904 case IPC_SET:
2905 case SHM_STAT:
2906 if (target_to_host_shmid_ds(&dsarg, buf))
2907 return -TARGET_EFAULT;
2908 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2909 if (host_to_target_shmid_ds(buf, &dsarg))
2910 return -TARGET_EFAULT;
2911 break;
2912 case IPC_INFO:
2913 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2914 if (host_to_target_shminfo(buf, &shminfo))
2915 return -TARGET_EFAULT;
2916 break;
2917 case SHM_INFO:
2918 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2919 if (host_to_target_shm_info(buf, &shm_info))
2920 return -TARGET_EFAULT;
2921 break;
2922 case IPC_RMID:
2923 case SHM_LOCK:
2924 case SHM_UNLOCK:
2925 ret = get_errno(shmctl(shmid, cmd, NULL));
2926 break;
2929 return ret;
2932 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2934 abi_long raddr;
2935 void *host_raddr;
2936 struct shmid_ds shm_info;
2937 int i,ret;
2939 /* find out the length of the shared memory segment */
2940 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2941 if (is_error(ret)) {
2942 /* can't get length, bail out */
2943 return ret;
2946 mmap_lock();
2948 if (shmaddr)
2949 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2950 else {
2951 abi_ulong mmap_start;
2953 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2955 if (mmap_start == -1) {
2956 errno = ENOMEM;
2957 host_raddr = (void *)-1;
2958 } else
2959 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2962 if (host_raddr == (void *)-1) {
2963 mmap_unlock();
2964 return get_errno((long)host_raddr);
2966 raddr=h2g((unsigned long)host_raddr);
2968 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2969 PAGE_VALID | PAGE_READ |
2970 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2972 for (i = 0; i < N_SHM_REGIONS; i++) {
2973 if (shm_regions[i].start == 0) {
2974 shm_regions[i].start = raddr;
2975 shm_regions[i].size = shm_info.shm_segsz;
2976 break;
2980 mmap_unlock();
2981 return raddr;
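/*
 * Descriptive note (not in the original source): shmdt(2) only receives
 * the attach address, so do_shmat() records each successful attach in
 * shm_regions[] together with its size.  do_shmdt() below looks the
 * address up again in order to clear the page flags for the whole
 * segment before detaching it on the host side.
 */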
2985 static inline abi_long do_shmdt(abi_ulong shmaddr)
2987 int i;
2989 for (i = 0; i < N_SHM_REGIONS; ++i) {
2990 if (shm_regions[i].start == shmaddr) {
2991 shm_regions[i].start = 0;
2992 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
2993 break;
2997 return get_errno(shmdt(g2h(shmaddr)));
3000 #ifdef TARGET_NR_ipc
3001 /* ??? This only works with linear mappings. */
3002 /* do_ipc() must return target values and target errnos. */
3003 static abi_long do_ipc(unsigned int call, int first,
3004 int second, int third,
3005 abi_long ptr, abi_long fifth)
3007 int version;
3008 abi_long ret = 0;
3010 version = call >> 16;
3011 call &= 0xffff;
3013 switch (call) {
3014 case IPCOP_semop:
3015 ret = do_semop(first, ptr, second);
3016 break;
3018 case IPCOP_semget:
3019 ret = get_errno(semget(first, second, third));
3020 break;
3022 case IPCOP_semctl:
3023 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3024 break;
3026 case IPCOP_msgget:
3027 ret = get_errno(msgget(first, second));
3028 break;
3030 case IPCOP_msgsnd:
3031 ret = do_msgsnd(first, ptr, second, third);
3032 break;
3034 case IPCOP_msgctl:
3035 ret = do_msgctl(first, second, ptr);
3036 break;
3038 case IPCOP_msgrcv:
3039 switch (version) {
3040 case 0:
3042 struct target_ipc_kludge {
3043 abi_long msgp;
3044 abi_long msgtyp;
3045 } *tmp;
3047 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3048 ret = -TARGET_EFAULT;
3049 break;
3052 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3054 unlock_user_struct(tmp, ptr, 0);
3055 break;
3057 default:
3058 ret = do_msgrcv(first, ptr, second, fifth, third);
3060 break;
3062 case IPCOP_shmat:
3063 switch (version) {
3064 default:
3066 abi_ulong raddr;
3067 raddr = do_shmat(first, ptr, second);
3068 if (is_error(raddr))
3069 return get_errno(raddr);
3070 if (put_user_ual(raddr, third))
3071 return -TARGET_EFAULT;
3072 break;
3074 case 1:
3075 ret = -TARGET_EINVAL;
3076 break;
3078 break;
3079 case IPCOP_shmdt:
3080 ret = do_shmdt(ptr);
3081 break;
3083 case IPCOP_shmget:
3084 /* IPC_* flag values are the same on all linux platforms */
3085 ret = get_errno(shmget(first, second, third));
3086 break;
3088 /* IPC_* and SHM_* command values are the same on all linux platforms */
3089 case IPCOP_shmctl:
3090 ret = do_shmctl(first, second, third);
3091 break;
3092 default:
3093 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3094 ret = -TARGET_ENOSYS;
3095 break;
3097 return ret;
3099 #endif
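/*
 * Illustrative sketch (not part of the original code): on targets that
 * funnel all System V IPC through the single ipc(2) syscall, the call
 * number sits in the low 16 bits and an ABI version in the high 16 bits,
 * which is what the 'version = call >> 16; call &= 0xffff;' split above
 * undoes.  A guest semop(semid, sops, nsops) typically reaches us
 * roughly as
 *
 *     ipc(IPCOP_semop, semid, nsops, 0, sops);
 *
 * i.e. first = semid, second = nsops and ptr = the guest address of the
 * sembuf array handed to do_semop().
 */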
3101 /* kernel structure type definitions */
3103 #define STRUCT(name, ...) STRUCT_ ## name,
3104 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3105 enum {
3106 #include "syscall_types.h"
3108 #undef STRUCT
3109 #undef STRUCT_SPECIAL
3111 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3112 #define STRUCT_SPECIAL(name)
3113 #include "syscall_types.h"
3114 #undef STRUCT
3115 #undef STRUCT_SPECIAL
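/*
 * How the X-macro above works (illustrative, with a made-up example):
 * each STRUCT(name, ...) line in syscall_types.h is expanded twice.
 * A hypothetical entry such as
 *
 *     STRUCT(flock, TYPE_SHORT, TYPE_SHORT, TYPE_LONG, TYPE_LONG, TYPE_INT)
 *
 * would first contribute an enum constant STRUCT_flock, and then a
 * field-type description
 *
 *     static const argtype struct_flock_def[] = {
 *         TYPE_SHORT, TYPE_SHORT, TYPE_LONG, TYPE_LONG, TYPE_INT, TYPE_NULL
 *     };
 *
 * that the thunk layer uses to convert the struct between target and
 * host layouts.
 */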
3117 typedef struct IOCTLEntry IOCTLEntry;
3119 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3120 int fd, abi_long cmd, abi_long arg);
3122 struct IOCTLEntry {
3123 unsigned int target_cmd;
3124 unsigned int host_cmd;
3125 const char *name;
3126 int access;
3127 do_ioctl_fn *do_ioctl;
3128 const argtype arg_type[5];
3131 #define IOC_R 0x0001
3132 #define IOC_W 0x0002
3133 #define IOC_RW (IOC_R | IOC_W)
3135 #define MAX_STRUCT_SIZE 4096
3137 #ifdef CONFIG_FIEMAP
3138 /* So fiemap access checks don't overflow on 32 bit systems.
3139 * This is very slightly smaller than the limit imposed by
3140 * the underlying kernel. */
3142 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3143 / sizeof(struct fiemap_extent))
3145 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3146 int fd, abi_long cmd, abi_long arg)
3148 /* The parameter for this ioctl is a struct fiemap followed
3149 * by an array of struct fiemap_extent whose size is set
3150 * in fiemap->fm_extent_count. The array is filled in by the
3151 * ioctl. */
3153 int target_size_in, target_size_out;
3154 struct fiemap *fm;
3155 const argtype *arg_type = ie->arg_type;
3156 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3157 void *argptr, *p;
3158 abi_long ret;
3159 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3160 uint32_t outbufsz;
3161 int free_fm = 0;
3163 assert(arg_type[0] == TYPE_PTR);
3164 assert(ie->access == IOC_RW);
3165 arg_type++;
3166 target_size_in = thunk_type_size(arg_type, 0);
3167 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3168 if (!argptr) {
3169 return -TARGET_EFAULT;
3171 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3172 unlock_user(argptr, arg, 0);
3173 fm = (struct fiemap *)buf_temp;
3174 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3175 return -TARGET_EINVAL;
3178 outbufsz = sizeof (*fm) +
3179 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3181 if (outbufsz > MAX_STRUCT_SIZE) {
3182 /* We can't fit all the extents into the fixed size buffer.
3183 * Allocate one that is large enough and use it instead. */
3185 fm = malloc(outbufsz);
3186 if (!fm) {
3187 return -TARGET_ENOMEM;
3189 memcpy(fm, buf_temp, sizeof(struct fiemap));
3190 free_fm = 1;
3192 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3193 if (!is_error(ret)) {
3194 target_size_out = target_size_in;
3195 /* An extent_count of 0 means we were only counting the extents
3196 * so there are no structs to copy. */
3198 if (fm->fm_extent_count != 0) {
3199 target_size_out += fm->fm_mapped_extents * extent_size;
3201 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3202 if (!argptr) {
3203 ret = -TARGET_EFAULT;
3204 } else {
3205 /* Convert the struct fiemap */
3206 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3207 if (fm->fm_extent_count != 0) {
3208 p = argptr + target_size_in;
3209 /* ...and then all the struct fiemap_extents */
3210 for (i = 0; i < fm->fm_mapped_extents; i++) {
3211 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3212 THUNK_TARGET);
3213 p += extent_size;
3216 unlock_user(argptr, arg, target_size_out);
3219 if (free_fm) {
3220 free(fm);
3222 return ret;
3224 #endif
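/*
 * Guest-side sketch of the FS_IOC_FIEMAP layout handled above
 * (illustrative only, not part of the original source): the caller
 * allocates a struct fiemap with room for fm_extent_count extents
 * directly behind it, and the kernel fills in fm_mapped_extents of them:
 *
 *     size_t sz = sizeof(struct fiemap) + n * sizeof(struct fiemap_extent);
 *     struct fiemap *fm = calloc(1, sz);
 *     fm->fm_start = 0;
 *     fm->fm_length = ~0ULL;        // whole file
 *     fm->fm_extent_count = n;      // 0 means "just count the extents"
 *     ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * do_ioctl_fs_ioc_fiemap() has to reproduce this variable-length layout
 * on both sides, which is why it may replace buf_temp with a malloc()ed
 * buffer when the extent array does not fit in MAX_STRUCT_SIZE.
 */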
3226 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3227 int fd, abi_long cmd, abi_long arg)
3229 const argtype *arg_type = ie->arg_type;
3230 int target_size;
3231 void *argptr;
3232 int ret;
3233 struct ifconf *host_ifconf;
3234 uint32_t outbufsz;
3235 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3236 int target_ifreq_size;
3237 int nb_ifreq;
3238 int free_buf = 0;
3239 int i;
3240 int target_ifc_len;
3241 abi_long target_ifc_buf;
3242 int host_ifc_len;
3243 char *host_ifc_buf;
3245 assert(arg_type[0] == TYPE_PTR);
3246 assert(ie->access == IOC_RW);
3248 arg_type++;
3249 target_size = thunk_type_size(arg_type, 0);
3251 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3252 if (!argptr)
3253 return -TARGET_EFAULT;
3254 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3255 unlock_user(argptr, arg, 0);
3257 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3258 target_ifc_len = host_ifconf->ifc_len;
3259 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3261 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3262 nb_ifreq = target_ifc_len / target_ifreq_size;
3263 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3265 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3266 if (outbufsz > MAX_STRUCT_SIZE) {
3267 /* We can't fit all the ifreq entries into the fixed-size buffer.
3268 * Allocate one that is large enough and use it instead. */
3270 host_ifconf = malloc(outbufsz);
3271 if (!host_ifconf) {
3272 return -TARGET_ENOMEM;
3274 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3275 free_buf = 1;
3277 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3279 host_ifconf->ifc_len = host_ifc_len;
3280 host_ifconf->ifc_buf = host_ifc_buf;
3282 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3283 if (!is_error(ret)) {
3284 /* convert host ifc_len to target ifc_len */
3286 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3287 target_ifc_len = nb_ifreq * target_ifreq_size;
3288 host_ifconf->ifc_len = target_ifc_len;
3290 /* restore target ifc_buf */
3292 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3294 /* copy struct ifconf to target user */
3296 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3297 if (!argptr)
3298 return -TARGET_EFAULT;
3299 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3300 unlock_user(argptr, arg, target_size);
3302 /* copy ifreq[] to target user */
3304 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3305 for (i = 0; i < nb_ifreq ; i++) {
3306 thunk_convert(argptr + i * target_ifreq_size,
3307 host_ifc_buf + i * sizeof(struct ifreq),
3308 ifreq_arg_type, THUNK_TARGET);
3310 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3313 if (free_buf) {
3314 free(host_ifconf);
3317 return ret;
3320 static IOCTLEntry ioctl_entries[] = {
3321 #define IOCTL(cmd, access, ...) \
3322 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3323 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3324 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3325 #include "ioctls.h"
3326 { 0, 0, },
3329 /* ??? Implement proper locking for ioctls. */
3330 /* do_ioctl() Must return target values and target errnos. */
3331 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3333 const IOCTLEntry *ie;
3334 const argtype *arg_type;
3335 abi_long ret;
3336 uint8_t buf_temp[MAX_STRUCT_SIZE];
3337 int target_size;
3338 void *argptr;
3340 ie = ioctl_entries;
3341 for(;;) {
3342 if (ie->target_cmd == 0) {
3343 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3344 return -TARGET_ENOSYS;
3346 if (ie->target_cmd == cmd)
3347 break;
3348 ie++;
3350 arg_type = ie->arg_type;
3351 #if defined(DEBUG)
3352 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3353 #endif
3354 if (ie->do_ioctl) {
3355 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3358 switch(arg_type[0]) {
3359 case TYPE_NULL:
3360 /* no argument */
3361 ret = get_errno(ioctl(fd, ie->host_cmd));
3362 break;
3363 case TYPE_PTRVOID:
3364 case TYPE_INT:
3365 /* int argument */
3366 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3367 break;
3368 case TYPE_PTR:
3369 arg_type++;
3370 target_size = thunk_type_size(arg_type, 0);
3371 switch(ie->access) {
3372 case IOC_R:
3373 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3374 if (!is_error(ret)) {
3375 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3376 if (!argptr)
3377 return -TARGET_EFAULT;
3378 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3379 unlock_user(argptr, arg, target_size);
3381 break;
3382 case IOC_W:
3383 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3384 if (!argptr)
3385 return -TARGET_EFAULT;
3386 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3387 unlock_user(argptr, arg, 0);
3388 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3389 break;
3390 default:
3391 case IOC_RW:
3392 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3393 if (!argptr)
3394 return -TARGET_EFAULT;
3395 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3396 unlock_user(argptr, arg, 0);
3397 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3398 if (!is_error(ret)) {
3399 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3400 if (!argptr)
3401 return -TARGET_EFAULT;
3402 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3403 unlock_user(argptr, arg, target_size);
3405 break;
3407 break;
3408 default:
3409 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3410 (long)cmd, arg_type[0]);
3411 ret = -TARGET_ENOSYS;
3412 break;
3414 return ret;
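/*
 * Illustrative note (not in the original source): each IOCTL() line in
 * ioctls.h becomes one ioctl_entries[] element consumed by do_ioctl()
 * above.  An entry along the lines of
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * would expand to
 *
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 *
 * i.e. the target ioctl number, the host ioctl number, a name for
 * logging, the access direction and the argument type driving the
 * generic thunk conversion path.
 */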
3417 static const bitmask_transtbl iflag_tbl[] = {
3418 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3419 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3420 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3421 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3422 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3423 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3424 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3425 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3426 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3427 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3428 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3429 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3430 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3431 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3432 { 0, 0, 0, 0 }
3435 static const bitmask_transtbl oflag_tbl[] = {
3436 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3437 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3438 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3439 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3440 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3441 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3442 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3443 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3444 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3445 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3446 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3447 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3448 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3449 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3450 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3451 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3452 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3453 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3454 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3455 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3456 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3457 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3458 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3459 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3460 { 0, 0, 0, 0 }
3463 static const bitmask_transtbl cflag_tbl[] = {
3464 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3465 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3466 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3467 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3468 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3469 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3470 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3471 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3472 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3473 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3474 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3475 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3476 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3477 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3478 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3479 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3480 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3481 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3482 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3483 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3484 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3485 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3486 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3487 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3488 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3489 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3490 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3491 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3492 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3493 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3494 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3495 { 0, 0, 0, 0 }
3498 static const bitmask_transtbl lflag_tbl[] = {
3499 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3500 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3501 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3502 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3503 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3504 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3505 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3506 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3507 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3508 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3509 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3510 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3511 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3512 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3513 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3514 { 0, 0, 0, 0 }
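/*
 * Note on the tables above (descriptive sketch, not original code): each
 * bitmask_transtbl row is { target_mask, target_bits, host_mask,
 * host_bits }.  Converting a flag word is, conceptually,
 *
 *     if ((target_flags & target_mask) == target_bits)
 *         host_flags |= host_bits;
 *
 * which covers both simple one-bit flags (mask == bits, e.g. ICANON)
 * and multi-bit fields such as CSIZE or CBAUD, where the mask selects
 * the field and the bits select one value within it.
 */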
3517 static void target_to_host_termios (void *dst, const void *src)
3519 struct host_termios *host = dst;
3520 const struct target_termios *target = src;
3522 host->c_iflag =
3523 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3524 host->c_oflag =
3525 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3526 host->c_cflag =
3527 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3528 host->c_lflag =
3529 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3530 host->c_line = target->c_line;
3532 memset(host->c_cc, 0, sizeof(host->c_cc));
3533 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3534 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3535 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3536 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3537 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3538 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3539 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3540 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3541 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3542 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3543 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3544 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3545 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3546 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3547 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3548 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3549 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3552 static void host_to_target_termios (void *dst, const void *src)
3554 struct target_termios *target = dst;
3555 const struct host_termios *host = src;
3557 target->c_iflag =
3558 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3559 target->c_oflag =
3560 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3561 target->c_cflag =
3562 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3563 target->c_lflag =
3564 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3565 target->c_line = host->c_line;
3567 memset(target->c_cc, 0, sizeof(target->c_cc));
3568 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3569 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3570 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3571 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3572 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3573 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3574 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3575 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3576 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3577 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3578 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3579 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3580 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3581 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3582 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3583 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3584 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3587 static const StructEntry struct_termios_def = {
3588 .convert = { host_to_target_termios, target_to_host_termios },
3589 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3590 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3593 static bitmask_transtbl mmap_flags_tbl[] = {
3594 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3595 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3596 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3597 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3598 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3599 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3600 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3601 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3602 { 0, 0, 0, 0 }
3605 #if defined(TARGET_I386)
3607 /* NOTE: there is really only one LDT shared by all the threads */
3608 static uint8_t *ldt_table;
3610 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3612 int size;
3613 void *p;
3615 if (!ldt_table)
3616 return 0;
3617 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3618 if (size > bytecount)
3619 size = bytecount;
3620 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3621 if (!p)
3622 return -TARGET_EFAULT;
3623 /* ??? Should this be byteswapped? */
3624 memcpy(p, ldt_table, size);
3625 unlock_user(p, ptr, size);
3626 return size;
3629 /* XXX: add locking support */
3630 static abi_long write_ldt(CPUX86State *env,
3631 abi_ulong ptr, unsigned long bytecount, int oldmode)
3633 struct target_modify_ldt_ldt_s ldt_info;
3634 struct target_modify_ldt_ldt_s *target_ldt_info;
3635 int seg_32bit, contents, read_exec_only, limit_in_pages;
3636 int seg_not_present, useable, lm;
3637 uint32_t *lp, entry_1, entry_2;
3639 if (bytecount != sizeof(ldt_info))
3640 return -TARGET_EINVAL;
3641 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3642 return -TARGET_EFAULT;
3643 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3644 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3645 ldt_info.limit = tswap32(target_ldt_info->limit);
3646 ldt_info.flags = tswap32(target_ldt_info->flags);
3647 unlock_user_struct(target_ldt_info, ptr, 0);
3649 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3650 return -TARGET_EINVAL;
3651 seg_32bit = ldt_info.flags & 1;
3652 contents = (ldt_info.flags >> 1) & 3;
3653 read_exec_only = (ldt_info.flags >> 3) & 1;
3654 limit_in_pages = (ldt_info.flags >> 4) & 1;
3655 seg_not_present = (ldt_info.flags >> 5) & 1;
3656 useable = (ldt_info.flags >> 6) & 1;
3657 #ifdef TARGET_ABI32
3658 lm = 0;
3659 #else
3660 lm = (ldt_info.flags >> 7) & 1;
3661 #endif
3662 if (contents == 3) {
3663 if (oldmode)
3664 return -TARGET_EINVAL;
3665 if (seg_not_present == 0)
3666 return -TARGET_EINVAL;
3668 /* allocate the LDT */
3669 if (!ldt_table) {
3670 env->ldt.base = target_mmap(0,
3671 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3672 PROT_READ|PROT_WRITE,
3673 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3674 if (env->ldt.base == -1)
3675 return -TARGET_ENOMEM;
3676 memset(g2h(env->ldt.base), 0,
3677 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3678 env->ldt.limit = 0xffff;
3679 ldt_table = g2h(env->ldt.base);
3682 /* NOTE: same code as Linux kernel */
3683 /* Allow LDTs to be cleared by the user. */
3684 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3685 if (oldmode ||
3686 (contents == 0 &&
3687 read_exec_only == 1 &&
3688 seg_32bit == 0 &&
3689 limit_in_pages == 0 &&
3690 seg_not_present == 1 &&
3691 useable == 0 )) {
3692 entry_1 = 0;
3693 entry_2 = 0;
3694 goto install;
3698 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3699 (ldt_info.limit & 0x0ffff);
3700 entry_2 = (ldt_info.base_addr & 0xff000000) |
3701 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3702 (ldt_info.limit & 0xf0000) |
3703 ((read_exec_only ^ 1) << 9) |
3704 (contents << 10) |
3705 ((seg_not_present ^ 1) << 15) |
3706 (seg_32bit << 22) |
3707 (limit_in_pages << 23) |
3708 (lm << 21) |
3709 0x7000;
3710 if (!oldmode)
3711 entry_2 |= (useable << 20);
3713 /* Install the new entry ... */
3714 install:
3715 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3716 lp[0] = tswap32(entry_1);
3717 lp[1] = tswap32(entry_2);
3718 return 0;
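/*
 * Descriptive note (not in the original source): entry_1/entry_2 are the
 * two 32-bit halves of a standard x86 segment descriptor.  entry_1 holds
 * base[15:0] in its upper half and limit[15:0] in its lower half;
 * entry_2 holds the remaining base and limit bits plus the attribute
 * bits (type, present, AVL, L, D/B, G), with the constant 0x7000 setting
 * S=1 and DPL=3.  write_ldt() therefore just repacks the flag bits the
 * guest supplied into the layout the CPU expects before storing the
 * descriptor in the chosen LDT slot.
 */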
3721 /* specific and weird i386 syscalls */
3722 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3723 unsigned long bytecount)
3725 abi_long ret;
3727 switch (func) {
3728 case 0:
3729 ret = read_ldt(ptr, bytecount);
3730 break;
3731 case 1:
3732 ret = write_ldt(env, ptr, bytecount, 1);
3733 break;
3734 case 0x11:
3735 ret = write_ldt(env, ptr, bytecount, 0);
3736 break;
3737 default:
3738 ret = -TARGET_ENOSYS;
3739 break;
3741 return ret;
3744 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3745 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3747 uint64_t *gdt_table = g2h(env->gdt.base);
3748 struct target_modify_ldt_ldt_s ldt_info;
3749 struct target_modify_ldt_ldt_s *target_ldt_info;
3750 int seg_32bit, contents, read_exec_only, limit_in_pages;
3751 int seg_not_present, useable, lm;
3752 uint32_t *lp, entry_1, entry_2;
3753 int i;
3755 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3756 if (!target_ldt_info)
3757 return -TARGET_EFAULT;
3758 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3759 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3760 ldt_info.limit = tswap32(target_ldt_info->limit);
3761 ldt_info.flags = tswap32(target_ldt_info->flags);
3762 if (ldt_info.entry_number == -1) {
3763 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3764 if (gdt_table[i] == 0) {
3765 ldt_info.entry_number = i;
3766 target_ldt_info->entry_number = tswap32(i);
3767 break;
3771 unlock_user_struct(target_ldt_info, ptr, 1);
3773 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3774 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3775 return -TARGET_EINVAL;
3776 seg_32bit = ldt_info.flags & 1;
3777 contents = (ldt_info.flags >> 1) & 3;
3778 read_exec_only = (ldt_info.flags >> 3) & 1;
3779 limit_in_pages = (ldt_info.flags >> 4) & 1;
3780 seg_not_present = (ldt_info.flags >> 5) & 1;
3781 useable = (ldt_info.flags >> 6) & 1;
3782 #ifdef TARGET_ABI32
3783 lm = 0;
3784 #else
3785 lm = (ldt_info.flags >> 7) & 1;
3786 #endif
3788 if (contents == 3) {
3789 if (seg_not_present == 0)
3790 return -TARGET_EINVAL;
3793 /* NOTE: same code as Linux kernel */
3794 /* Allow LDTs to be cleared by the user. */
3795 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3796 if ((contents == 0 &&
3797 read_exec_only == 1 &&
3798 seg_32bit == 0 &&
3799 limit_in_pages == 0 &&
3800 seg_not_present == 1 &&
3801 useable == 0 )) {
3802 entry_1 = 0;
3803 entry_2 = 0;
3804 goto install;
3808 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3809 (ldt_info.limit & 0x0ffff);
3810 entry_2 = (ldt_info.base_addr & 0xff000000) |
3811 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3812 (ldt_info.limit & 0xf0000) |
3813 ((read_exec_only ^ 1) << 9) |
3814 (contents << 10) |
3815 ((seg_not_present ^ 1) << 15) |
3816 (seg_32bit << 22) |
3817 (limit_in_pages << 23) |
3818 (useable << 20) |
3819 (lm << 21) |
3820 0x7000;
3822 /* Install the new entry ... */
3823 install:
3824 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3825 lp[0] = tswap32(entry_1);
3826 lp[1] = tswap32(entry_2);
3827 return 0;
3830 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3832 struct target_modify_ldt_ldt_s *target_ldt_info;
3833 uint64_t *gdt_table = g2h(env->gdt.base);
3834 uint32_t base_addr, limit, flags;
3835 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3836 int seg_not_present, useable, lm;
3837 uint32_t *lp, entry_1, entry_2;
3839 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3840 if (!target_ldt_info)
3841 return -TARGET_EFAULT;
3842 idx = tswap32(target_ldt_info->entry_number);
3843 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3844 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3845 unlock_user_struct(target_ldt_info, ptr, 1);
3846 return -TARGET_EINVAL;
3848 lp = (uint32_t *)(gdt_table + idx);
3849 entry_1 = tswap32(lp[0]);
3850 entry_2 = tswap32(lp[1]);
3852 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3853 contents = (entry_2 >> 10) & 3;
3854 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3855 seg_32bit = (entry_2 >> 22) & 1;
3856 limit_in_pages = (entry_2 >> 23) & 1;
3857 useable = (entry_2 >> 20) & 1;
3858 #ifdef TARGET_ABI32
3859 lm = 0;
3860 #else
3861 lm = (entry_2 >> 21) & 1;
3862 #endif
3863 flags = (seg_32bit << 0) | (contents << 1) |
3864 (read_exec_only << 3) | (limit_in_pages << 4) |
3865 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3866 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3867 base_addr = (entry_1 >> 16) |
3868 (entry_2 & 0xff000000) |
3869 ((entry_2 & 0xff) << 16);
3870 target_ldt_info->base_addr = tswapal(base_addr);
3871 target_ldt_info->limit = tswap32(limit);
3872 target_ldt_info->flags = tswap32(flags);
3873 unlock_user_struct(target_ldt_info, ptr, 1);
3874 return 0;
3876 #endif /* TARGET_I386 && TARGET_ABI32 */
3878 #ifndef TARGET_ABI32
3879 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3881 abi_long ret = 0;
3882 abi_ulong val;
3883 int idx;
3885 switch(code) {
3886 case TARGET_ARCH_SET_GS:
3887 case TARGET_ARCH_SET_FS:
3888 if (code == TARGET_ARCH_SET_GS)
3889 idx = R_GS;
3890 else
3891 idx = R_FS;
3892 cpu_x86_load_seg(env, idx, 0);
3893 env->segs[idx].base = addr;
3894 break;
3895 case TARGET_ARCH_GET_GS:
3896 case TARGET_ARCH_GET_FS:
3897 if (code == TARGET_ARCH_GET_GS)
3898 idx = R_GS;
3899 else
3900 idx = R_FS;
3901 val = env->segs[idx].base;
3902 if (put_user(val, addr, abi_ulong))
3903 ret = -TARGET_EFAULT;
3904 break;
3905 default:
3906 ret = -TARGET_EINVAL;
3907 break;
3909 return ret;
3911 #endif
3913 #endif /* defined(TARGET_I386) */
3915 #define NEW_STACK_SIZE 0x40000
3917 #if defined(CONFIG_USE_NPTL)
3919 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3920 typedef struct {
3921 CPUState *env;
3922 pthread_mutex_t mutex;
3923 pthread_cond_t cond;
3924 pthread_t thread;
3925 uint32_t tid;
3926 abi_ulong child_tidptr;
3927 abi_ulong parent_tidptr;
3928 sigset_t sigmask;
3929 } new_thread_info;
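/* Handshake data shared between do_fork() and the child created by
 * pthread_create(): the parent fills in env and the tid pointers while
 * holding info->mutex, the child publishes its tid and signals
 * info->cond once it has set up its TaskState, and the parent then
 * returns info->tid as the result of the emulated clone(). */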
3931 static void *clone_func(void *arg)
3933 new_thread_info *info = arg;
3934 CPUState *env;
3935 TaskState *ts;
3937 env = info->env;
3938 thread_env = env;
3939 ts = (TaskState *)thread_env->opaque;
3940 info->tid = gettid();
3941 env->host_tid = info->tid;
3942 task_settid(ts);
3943 if (info->child_tidptr)
3944 put_user_u32(info->tid, info->child_tidptr);
3945 if (info->parent_tidptr)
3946 put_user_u32(info->tid, info->parent_tidptr);
3947 /* Enable signals. */
3948 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3949 /* Signal to the parent that we're ready. */
3950 pthread_mutex_lock(&info->mutex);
3951 pthread_cond_broadcast(&info->cond);
3952 pthread_mutex_unlock(&info->mutex);
3953 /* Wait until the parent has finished initializing the TLS state. */
3954 pthread_mutex_lock(&clone_lock);
3955 pthread_mutex_unlock(&clone_lock);
3956 cpu_loop(env);
3957 /* never exits */
3958 return NULL;
3960 #else
3962 static int clone_func(void *arg)
3964 CPUState *env = arg;
3965 cpu_loop(env);
3966 /* never exits */
3967 return 0;
3969 #endif
3971 /* do_fork() must return host values and target errnos (unlike most
3972 do_*() functions). */
3973 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3974 abi_ulong parent_tidptr, target_ulong newtls,
3975 abi_ulong child_tidptr)
3977 int ret;
3978 TaskState *ts;
3979 CPUState *new_env;
3980 #if defined(CONFIG_USE_NPTL)
3981 unsigned int nptl_flags;
3982 sigset_t sigmask;
3983 #else
3984 uint8_t *new_stack;
3985 #endif
3987 /* Emulate vfork() with fork() */
3988 if (flags & CLONE_VFORK)
3989 flags &= ~(CLONE_VFORK | CLONE_VM);
3991 if (flags & CLONE_VM) {
3992 TaskState *parent_ts = (TaskState *)env->opaque;
3993 #if defined(CONFIG_USE_NPTL)
3994 new_thread_info info;
3995 pthread_attr_t attr;
3996 #endif
3997 ts = g_malloc0(sizeof(TaskState));
3998 init_task_state(ts);
3999 /* we create a new CPU instance. */
4000 new_env = cpu_copy(env);
4001 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4002 cpu_reset(new_env);
4003 #endif
4004 /* Init regs that differ from the parent. */
4005 cpu_clone_regs(new_env, newsp);
4006 new_env->opaque = ts;
4007 ts->bprm = parent_ts->bprm;
4008 ts->info = parent_ts->info;
4009 #if defined(CONFIG_USE_NPTL)
4010 nptl_flags = flags;
4011 flags &= ~CLONE_NPTL_FLAGS2;
4013 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4014 ts->child_tidptr = child_tidptr;
4017 if (nptl_flags & CLONE_SETTLS)
4018 cpu_set_tls (new_env, newtls);
4020 /* Grab a mutex so that thread setup appears atomic. */
4021 pthread_mutex_lock(&clone_lock);
4023 memset(&info, 0, sizeof(info));
4024 pthread_mutex_init(&info.mutex, NULL);
4025 pthread_mutex_lock(&info.mutex);
4026 pthread_cond_init(&info.cond, NULL);
4027 info.env = new_env;
4028 if (nptl_flags & CLONE_CHILD_SETTID)
4029 info.child_tidptr = child_tidptr;
4030 if (nptl_flags & CLONE_PARENT_SETTID)
4031 info.parent_tidptr = parent_tidptr;
4033 ret = pthread_attr_init(&attr);
4034 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4035 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4036 /* It is not safe to deliver signals until the child has finished
4037 initializing, so temporarily block all signals. */
4038 sigfillset(&sigmask);
4039 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4041 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4042 /* TODO: Free new CPU state if thread creation failed. */
4044 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4045 pthread_attr_destroy(&attr);
4046 if (ret == 0) {
4047 /* Wait for the child to initialize. */
4048 pthread_cond_wait(&info.cond, &info.mutex);
4049 ret = info.tid;
4050 if (flags & CLONE_PARENT_SETTID)
4051 put_user_u32(ret, parent_tidptr);
4052 } else {
4053 ret = -1;
4055 pthread_mutex_unlock(&info.mutex);
4056 pthread_cond_destroy(&info.cond);
4057 pthread_mutex_destroy(&info.mutex);
4058 pthread_mutex_unlock(&clone_lock);
4059 #else
4060 if (flags & CLONE_NPTL_FLAGS2)
4061 return -EINVAL;
4062 /* This is probably going to die very quickly, but do it anyway. */
4063 new_stack = g_malloc0 (NEW_STACK_SIZE);
4064 #ifdef __ia64__
4065 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4066 #else
4067 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4068 #endif
4069 #endif
4070 } else {
4071 /* if there is no CLONE_VM, we consider it a fork */
4072 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4073 return -EINVAL;
4074 fork_start();
4075 ret = fork();
4076 if (ret == 0) {
4077 /* Child Process. */
4078 cpu_clone_regs(env, newsp);
4079 fork_end(1);
4080 #if defined(CONFIG_USE_NPTL)
4081 /* There is a race condition here. The parent process could
4082 theoretically read the TID in the child process before the child
4083 tid is set. This would require using either ptrace
4084 (not implemented) or having *_tidptr point at a shared memory
4085 mapping. We can't repeat the spinlock hack used above because
4086 the child process gets its own copy of the lock. */
4087 if (flags & CLONE_CHILD_SETTID)
4088 put_user_u32(gettid(), child_tidptr);
4089 if (flags & CLONE_PARENT_SETTID)
4090 put_user_u32(gettid(), parent_tidptr);
4091 ts = (TaskState *)env->opaque;
4092 if (flags & CLONE_SETTLS)
4093 cpu_set_tls (env, newtls);
4094 if (flags & CLONE_CHILD_CLEARTID)
4095 ts->child_tidptr = child_tidptr;
4096 #endif
4097 } else {
4098 fork_end(0);
4101 return ret;
4104 /* Warning: doesn't handle Linux-specific flags... */
4105 static int target_to_host_fcntl_cmd(int cmd)
4107 switch(cmd) {
4108 case TARGET_F_DUPFD:
4109 case TARGET_F_GETFD:
4110 case TARGET_F_SETFD:
4111 case TARGET_F_GETFL:
4112 case TARGET_F_SETFL:
4113 return cmd;
4114 case TARGET_F_GETLK:
4115 return F_GETLK;
4116 case TARGET_F_SETLK:
4117 return F_SETLK;
4118 case TARGET_F_SETLKW:
4119 return F_SETLKW;
4120 case TARGET_F_GETOWN:
4121 return F_GETOWN;
4122 case TARGET_F_SETOWN:
4123 return F_SETOWN;
4124 case TARGET_F_GETSIG:
4125 return F_GETSIG;
4126 case TARGET_F_SETSIG:
4127 return F_SETSIG;
4128 #if TARGET_ABI_BITS == 32
4129 case TARGET_F_GETLK64:
4130 return F_GETLK64;
4131 case TARGET_F_SETLK64:
4132 return F_SETLK64;
4133 case TARGET_F_SETLKW64:
4134 return F_SETLKW64;
4135 #endif
4136 case TARGET_F_SETLEASE:
4137 return F_SETLEASE;
4138 case TARGET_F_GETLEASE:
4139 return F_GETLEASE;
4140 #ifdef F_DUPFD_CLOEXEC
4141 case TARGET_F_DUPFD_CLOEXEC:
4142 return F_DUPFD_CLOEXEC;
4143 #endif
4144 case TARGET_F_NOTIFY:
4145 return F_NOTIFY;
4146 default:
4147 return -TARGET_EINVAL;
4149 return -TARGET_EINVAL;
4152 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4154 struct flock fl;
4155 struct target_flock *target_fl;
4156 struct flock64 fl64;
4157 struct target_flock64 *target_fl64;
4158 abi_long ret;
4159 int host_cmd = target_to_host_fcntl_cmd(cmd);
4161 if (host_cmd == -TARGET_EINVAL)
4162 return host_cmd;
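/* The target and host flock structures differ in field widths and byte
 * order (and the *64 variants carry 64-bit l_start/l_len), so lock
 * descriptions are converted field by field in both directions rather
 * than passed through unchanged. */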
4164 switch(cmd) {
4165 case TARGET_F_GETLK:
4166 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4167 return -TARGET_EFAULT;
4168 fl.l_type = tswap16(target_fl->l_type);
4169 fl.l_whence = tswap16(target_fl->l_whence);
4170 fl.l_start = tswapal(target_fl->l_start);
4171 fl.l_len = tswapal(target_fl->l_len);
4172 fl.l_pid = tswap32(target_fl->l_pid);
4173 unlock_user_struct(target_fl, arg, 0);
4174 ret = get_errno(fcntl(fd, host_cmd, &fl));
4175 if (ret == 0) {
4176 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4177 return -TARGET_EFAULT;
4178 target_fl->l_type = tswap16(fl.l_type);
4179 target_fl->l_whence = tswap16(fl.l_whence);
4180 target_fl->l_start = tswapal(fl.l_start);
4181 target_fl->l_len = tswapal(fl.l_len);
4182 target_fl->l_pid = tswap32(fl.l_pid);
4183 unlock_user_struct(target_fl, arg, 1);
4185 break;
4187 case TARGET_F_SETLK:
4188 case TARGET_F_SETLKW:
4189 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4190 return -TARGET_EFAULT;
4191 fl.l_type = tswap16(target_fl->l_type);
4192 fl.l_whence = tswap16(target_fl->l_whence);
4193 fl.l_start = tswapal(target_fl->l_start);
4194 fl.l_len = tswapal(target_fl->l_len);
4195 fl.l_pid = tswap32(target_fl->l_pid);
4196 unlock_user_struct(target_fl, arg, 0);
4197 ret = get_errno(fcntl(fd, host_cmd, &fl));
4198 break;
4200 case TARGET_F_GETLK64:
4201 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4202 return -TARGET_EFAULT;
4203 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4204 fl64.l_whence = tswap16(target_fl64->l_whence);
4205 fl64.l_start = tswap64(target_fl64->l_start);
4206 fl64.l_len = tswap64(target_fl64->l_len);
4207 fl64.l_pid = tswap32(target_fl64->l_pid);
4208 unlock_user_struct(target_fl64, arg, 0);
4209 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4210 if (ret == 0) {
4211 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4212 return -TARGET_EFAULT;
4213 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4214 target_fl64->l_whence = tswap16(fl64.l_whence);
4215 target_fl64->l_start = tswap64(fl64.l_start);
4216 target_fl64->l_len = tswap64(fl64.l_len);
4217 target_fl64->l_pid = tswap32(fl64.l_pid);
4218 unlock_user_struct(target_fl64, arg, 1);
4220 break;
4221 case TARGET_F_SETLK64:
4222 case TARGET_F_SETLKW64:
4223 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4224 return -TARGET_EFAULT;
4225 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4226 fl64.l_whence = tswap16(target_fl64->l_whence);
4227 fl64.l_start = tswap64(target_fl64->l_start);
4228 fl64.l_len = tswap64(target_fl64->l_len);
4229 fl64.l_pid = tswap32(target_fl64->l_pid);
4230 unlock_user_struct(target_fl64, arg, 0);
4231 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4232 break;
4234 case TARGET_F_GETFL:
4235 ret = get_errno(fcntl(fd, host_cmd, arg));
4236 if (ret >= 0) {
4237 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4239 break;
4241 case TARGET_F_SETFL:
4242 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4243 break;
4245 case TARGET_F_SETOWN:
4246 case TARGET_F_GETOWN:
4247 case TARGET_F_SETSIG:
4248 case TARGET_F_GETSIG:
4249 case TARGET_F_SETLEASE:
4250 case TARGET_F_GETLEASE:
4251 ret = get_errno(fcntl(fd, host_cmd, arg));
4252 break;
4254 default:
4255 ret = get_errno(fcntl(fd, cmd, arg));
4256 break;
4258 return ret;
4261 #ifdef USE_UID16
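/* Helpers for the legacy 16-bit uid/gid syscalls: host ids that do not
 * fit in 16 bits are reported as the overflow id 65534, while a target
 * id of (uint16_t)-1 is widened back to -1 so that "leave unchanged"
 * arguments keep their meaning. */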
4263 static inline int high2lowuid(int uid)
4265 if (uid > 65535)
4266 return 65534;
4267 else
4268 return uid;
4271 static inline int high2lowgid(int gid)
4273 if (gid > 65535)
4274 return 65534;
4275 else
4276 return gid;
4279 static inline int low2highuid(int uid)
4281 if ((int16_t)uid == -1)
4282 return -1;
4283 else
4284 return uid;
4287 static inline int low2highgid(int gid)
4289 if ((int16_t)gid == -1)
4290 return -1;
4291 else
4292 return gid;
4294 static inline int tswapid(int id)
4296 return tswap16(id);
4298 #else /* !USE_UID16 */
4299 static inline int high2lowuid(int uid)
4301 return uid;
4303 static inline int high2lowgid(int gid)
4305 return gid;
4307 static inline int low2highuid(int uid)
4309 return uid;
4311 static inline int low2highgid(int gid)
4313 return gid;
4315 static inline int tswapid(int id)
4317 return tswap32(id);
4319 #endif /* USE_UID16 */
4321 void syscall_init(void)
4323 IOCTLEntry *ie;
4324 const argtype *arg_type;
4325 int size;
4326 int i;
4328 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4329 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4330 #include "syscall_types.h"
4331 #undef STRUCT
4332 #undef STRUCT_SPECIAL
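/* The STRUCT/STRUCT_SPECIAL expansions above register every structure
 * layout listed in syscall_types.h with the thunk machinery, which is
 * what later converts ioctl argument structures between target and
 * host representations. */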
4334 /* We patch the ioctl size if necessary. We rely on the fact that
4335 no ioctl has all bits set to '1' in its size field. */
4336 ie = ioctl_entries;
4337 while (ie->target_cmd != 0) {
4338 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4339 TARGET_IOC_SIZEMASK) {
4340 arg_type = ie->arg_type;
4341 if (arg_type[0] != TYPE_PTR) {
4342 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4343 ie->target_cmd);
4344 exit(1);
4346 arg_type++;
4347 size = thunk_type_size(arg_type, 0);
4348 ie->target_cmd = (ie->target_cmd &
4349 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4350 (size << TARGET_IOC_SIZESHIFT);
4353 /* Build the target_to_host_errno_table[] from
4354 * host_to_target_errno_table[]. */
4355 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4356 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4358 /* automatic consistency check if same arch */
4359 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4360 (defined(__x86_64__) && defined(TARGET_X86_64))
4361 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4362 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4363 ie->name, ie->target_cmd, ie->host_cmd);
4365 #endif
4366 ie++;
4370 #if TARGET_ABI_BITS == 32
4371 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4373 #ifdef TARGET_WORDS_BIGENDIAN
4374 return ((uint64_t)word0 << 32) | word1;
4375 #else
4376 return ((uint64_t)word1 << 32) | word0;
4377 #endif
4379 #else /* TARGET_ABI_BITS == 32 */
4380 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4382 return word0;
4384 #endif /* TARGET_ABI_BITS != 32 */
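/* On 32-bit ABIs a 64-bit file offset arrives split across two syscall
 * argument slots; target_offset64() reassembles it in target word order.
 * The truncate64/ftruncate64 wrappers below also honour
 * regpairs_aligned(), which reports ABIs that pass such pairs in aligned
 * register pairs and therefore shift the offset words up by one slot. */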
4386 #ifdef TARGET_NR_truncate64
4387 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4388 abi_long arg2,
4389 abi_long arg3,
4390 abi_long arg4)
4392 if (regpairs_aligned(cpu_env)) {
4393 arg2 = arg3;
4394 arg3 = arg4;
4396 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4398 #endif
4400 #ifdef TARGET_NR_ftruncate64
4401 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4402 abi_long arg2,
4403 abi_long arg3,
4404 abi_long arg4)
4406 if (regpairs_aligned(cpu_env)) {
4407 arg2 = arg3;
4408 arg3 = arg4;
4410 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4412 #endif
4414 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4415 abi_ulong target_addr)
4417 struct target_timespec *target_ts;
4419 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4420 return -TARGET_EFAULT;
4421 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4422 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4423 unlock_user_struct(target_ts, target_addr, 0);
4424 return 0;
4427 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4428 struct timespec *host_ts)
4430 struct target_timespec *target_ts;
4432 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4433 return -TARGET_EFAULT;
4434 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4435 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4436 unlock_user_struct(target_ts, target_addr, 1);
4437 return 0;
4440 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4441 static inline abi_long host_to_target_stat64(void *cpu_env,
4442 abi_ulong target_addr,
4443 struct stat *host_st)
4445 #ifdef TARGET_ARM
4446 if (((CPUARMState *)cpu_env)->eabi) {
4447 struct target_eabi_stat64 *target_st;
4449 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4450 return -TARGET_EFAULT;
4451 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4452 __put_user(host_st->st_dev, &target_st->st_dev);
4453 __put_user(host_st->st_ino, &target_st->st_ino);
4454 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4455 __put_user(host_st->st_ino, &target_st->__st_ino);
4456 #endif
4457 __put_user(host_st->st_mode, &target_st->st_mode);
4458 __put_user(host_st->st_nlink, &target_st->st_nlink);
4459 __put_user(host_st->st_uid, &target_st->st_uid);
4460 __put_user(host_st->st_gid, &target_st->st_gid);
4461 __put_user(host_st->st_rdev, &target_st->st_rdev);
4462 __put_user(host_st->st_size, &target_st->st_size);
4463 __put_user(host_st->st_blksize, &target_st->st_blksize);
4464 __put_user(host_st->st_blocks, &target_st->st_blocks);
4465 __put_user(host_st->st_atime, &target_st->target_st_atime);
4466 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4467 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4468 unlock_user_struct(target_st, target_addr, 1);
4469 } else
4470 #endif
4472 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4473 struct target_stat *target_st;
4474 #else
4475 struct target_stat64 *target_st;
4476 #endif
4478 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4479 return -TARGET_EFAULT;
4480 memset(target_st, 0, sizeof(*target_st));
4481 __put_user(host_st->st_dev, &target_st->st_dev);
4482 __put_user(host_st->st_ino, &target_st->st_ino);
4483 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4484 __put_user(host_st->st_ino, &target_st->__st_ino);
4485 #endif
4486 __put_user(host_st->st_mode, &target_st->st_mode);
4487 __put_user(host_st->st_nlink, &target_st->st_nlink);
4488 __put_user(host_st->st_uid, &target_st->st_uid);
4489 __put_user(host_st->st_gid, &target_st->st_gid);
4490 __put_user(host_st->st_rdev, &target_st->st_rdev);
4491 /* XXX: better use of kernel struct */
4492 __put_user(host_st->st_size, &target_st->st_size);
4493 __put_user(host_st->st_blksize, &target_st->st_blksize);
4494 __put_user(host_st->st_blocks, &target_st->st_blocks);
4495 __put_user(host_st->st_atime, &target_st->target_st_atime);
4496 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4497 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4498 unlock_user_struct(target_st, target_addr, 1);
4501 return 0;
4503 #endif
4505 #if defined(CONFIG_USE_NPTL)
4506 /* ??? Using host futex calls even when target atomic operations
4507 are not really atomic probably breaks things. However, implementing
4508 futexes locally would make futexes shared between multiple processes
4509 tricky. In any case they're probably useless, because guest atomic
4510 operations won't work either. */
4511 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4512 target_ulong uaddr2, int val3)
4514 struct timespec ts, *pts;
4515 int base_op;
4517 /* ??? We assume FUTEX_* constants are the same on both host
4518 and target. */
4519 #ifdef FUTEX_CMD_MASK
4520 base_op = op & FUTEX_CMD_MASK;
4521 #else
4522 base_op = op;
4523 #endif
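/* Argument conversion below: for FUTEX_WAIT the timeout is read from a
 * target timespec, and 'val' is converted back to target byte order with
 * tswap32() because the host kernel compares it against the futex word
 * as it sits (target-endian) in guest memory; FUTEX_CMP_REQUEUE treats
 * val3 the same way. */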
4524 switch (base_op) {
4525 case FUTEX_WAIT:
4526 if (timeout) {
4527 pts = &ts;
4528 target_to_host_timespec(pts, timeout);
4529 } else {
4530 pts = NULL;
4532 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4533 pts, NULL, 0));
4534 case FUTEX_WAKE:
4535 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4536 case FUTEX_FD:
4537 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4538 case FUTEX_REQUEUE:
4539 case FUTEX_CMP_REQUEUE:
4540 case FUTEX_WAKE_OP:
4541 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4542 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4543 But the prototype takes a `struct timespec *'; insert casts
4544 to satisfy the compiler. We do not need to tswap TIMEOUT
4545 since it's not compared to guest memory. */
4546 pts = (struct timespec *)(uintptr_t) timeout;
4547 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4548 g2h(uaddr2),
4549 (base_op == FUTEX_CMP_REQUEUE
4550 ? tswap32(val3)
4551 : val3)));
4552 default:
4553 return -TARGET_ENOSYS;
4556 #endif
4558 /* Map host to target signal numbers for the wait family of syscalls.
4559 Assume all other status bits are the same. */
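/* Linux packs the terminating signal into the low 7 bits (bit 7 is the
 * core-dump flag) and reports "stopped" as 0x7f in the low byte with the
 * stop signal in bits 8-15, so only the signal numbers themselves need
 * remapping here. */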
4560 static int host_to_target_waitstatus(int status)
4562 if (WIFSIGNALED(status)) {
4563 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4565 if (WIFSTOPPED(status)) {
4566 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4567 | (status & 0xff);
4569 return status;
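/* Parses up to three dot-separated components of the kernel release
 * string into one packed integer, e.g. "2.6.32" becomes
 * (2 << 16) | (6 << 8) | 32 = 0x020620, presumably so that callers can
 * compare kernel versions numerically. */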
4572 int get_osversion(void)
4574 static int osversion;
4575 struct new_utsname buf;
4576 const char *s;
4577 int i, n, tmp;
4578 if (osversion)
4579 return osversion;
4580 if (qemu_uname_release && *qemu_uname_release) {
4581 s = qemu_uname_release;
4582 } else {
4583 if (sys_uname(&buf))
4584 return 0;
4585 s = buf.release;
4587 tmp = 0;
4588 for (i = 0; i < 3; i++) {
4589 n = 0;
4590 while (*s >= '0' && *s <= '9') {
4591 n *= 10;
4592 n += *s - '0';
4593 s++;
4595 tmp = (tmp << 8) + n;
4596 if (*s == '.')
4597 s++;
4599 osversion = tmp;
4600 return osversion;
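/* Fake /proc/self/maps: only a single line describing the guest stack
 * mapping is written, which is typically enough for guests that just
 * scan for the [stack] entry (glibc's pthread_getattr_np(), for
 * instance). */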
4604 static int open_self_maps(void *cpu_env, int fd)
4606 TaskState *ts = ((CPUState *)cpu_env)->opaque;
4608 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4609 (unsigned long long)ts->info->stack_limit,
4610 (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1))
4611 & TARGET_PAGE_MASK,
4612 (unsigned long long)ts->stack_base);
4614 return 0;
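/* Fake /proc/self/stat: 44 space-separated numeric fields are written,
 * all zero except field 28 ("startstack" in proc(5)), which is filled
 * with the guest's initial stack address. */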
4617 static int open_self_stat(void *cpu_env, int fd)
4619 TaskState *ts = ((CPUState *)cpu_env)->opaque;
4620 abi_ulong start_stack = ts->info->start_stack;
4621 int i;
4623 for (i = 0; i < 44; i++) {
4624 char buf[128];
4625 int len;
4626 uint64_t val = 0;
4628 if (i == 27) {
4629 /* stack bottom */
4630 val = start_stack;
4632 snprintf(buf, sizeof(buf), "%"PRId64 "%c", val, i == 43 ? '\n' : ' ');
4633 len = strlen(buf);
4634 if (write(fd, buf, len) != len) {
4635 return -1;
4639 return 0;
4642 static int open_self_auxv(void *cpu_env, int fd)
4644 TaskState *ts = ((CPUState *)cpu_env)->opaque;
4645 abi_ulong auxv = ts->info->saved_auxv;
4646 abi_ulong len = ts->info->auxv_len;
4647 char *ptr;
4650 * The auxiliary vector is stored on the target process stack;
4651 * read the whole auxv vector and copy it to the file.
4653 ptr = lock_user(VERIFY_READ, auxv, len, 0);
4654 if (ptr != NULL) {
4655 while (len > 0) {
4656 ssize_t r;
4657 r = write(fd, ptr, len);
4658 if (r <= 0) {
4659 break;
4661 len -= r;
4662 ptr += r;
4664 lseek(fd, 0, SEEK_SET);
4665 unlock_user(ptr, auxv, len);
4668 return 0;
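/* Intercept opens of a few /proc/self files whose host contents would be
 * wrong under emulation.  A matching path is redirected to an anonymous
 * (already unlinked) temporary file whose contents are synthesized by the
 * corresponding fill() callback; any other path falls through to a real
 * open(). */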
4671 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
4673 struct fake_open {
4674 const char *filename;
4675 int (*fill)(void *cpu_env, int fd);
4677 const struct fake_open *fake_open;
4678 static const struct fake_open fakes[] = {
4679 { "/proc/self/maps", open_self_maps },
4680 { "/proc/self/stat", open_self_stat },
4681 { "/proc/self/auxv", open_self_auxv },
4682 { NULL, NULL }
4685 for (fake_open = fakes; fake_open->filename; fake_open++) {
4686 if (!strncmp(pathname, fake_open->filename,
4687 strlen(fake_open->filename))) {
4688 break;
4692 if (fake_open->filename) {
4693 const char *tmpdir;
4694 char filename[PATH_MAX];
4695 int fd, r;
4697 /* create a temporary file to hold the synthesized contents */
4698 tmpdir = getenv("TMPDIR");
4699 if (!tmpdir)
4700 tmpdir = "/tmp";
4701 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
4702 fd = mkstemp(filename);
4703 if (fd < 0) {
4704 return fd;
4706 unlink(filename);
4708 if ((r = fake_open->fill(cpu_env, fd))) {
4709 close(fd);
4710 return r;
4712 lseek(fd, 0, SEEK_SET);
4714 return fd;
4717 return get_errno(open(path(pathname), flags, mode));
4720 /* do_syscall() should always have a single exit point at the end so
4721 that actions, such as logging of syscall results, can be performed.
4722 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4723 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4724 abi_long arg2, abi_long arg3, abi_long arg4,
4725 abi_long arg5, abi_long arg6, abi_long arg7,
4726 abi_long arg8)
4728 abi_long ret;
4729 struct stat st;
4730 struct statfs stfs;
4731 void *p;
4733 #ifdef DEBUG
4734 gemu_log("syscall %d", num);
4735 #endif
4736 if(do_strace)
4737 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4739 switch(num) {
4740 case TARGET_NR_exit:
4741 #ifdef CONFIG_USE_NPTL
4742 /* In old applications this may be used to implement _exit(2).
4743 However, in threaded applications it is used for thread termination,
4744 and _exit_group is used for application termination.
4745 Do thread termination if we have more than one thread. */
4746 /* FIXME: This probably breaks if a signal arrives. We should probably
4747 be disabling signals. */
4748 if (first_cpu->next_cpu) {
4749 TaskState *ts;
4750 CPUState **lastp;
4751 CPUState *p;
4753 cpu_list_lock();
4754 lastp = &first_cpu;
4755 p = first_cpu;
4756 while (p && p != (CPUState *)cpu_env) {
4757 lastp = &p->next_cpu;
4758 p = p->next_cpu;
4760 /* If we didn't find the CPU for this thread then something is
4761 horribly wrong. */
4762 if (!p)
4763 abort();
4764 /* Remove the CPU from the list. */
4765 *lastp = p->next_cpu;
4766 cpu_list_unlock();
4767 ts = ((CPUState *)cpu_env)->opaque;
4768 if (ts->child_tidptr) {
4769 put_user_u32(0, ts->child_tidptr);
4770 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4771 NULL, NULL, 0);
4773 thread_env = NULL;
4774 g_free(cpu_env);
4775 g_free(ts);
4776 pthread_exit(NULL);
4778 #endif
4779 #ifdef TARGET_GPROF
4780 _mcleanup();
4781 #endif
4782 gdb_exit(cpu_env, arg1);
4783 _exit(arg1);
4784 ret = 0; /* avoid warning */
4785 break;
4786 case TARGET_NR_read:
4787 if (arg3 == 0)
4788 ret = 0;
4789 else {
4790 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4791 goto efault;
4792 ret = get_errno(read(arg1, p, arg3));
4793 unlock_user(p, arg2, ret);
4795 break;
4796 case TARGET_NR_write:
4797 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4798 goto efault;
4799 ret = get_errno(write(arg1, p, arg3));
4800 unlock_user(p, arg2, 0);
4801 break;
4802 case TARGET_NR_open:
4803 if (!(p = lock_user_string(arg1)))
4804 goto efault;
4805 ret = get_errno(do_open(cpu_env, p,
4806 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4807 arg3));
4808 unlock_user(p, arg1, 0);
4809 break;
4810 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4811 case TARGET_NR_openat:
4812 if (!(p = lock_user_string(arg2)))
4813 goto efault;
4814 ret = get_errno(sys_openat(arg1,
4815 path(p),
4816 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4817 arg4));
4818 unlock_user(p, arg2, 0);
4819 break;
4820 #endif
4821 case TARGET_NR_close:
4822 ret = get_errno(close(arg1));
4823 break;
4824 case TARGET_NR_brk:
4825 ret = do_brk(arg1);
4826 break;
4827 case TARGET_NR_fork:
4828 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4829 break;
4830 #ifdef TARGET_NR_waitpid
4831 case TARGET_NR_waitpid:
4833 int status;
4834 ret = get_errno(waitpid(arg1, &status, arg3));
4835 if (!is_error(ret) && arg2
4836 && put_user_s32(host_to_target_waitstatus(status), arg2))
4837 goto efault;
4839 break;
4840 #endif
4841 #ifdef TARGET_NR_waitid
4842 case TARGET_NR_waitid:
4844 siginfo_t info;
4845 info.si_pid = 0;
4846 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4847 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4848 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4849 goto efault;
4850 host_to_target_siginfo(p, &info);
4851 unlock_user(p, arg3, sizeof(target_siginfo_t));
4854 break;
4855 #endif
4856 #ifdef TARGET_NR_creat /* not on alpha */
4857 case TARGET_NR_creat:
4858 if (!(p = lock_user_string(arg1)))
4859 goto efault;
4860 ret = get_errno(creat(p, arg2));
4861 unlock_user(p, arg1, 0);
4862 break;
4863 #endif
4864 case TARGET_NR_link:
4866 void * p2;
4867 p = lock_user_string(arg1);
4868 p2 = lock_user_string(arg2);
4869 if (!p || !p2)
4870 ret = -TARGET_EFAULT;
4871 else
4872 ret = get_errno(link(p, p2));
4873 unlock_user(p2, arg2, 0);
4874 unlock_user(p, arg1, 0);
4876 break;
4877 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4878 case TARGET_NR_linkat:
4880 void * p2 = NULL;
4881 if (!arg2 || !arg4)
4882 goto efault;
4883 p = lock_user_string(arg2);
4884 p2 = lock_user_string(arg4);
4885 if (!p || !p2)
4886 ret = -TARGET_EFAULT;
4887 else
4888 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4889 unlock_user(p, arg2, 0);
4890 unlock_user(p2, arg4, 0);
4892 break;
4893 #endif
4894 case TARGET_NR_unlink:
4895 if (!(p = lock_user_string(arg1)))
4896 goto efault;
4897 ret = get_errno(unlink(p));
4898 unlock_user(p, arg1, 0);
4899 break;
4900 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4901 case TARGET_NR_unlinkat:
4902 if (!(p = lock_user_string(arg2)))
4903 goto efault;
4904 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4905 unlock_user(p, arg2, 0);
4906 break;
4907 #endif
4908 case TARGET_NR_execve:
4910 char **argp, **envp;
4911 int argc, envc;
4912 abi_ulong gp;
4913 abi_ulong guest_argp;
4914 abi_ulong guest_envp;
4915 abi_ulong addr;
4916 char **q;
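/* argv and envp live in guest memory as NULL-terminated arrays of guest
 * pointers: first count the entries, then lock each string into host
 * memory so a host execve() can be issued, and unlock everything again
 * on the way out (including on the EFAULT path). */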
4918 argc = 0;
4919 guest_argp = arg2;
4920 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4921 if (get_user_ual(addr, gp))
4922 goto efault;
4923 if (!addr)
4924 break;
4925 argc++;
4927 envc = 0;
4928 guest_envp = arg3;
4929 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4930 if (get_user_ual(addr, gp))
4931 goto efault;
4932 if (!addr)
4933 break;
4934 envc++;
4937 argp = alloca((argc + 1) * sizeof(void *));
4938 envp = alloca((envc + 1) * sizeof(void *));
4940 for (gp = guest_argp, q = argp; gp;
4941 gp += sizeof(abi_ulong), q++) {
4942 if (get_user_ual(addr, gp))
4943 goto execve_efault;
4944 if (!addr)
4945 break;
4946 if (!(*q = lock_user_string(addr)))
4947 goto execve_efault;
4949 *q = NULL;
4951 for (gp = guest_envp, q = envp; gp;
4952 gp += sizeof(abi_ulong), q++) {
4953 if (get_user_ual(addr, gp))
4954 goto execve_efault;
4955 if (!addr)
4956 break;
4957 if (!(*q = lock_user_string(addr)))
4958 goto execve_efault;
4960 *q = NULL;
4962 if (!(p = lock_user_string(arg1)))
4963 goto execve_efault;
4964 ret = get_errno(execve(p, argp, envp));
4965 unlock_user(p, arg1, 0);
4967 goto execve_end;
4969 execve_efault:
4970 ret = -TARGET_EFAULT;
4972 execve_end:
4973 for (gp = guest_argp, q = argp; *q;
4974 gp += sizeof(abi_ulong), q++) {
4975 if (get_user_ual(addr, gp)
4976 || !addr)
4977 break;
4978 unlock_user(*q, addr, 0);
4980 for (gp = guest_envp, q = envp; *q;
4981 gp += sizeof(abi_ulong), q++) {
4982 if (get_user_ual(addr, gp)
4983 || !addr)
4984 break;
4985 unlock_user(*q, addr, 0);
4988 break;
4989 case TARGET_NR_chdir:
4990 if (!(p = lock_user_string(arg1)))
4991 goto efault;
4992 ret = get_errno(chdir(p));
4993 unlock_user(p, arg1, 0);
4994 break;
4995 #ifdef TARGET_NR_time
4996 case TARGET_NR_time:
4998 time_t host_time;
4999 ret = get_errno(time(&host_time));
5000 if (!is_error(ret)
5001 && arg1
5002 && put_user_sal(host_time, arg1))
5003 goto efault;
5005 break;
5006 #endif
5007 case TARGET_NR_mknod:
5008 if (!(p = lock_user_string(arg1)))
5009 goto efault;
5010 ret = get_errno(mknod(p, arg2, arg3));
5011 unlock_user(p, arg1, 0);
5012 break;
5013 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5014 case TARGET_NR_mknodat:
5015 if (!(p = lock_user_string(arg2)))
5016 goto efault;
5017 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5018 unlock_user(p, arg2, 0);
5019 break;
5020 #endif
5021 case TARGET_NR_chmod:
5022 if (!(p = lock_user_string(arg1)))
5023 goto efault;
5024 ret = get_errno(chmod(p, arg2));
5025 unlock_user(p, arg1, 0);
5026 break;
5027 #ifdef TARGET_NR_break
5028 case TARGET_NR_break:
5029 goto unimplemented;
5030 #endif
5031 #ifdef TARGET_NR_oldstat
5032 case TARGET_NR_oldstat:
5033 goto unimplemented;
5034 #endif
5035 case TARGET_NR_lseek:
5036 ret = get_errno(lseek(arg1, arg2, arg3));
5037 break;
5038 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5039 /* Alpha specific */
5040 case TARGET_NR_getxpid:
5041 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5042 ret = get_errno(getpid());
5043 break;
5044 #endif
5045 #ifdef TARGET_NR_getpid
5046 case TARGET_NR_getpid:
5047 ret = get_errno(getpid());
5048 break;
5049 #endif
5050 case TARGET_NR_mount:
5052 /* need to look at the data field */
5053 void *p2, *p3;
5054 p = lock_user_string(arg1);
5055 p2 = lock_user_string(arg2);
5056 p3 = lock_user_string(arg3);
5057 if (!p || !p2 || !p3)
5058 ret = -TARGET_EFAULT;
5059 else {
5060 /* FIXME - arg5 should be locked, but it isn't clear how to
5061 * do that since it's not guaranteed to be a NULL-terminated
5062 * string.
5064 if ( ! arg5 )
5065 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5066 else
5067 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5069 unlock_user(p, arg1, 0);
5070 unlock_user(p2, arg2, 0);
5071 unlock_user(p3, arg3, 0);
5072 break;
5074 #ifdef TARGET_NR_umount
5075 case TARGET_NR_umount:
5076 if (!(p = lock_user_string(arg1)))
5077 goto efault;
5078 ret = get_errno(umount(p));
5079 unlock_user(p, arg1, 0);
5080 break;
5081 #endif
5082 #ifdef TARGET_NR_stime /* not on alpha */
5083 case TARGET_NR_stime:
5085 time_t host_time;
5086 if (get_user_sal(host_time, arg1))
5087 goto efault;
5088 ret = get_errno(stime(&host_time));
5090 break;
5091 #endif
5092 case TARGET_NR_ptrace:
5093 goto unimplemented;
5094 #ifdef TARGET_NR_alarm /* not on alpha */
5095 case TARGET_NR_alarm:
5096 ret = alarm(arg1);
5097 break;
5098 #endif
5099 #ifdef TARGET_NR_oldfstat
5100 case TARGET_NR_oldfstat:
5101 goto unimplemented;
5102 #endif
5103 #ifdef TARGET_NR_pause /* not on alpha */
5104 case TARGET_NR_pause:
5105 ret = get_errno(pause());
5106 break;
5107 #endif
5108 #ifdef TARGET_NR_utime
5109 case TARGET_NR_utime:
5111 struct utimbuf tbuf, *host_tbuf;
5112 struct target_utimbuf *target_tbuf;
5113 if (arg2) {
5114 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5115 goto efault;
5116 tbuf.actime = tswapal(target_tbuf->actime);
5117 tbuf.modtime = tswapal(target_tbuf->modtime);
5118 unlock_user_struct(target_tbuf, arg2, 0);
5119 host_tbuf = &tbuf;
5120 } else {
5121 host_tbuf = NULL;
5123 if (!(p = lock_user_string(arg1)))
5124 goto efault;
5125 ret = get_errno(utime(p, host_tbuf));
5126 unlock_user(p, arg1, 0);
5128 break;
5129 #endif
5130 case TARGET_NR_utimes:
5132 struct timeval *tvp, tv[2];
5133 if (arg2) {
5134 if (copy_from_user_timeval(&tv[0], arg2)
5135 || copy_from_user_timeval(&tv[1],
5136 arg2 + sizeof(struct target_timeval)))
5137 goto efault;
5138 tvp = tv;
5139 } else {
5140 tvp = NULL;
5142 if (!(p = lock_user_string(arg1)))
5143 goto efault;
5144 ret = get_errno(utimes(p, tvp));
5145 unlock_user(p, arg1, 0);
5147 break;
5148 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5149 case TARGET_NR_futimesat:
5151 struct timeval *tvp, tv[2];
5152 if (arg3) {
5153 if (copy_from_user_timeval(&tv[0], arg3)
5154 || copy_from_user_timeval(&tv[1],
5155 arg3 + sizeof(struct target_timeval)))
5156 goto efault;
5157 tvp = tv;
5158 } else {
5159 tvp = NULL;
5161 if (!(p = lock_user_string(arg2)))
5162 goto efault;
5163 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5164 unlock_user(p, arg2, 0);
5166 break;
5167 #endif
5168 #ifdef TARGET_NR_stty
5169 case TARGET_NR_stty:
5170 goto unimplemented;
5171 #endif
5172 #ifdef TARGET_NR_gtty
5173 case TARGET_NR_gtty:
5174 goto unimplemented;
5175 #endif
5176 case TARGET_NR_access:
5177 if (!(p = lock_user_string(arg1)))
5178 goto efault;
5179 ret = get_errno(access(path(p), arg2));
5180 unlock_user(p, arg1, 0);
5181 break;
5182 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5183 case TARGET_NR_faccessat:
5184 if (!(p = lock_user_string(arg2)))
5185 goto efault;
5186 ret = get_errno(sys_faccessat(arg1, p, arg3));
5187 unlock_user(p, arg2, 0);
5188 break;
5189 #endif
5190 #ifdef TARGET_NR_nice /* not on alpha */
5191 case TARGET_NR_nice:
5192 ret = get_errno(nice(arg1));
5193 break;
5194 #endif
5195 #ifdef TARGET_NR_ftime
5196 case TARGET_NR_ftime:
5197 goto unimplemented;
5198 #endif
5199 case TARGET_NR_sync:
5200 sync();
5201 ret = 0;
5202 break;
5203 case TARGET_NR_kill:
5204 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5205 break;
5206 case TARGET_NR_rename:
5208 void *p2;
5209 p = lock_user_string(arg1);
5210 p2 = lock_user_string(arg2);
5211 if (!p || !p2)
5212 ret = -TARGET_EFAULT;
5213 else
5214 ret = get_errno(rename(p, p2));
5215 unlock_user(p2, arg2, 0);
5216 unlock_user(p, arg1, 0);
5218 break;
5219 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5220 case TARGET_NR_renameat:
5222 void *p2;
5223 p = lock_user_string(arg2);
5224 p2 = lock_user_string(arg4);
5225 if (!p || !p2)
5226 ret = -TARGET_EFAULT;
5227 else
5228 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5229 unlock_user(p2, arg4, 0);
5230 unlock_user(p, arg2, 0);
5232 break;
5233 #endif
5234 case TARGET_NR_mkdir:
5235 if (!(p = lock_user_string(arg1)))
5236 goto efault;
5237 ret = get_errno(mkdir(p, arg2));
5238 unlock_user(p, arg1, 0);
5239 break;
5240 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5241 case TARGET_NR_mkdirat:
5242 if (!(p = lock_user_string(arg2)))
5243 goto efault;
5244 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5245 unlock_user(p, arg2, 0);
5246 break;
5247 #endif
5248 case TARGET_NR_rmdir:
5249 if (!(p = lock_user_string(arg1)))
5250 goto efault;
5251 ret = get_errno(rmdir(p));
5252 unlock_user(p, arg1, 0);
5253 break;
5254 case TARGET_NR_dup:
5255 ret = get_errno(dup(arg1));
5256 break;
5257 case TARGET_NR_pipe:
5258 ret = do_pipe(cpu_env, arg1, 0, 0);
5259 break;
5260 #ifdef TARGET_NR_pipe2
5261 case TARGET_NR_pipe2:
5262 ret = do_pipe(cpu_env, arg1, arg2, 1);
5263 break;
5264 #endif
5265 case TARGET_NR_times:
5267 struct target_tms *tmsp;
5268 struct tms tms;
5269 ret = get_errno(times(&tms));
5270 if (arg1) {
5271 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5272 if (!tmsp)
5273 goto efault;
5274 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5275 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5276 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5277 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5279 if (!is_error(ret))
5280 ret = host_to_target_clock_t(ret);
5282 break;
5283 #ifdef TARGET_NR_prof
5284 case TARGET_NR_prof:
5285 goto unimplemented;
5286 #endif
5287 #ifdef TARGET_NR_signal
5288 case TARGET_NR_signal:
5289 goto unimplemented;
5290 #endif
5291 case TARGET_NR_acct:
5292 if (arg1 == 0) {
5293 ret = get_errno(acct(NULL));
5294 } else {
5295 if (!(p = lock_user_string(arg1)))
5296 goto efault;
5297 ret = get_errno(acct(path(p)));
5298 unlock_user(p, arg1, 0);
5300 break;
5301 #ifdef TARGET_NR_umount2 /* not on alpha */
5302 case TARGET_NR_umount2:
5303 if (!(p = lock_user_string(arg1)))
5304 goto efault;
5305 ret = get_errno(umount2(p, arg2));
5306 unlock_user(p, arg1, 0);
5307 break;
5308 #endif
5309 #ifdef TARGET_NR_lock
5310 case TARGET_NR_lock:
5311 goto unimplemented;
5312 #endif
5313 case TARGET_NR_ioctl:
5314 ret = do_ioctl(arg1, arg2, arg3);
5315 break;
5316 case TARGET_NR_fcntl:
5317 ret = do_fcntl(arg1, arg2, arg3);
5318 break;
5319 #ifdef TARGET_NR_mpx
5320 case TARGET_NR_mpx:
5321 goto unimplemented;
5322 #endif
5323 case TARGET_NR_setpgid:
5324 ret = get_errno(setpgid(arg1, arg2));
5325 break;
5326 #ifdef TARGET_NR_ulimit
5327 case TARGET_NR_ulimit:
5328 goto unimplemented;
5329 #endif
5330 #ifdef TARGET_NR_oldolduname
5331 case TARGET_NR_oldolduname:
5332 goto unimplemented;
5333 #endif
5334 case TARGET_NR_umask:
5335 ret = get_errno(umask(arg1));
5336 break;
5337 case TARGET_NR_chroot:
5338 if (!(p = lock_user_string(arg1)))
5339 goto efault;
5340 ret = get_errno(chroot(p));
5341 unlock_user(p, arg1, 0);
5342 break;
5343 case TARGET_NR_ustat:
5344 goto unimplemented;
5345 case TARGET_NR_dup2:
5346 ret = get_errno(dup2(arg1, arg2));
5347 break;
5348 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5349 case TARGET_NR_dup3:
5350 ret = get_errno(dup3(arg1, arg2, arg3));
5351 break;
5352 #endif
5353 #ifdef TARGET_NR_getppid /* not on alpha */
5354 case TARGET_NR_getppid:
5355 ret = get_errno(getppid());
5356 break;
5357 #endif
5358 case TARGET_NR_getpgrp:
5359 ret = get_errno(getpgrp());
5360 break;
5361 case TARGET_NR_setsid:
5362 ret = get_errno(setsid());
5363 break;
5364 #ifdef TARGET_NR_sigaction
5365 case TARGET_NR_sigaction:
5367 #if defined(TARGET_ALPHA)
5368 struct target_sigaction act, oact, *pact = 0;
5369 struct target_old_sigaction *old_act;
5370 if (arg2) {
5371 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5372 goto efault;
5373 act._sa_handler = old_act->_sa_handler;
5374 target_siginitset(&act.sa_mask, old_act->sa_mask);
5375 act.sa_flags = old_act->sa_flags;
5376 act.sa_restorer = 0;
5377 unlock_user_struct(old_act, arg2, 0);
5378 pact = &act;
5380 ret = get_errno(do_sigaction(arg1, pact, &oact));
5381 if (!is_error(ret) && arg3) {
5382 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5383 goto efault;
5384 old_act->_sa_handler = oact._sa_handler;
5385 old_act->sa_mask = oact.sa_mask.sig[0];
5386 old_act->sa_flags = oact.sa_flags;
5387 unlock_user_struct(old_act, arg3, 1);
5389 #elif defined(TARGET_MIPS)
5390 struct target_sigaction act, oact, *pact, *old_act;
5392 if (arg2) {
5393 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5394 goto efault;
5395 act._sa_handler = old_act->_sa_handler;
5396 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5397 act.sa_flags = old_act->sa_flags;
5398 unlock_user_struct(old_act, arg2, 0);
5399 pact = &act;
5400 } else {
5401 pact = NULL;
5404 ret = get_errno(do_sigaction(arg1, pact, &oact));
5406 if (!is_error(ret) && arg3) {
5407 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5408 goto efault;
5409 old_act->_sa_handler = oact._sa_handler;
5410 old_act->sa_flags = oact.sa_flags;
5411 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5412 old_act->sa_mask.sig[1] = 0;
5413 old_act->sa_mask.sig[2] = 0;
5414 old_act->sa_mask.sig[3] = 0;
5415 unlock_user_struct(old_act, arg3, 1);
5417 #else
5418 struct target_old_sigaction *old_act;
5419 struct target_sigaction act, oact, *pact;
5420 if (arg2) {
5421 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5422 goto efault;
5423 act._sa_handler = old_act->_sa_handler;
5424 target_siginitset(&act.sa_mask, old_act->sa_mask);
5425 act.sa_flags = old_act->sa_flags;
5426 act.sa_restorer = old_act->sa_restorer;
5427 unlock_user_struct(old_act, arg2, 0);
5428 pact = &act;
5429 } else {
5430 pact = NULL;
5432 ret = get_errno(do_sigaction(arg1, pact, &oact));
5433 if (!is_error(ret) && arg3) {
5434 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5435 goto efault;
5436 old_act->_sa_handler = oact._sa_handler;
5437 old_act->sa_mask = oact.sa_mask.sig[0];
5438 old_act->sa_flags = oact.sa_flags;
5439 old_act->sa_restorer = oact.sa_restorer;
5440 unlock_user_struct(old_act, arg3, 1);
5442 #endif
5444 break;
5445 #endif
5446 case TARGET_NR_rt_sigaction:
5448 #if defined(TARGET_ALPHA)
5449 struct target_sigaction act, oact, *pact = 0;
5450 struct target_rt_sigaction *rt_act;
5451 /* ??? arg4 == sizeof(sigset_t). */
5452 if (arg2) {
5453 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5454 goto efault;
5455 act._sa_handler = rt_act->_sa_handler;
5456 act.sa_mask = rt_act->sa_mask;
5457 act.sa_flags = rt_act->sa_flags;
5458 act.sa_restorer = arg5;
5459 unlock_user_struct(rt_act, arg2, 0);
5460 pact = &act;
5462 ret = get_errno(do_sigaction(arg1, pact, &oact));
5463 if (!is_error(ret) && arg3) {
5464 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5465 goto efault;
5466 rt_act->_sa_handler = oact._sa_handler;
5467 rt_act->sa_mask = oact.sa_mask;
5468 rt_act->sa_flags = oact.sa_flags;
5469 unlock_user_struct(rt_act, arg3, 1);
5471 #else
5472 struct target_sigaction *act;
5473 struct target_sigaction *oact;
5475 if (arg2) {
5476 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5477 goto efault;
5478 } else
5479 act = NULL;
5480 if (arg3) {
5481 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5482 ret = -TARGET_EFAULT;
5483 goto rt_sigaction_fail;
5485 } else
5486 oact = NULL;
5487 ret = get_errno(do_sigaction(arg1, act, oact));
5488 rt_sigaction_fail:
5489 if (act)
5490 unlock_user_struct(act, arg2, 0);
5491 if (oact)
5492 unlock_user_struct(oact, arg3, 1);
5493 #endif
5495 break;
5496 #ifdef TARGET_NR_sgetmask /* not on alpha */
5497 case TARGET_NR_sgetmask:
5499 sigset_t cur_set;
5500 abi_ulong target_set;
5501 sigprocmask(0, NULL, &cur_set);
5502 host_to_target_old_sigset(&target_set, &cur_set);
5503 ret = target_set;
5505 break;
5506 #endif
5507 #ifdef TARGET_NR_ssetmask /* not on alpha */
5508 case TARGET_NR_ssetmask:
5510 sigset_t set, oset, cur_set;
5511 abi_ulong target_set = arg1;
5512 sigprocmask(0, NULL, &cur_set);
5513 target_to_host_old_sigset(&set, &target_set);
5514 sigorset(&set, &set, &cur_set);
5515 sigprocmask(SIG_SETMASK, &set, &oset);
5516 host_to_target_old_sigset(&target_set, &oset);
5517 ret = target_set;
5519 break;
5520 #endif
5521 #ifdef TARGET_NR_sigprocmask
5522 case TARGET_NR_sigprocmask:
5524 #if defined(TARGET_ALPHA)
5525 sigset_t set, oldset;
5526 abi_ulong mask;
5527 int how;
5529 switch (arg1) {
5530 case TARGET_SIG_BLOCK:
5531 how = SIG_BLOCK;
5532 break;
5533 case TARGET_SIG_UNBLOCK:
5534 how = SIG_UNBLOCK;
5535 break;
5536 case TARGET_SIG_SETMASK:
5537 how = SIG_SETMASK;
5538 break;
5539 default:
5540 ret = -TARGET_EINVAL;
5541 goto fail;
5543 mask = arg2;
5544 target_to_host_old_sigset(&set, &mask);
5546 ret = get_errno(sigprocmask(how, &set, &oldset));
5548 if (!is_error(ret)) {
5549 host_to_target_old_sigset(&mask, &oldset);
5550 ret = mask;
5551 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5553 #else
5554 sigset_t set, oldset, *set_ptr;
5555 int how;
5557 if (arg2) {
5558 switch (arg1) {
5559 case TARGET_SIG_BLOCK:
5560 how = SIG_BLOCK;
5561 break;
5562 case TARGET_SIG_UNBLOCK:
5563 how = SIG_UNBLOCK;
5564 break;
5565 case TARGET_SIG_SETMASK:
5566 how = SIG_SETMASK;
5567 break;
5568 default:
5569 ret = -TARGET_EINVAL;
5570 goto fail;
5572 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5573 goto efault;
5574 target_to_host_old_sigset(&set, p);
5575 unlock_user(p, arg2, 0);
5576 set_ptr = &set;
5577 } else {
5578 how = 0;
5579 set_ptr = NULL;
5581 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5582 if (!is_error(ret) && arg3) {
5583 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5584 goto efault;
5585 host_to_target_old_sigset(p, &oldset);
5586 unlock_user(p, arg3, sizeof(target_sigset_t));
5588 #endif
5590 break;
5591 #endif
5592 case TARGET_NR_rt_sigprocmask:
5594 int how = arg1;
5595 sigset_t set, oldset, *set_ptr;
5597 if (arg2) {
5598 switch(how) {
5599 case TARGET_SIG_BLOCK:
5600 how = SIG_BLOCK;
5601 break;
5602 case TARGET_SIG_UNBLOCK:
5603 how = SIG_UNBLOCK;
5604 break;
5605 case TARGET_SIG_SETMASK:
5606 how = SIG_SETMASK;
5607 break;
5608 default:
5609 ret = -TARGET_EINVAL;
5610 goto fail;
5612 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5613 goto efault;
5614 target_to_host_sigset(&set, p);
5615 unlock_user(p, arg2, 0);
5616 set_ptr = &set;
5617 } else {
5618 how = 0;
5619 set_ptr = NULL;
5621 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5622 if (!is_error(ret) && arg3) {
5623 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5624 goto efault;
5625 host_to_target_sigset(p, &oldset);
5626 unlock_user(p, arg3, sizeof(target_sigset_t));
5629 break;
5630 #ifdef TARGET_NR_sigpending
5631 case TARGET_NR_sigpending:
5633 sigset_t set;
5634 ret = get_errno(sigpending(&set));
5635 if (!is_error(ret)) {
5636 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5637 goto efault;
5638 host_to_target_old_sigset(p, &set);
5639 unlock_user(p, arg1, sizeof(target_sigset_t));
5642 break;
5643 #endif
5644 case TARGET_NR_rt_sigpending:
5646 sigset_t set;
5647 ret = get_errno(sigpending(&set));
5648 if (!is_error(ret)) {
5649 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5650 goto efault;
5651 host_to_target_sigset(p, &set);
5652 unlock_user(p, arg1, sizeof(target_sigset_t));
5655 break;
5656 #ifdef TARGET_NR_sigsuspend
5657 case TARGET_NR_sigsuspend:
5659 sigset_t set;
5660 #if defined(TARGET_ALPHA)
5661 abi_ulong mask = arg1;
5662 target_to_host_old_sigset(&set, &mask);
5663 #else
5664 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5665 goto efault;
5666 target_to_host_old_sigset(&set, p);
5667 unlock_user(p, arg1, 0);
5668 #endif
5669 ret = get_errno(sigsuspend(&set));
5671 break;
5672 #endif
5673 case TARGET_NR_rt_sigsuspend:
5675 sigset_t set;
5676 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5677 goto efault;
5678 target_to_host_sigset(&set, p);
5679 unlock_user(p, arg1, 0);
5680 ret = get_errno(sigsuspend(&set));
5682 break;
5683 case TARGET_NR_rt_sigtimedwait:
5685 sigset_t set;
5686 struct timespec uts, *puts;
5687 siginfo_t uinfo;
5689 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5690 goto efault;
5691 target_to_host_sigset(&set, p);
5692 unlock_user(p, arg1, 0);
5693 if (arg3) {
5694 puts = &uts;
5695 target_to_host_timespec(puts, arg3);
5696 } else {
5697 puts = NULL;
5699 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5700 if (!is_error(ret) && arg2) {
5701 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5702 goto efault;
5703 host_to_target_siginfo(p, &uinfo);
5704 unlock_user(p, arg2, sizeof(target_siginfo_t));
5707 break;
5708 case TARGET_NR_rt_sigqueueinfo:
5710 siginfo_t uinfo;
5711 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
5712 goto efault;
5713 target_to_host_siginfo(&uinfo, p);
5714 unlock_user(p, arg1, 0);
5715 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5717 break;
5718 #ifdef TARGET_NR_sigreturn
5719 case TARGET_NR_sigreturn:
5720 /* NOTE: ret is eax, so no transcoding needs to be done */
5721 ret = do_sigreturn(cpu_env);
5722 break;
5723 #endif
5724 case TARGET_NR_rt_sigreturn:
5725 /* NOTE: ret is eax, so no transcoding needs to be done */
5726 ret = do_rt_sigreturn(cpu_env);
5727 break;
5728 case TARGET_NR_sethostname:
5729 if (!(p = lock_user_string(arg1)))
5730 goto efault;
5731 ret = get_errno(sethostname(p, arg2));
5732 unlock_user(p, arg1, 0);
5733 break;
5734 case TARGET_NR_setrlimit:
5736 int resource = target_to_host_resource(arg1);
5737 struct target_rlimit *target_rlim;
5738 struct rlimit rlim;
5739 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5740 goto efault;
5741 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5742 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5743 unlock_user_struct(target_rlim, arg2, 0);
5744 ret = get_errno(setrlimit(resource, &rlim));
5746 break;
5747 case TARGET_NR_getrlimit:
5749 int resource = target_to_host_resource(arg1);
5750 struct target_rlimit *target_rlim;
5751 struct rlimit rlim;
5753 ret = get_errno(getrlimit(resource, &rlim));
5754 if (!is_error(ret)) {
5755 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5756 goto efault;
5757 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5758 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5759 unlock_user_struct(target_rlim, arg2, 1);
5762 break;
5763 case TARGET_NR_getrusage:
5765 struct rusage rusage;
5766 ret = get_errno(getrusage(arg1, &rusage));
5767 if (!is_error(ret)) {
5768 host_to_target_rusage(arg2, &rusage);
5771 break;
5772 case TARGET_NR_gettimeofday:
5774 struct timeval tv;
5775 ret = get_errno(gettimeofday(&tv, NULL));
5776 if (!is_error(ret)) {
5777 if (copy_to_user_timeval(arg1, &tv))
5778 goto efault;
5781 break;
5782 case TARGET_NR_settimeofday:
5784 struct timeval tv;
5785 if (copy_from_user_timeval(&tv, arg1))
5786 goto efault;
5787 ret = get_errno(settimeofday(&tv, NULL));
5789 break;
5790 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5791 case TARGET_NR_select:
5793 struct target_sel_arg_struct *sel;
5794 abi_ulong inp, outp, exp, tvp;
5795 long nsel;
5797 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5798 goto efault;
5799 nsel = tswapal(sel->n);
5800 inp = tswapal(sel->inp);
5801 outp = tswapal(sel->outp);
5802 exp = tswapal(sel->exp);
5803 tvp = tswapal(sel->tvp);
5804 unlock_user_struct(sel, arg1, 0);
5805 ret = do_select(nsel, inp, outp, exp, tvp);
5807 break;
5808 #endif
5809 #ifdef TARGET_NR_pselect6
5810 case TARGET_NR_pselect6:
5812 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
5813 fd_set rfds, wfds, efds;
5814 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
5815 struct timespec ts, *ts_ptr;
5818 * The 6th arg is actually two args smashed together,
5819 * so we cannot use the C library.
5821 sigset_t set;
5822 struct {
5823 sigset_t *set;
5824 size_t size;
5825 } sig, *sig_ptr;
5827 abi_ulong arg_sigset, arg_sigsize, *arg7;
5828 target_sigset_t *target_sigset;
5830 n = arg1;
5831 rfd_addr = arg2;
5832 wfd_addr = arg3;
5833 efd_addr = arg4;
5834 ts_addr = arg5;
5836 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
5837 if (ret) {
5838 goto fail;
5840 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
5841 if (ret) {
5842 goto fail;
5844 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
5845 if (ret) {
5846 goto fail;
5850 * This takes a timespec, and not a timeval, so we cannot
5851 * use the do_select() helper ...
5853 if (ts_addr) {
5854 if (target_to_host_timespec(&ts, ts_addr)) {
5855 goto efault;
5857 ts_ptr = &ts;
5858 } else {
5859 ts_ptr = NULL;
5862 /* Extract the two packed args for the sigset */
5863 if (arg6) {
5864 sig_ptr = &sig;
5865 sig.size = _NSIG / 8;
5867 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
5868 if (!arg7) {
5869 goto efault;
5871 arg_sigset = tswapal(arg7[0]);
5872 arg_sigsize = tswapal(arg7[1]);
5873 unlock_user(arg7, arg6, 0);
5875 if (arg_sigset) {
5876 sig.set = &set;
5877 if (arg_sigsize != sizeof(*target_sigset)) {
5878 /* Like the kernel, we enforce correct size sigsets */
5879 ret = -TARGET_EINVAL;
5880 goto fail;
5882 target_sigset = lock_user(VERIFY_READ, arg_sigset,
5883 sizeof(*target_sigset), 1);
5884 if (!target_sigset) {
5885 goto efault;
5887 target_to_host_sigset(&set, target_sigset);
5888 unlock_user(target_sigset, arg_sigset, 0);
5889 } else {
5890 sig.set = NULL;
5892 } else {
5893 sig_ptr = NULL;
5896 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
5897 ts_ptr, sig_ptr));
5899 if (!is_error(ret)) {
5900 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
5901 goto efault;
5902 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
5903 goto efault;
5904 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
5905 goto efault;
5907 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
5908 goto efault;
5911 break;
5912 #endif
5913 case TARGET_NR_symlink:
5915 void *p2;
5916 p = lock_user_string(arg1);
5917 p2 = lock_user_string(arg2);
5918 if (!p || !p2)
5919 ret = -TARGET_EFAULT;
5920 else
5921 ret = get_errno(symlink(p, p2));
5922 unlock_user(p2, arg2, 0);
5923 unlock_user(p, arg1, 0);
5925 break;
5926 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5927 case TARGET_NR_symlinkat:
5929 void *p2;
5930 p = lock_user_string(arg1);
5931 p2 = lock_user_string(arg3);
5932 if (!p || !p2)
5933 ret = -TARGET_EFAULT;
5934 else
5935 ret = get_errno(sys_symlinkat(p, arg2, p2));
5936 unlock_user(p2, arg3, 0);
5937 unlock_user(p, arg1, 0);
5939 break;
5940 #endif
5941 #ifdef TARGET_NR_oldlstat
5942 case TARGET_NR_oldlstat:
5943 goto unimplemented;
5944 #endif
5945 case TARGET_NR_readlink:
5947 void *p2, *temp;
5948 p = lock_user_string(arg1);
5949 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5950 if (!p || !p2)
5951 ret = -TARGET_EFAULT;
5952 else {
5953 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5954 char real[PATH_MAX];
5955 temp = realpath(exec_path, real);
5956 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
5957 snprintf((char *)p2, arg3, "%s", real);
5959 else
5960 ret = get_errno(readlink(path(p), p2, arg3));
5962 unlock_user(p2, arg2, ret);
5963 unlock_user(p, arg1, 0);
5965 break;
5966 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5967 case TARGET_NR_readlinkat:
5969 void *p2;
5970 p = lock_user_string(arg2);
5971 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5972 if (!p || !p2)
5973 ret = -TARGET_EFAULT;
5974 else
5975 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5976 unlock_user(p2, arg3, ret);
5977 unlock_user(p, arg2, 0);
5979 break;
5980 #endif
5981 #ifdef TARGET_NR_uselib
5982 case TARGET_NR_uselib:
5983 goto unimplemented;
5984 #endif
5985 #ifdef TARGET_NR_swapon
5986 case TARGET_NR_swapon:
5987 if (!(p = lock_user_string(arg1)))
5988 goto efault;
5989 ret = get_errno(swapon(p, arg2));
5990 unlock_user(p, arg1, 0);
5991 break;
5992 #endif
5993 case TARGET_NR_reboot:
5994 if (!(p = lock_user_string(arg4)))
5995 goto efault;
5996 ret = reboot(arg1, arg2, arg3, p);
5997 unlock_user(p, arg4, 0);
5998 break;
5999 #ifdef TARGET_NR_readdir
6000 case TARGET_NR_readdir:
6001 goto unimplemented;
6002 #endif
6003 #ifdef TARGET_NR_mmap
6004 case TARGET_NR_mmap:
6005 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6006 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6007 || defined(TARGET_S390X)
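/* These targets use the old mmap calling convention: arg1 points at a
 * block of six abi_ulongs holding the real arguments, which are
 * unpacked here before calling target_mmap(). */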
6009 abi_ulong *v;
6010 abi_ulong v1, v2, v3, v4, v5, v6;
6011 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6012 goto efault;
6013 v1 = tswapal(v[0]);
6014 v2 = tswapal(v[1]);
6015 v3 = tswapal(v[2]);
6016 v4 = tswapal(v[3]);
6017 v5 = tswapal(v[4]);
6018 v6 = tswapal(v[5]);
6019 unlock_user(v, arg1, 0);
6020 ret = get_errno(target_mmap(v1, v2, v3,
6021 target_to_host_bitmask(v4, mmap_flags_tbl),
6022 v5, v6));
6024 #else
6025 ret = get_errno(target_mmap(arg1, arg2, arg3,
6026 target_to_host_bitmask(arg4, mmap_flags_tbl),
6027 arg5,
6028 arg6));
6029 #endif
6030 break;
6031 #endif
6032 #ifdef TARGET_NR_mmap2
6033 case TARGET_NR_mmap2:
6034 #ifndef MMAP_SHIFT
6035 #define MMAP_SHIFT 12
6036 #endif
6037 ret = get_errno(target_mmap(arg1, arg2, arg3,
6038 target_to_host_bitmask(arg4, mmap_flags_tbl),
6039 arg5,
6040 arg6 << MMAP_SHIFT));
6041 break;
6042 #endif
6043 case TARGET_NR_munmap:
6044 ret = get_errno(target_munmap(arg1, arg2));
6045 break;
6046 case TARGET_NR_mprotect:
6048 TaskState *ts = ((CPUState *)cpu_env)->opaque;
6049 /* Special hack to detect libc making the stack executable. */
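/* PROT_GROWSDOWN asks the kernel to apply the change down to the
 * bottom of the stack mapping; since qemu lays out the guest stack
 * itself, approximate that by extending the range down to the
 * recorded stack_limit and dropping the flag. */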
6050 if ((arg3 & PROT_GROWSDOWN)
6051 && arg1 >= ts->info->stack_limit
6052 && arg1 <= ts->info->start_stack) {
6053 arg3 &= ~PROT_GROWSDOWN;
6054 arg2 = arg2 + arg1 - ts->info->stack_limit;
6055 arg1 = ts->info->stack_limit;
6058 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6059 break;
6060 #ifdef TARGET_NR_mremap
6061 case TARGET_NR_mremap:
6062 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6063 break;
6064 #endif
6065 /* ??? msync/mlock/munlock are broken for softmmu. */
6066 #ifdef TARGET_NR_msync
6067 case TARGET_NR_msync:
6068 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6069 break;
6070 #endif
6071 #ifdef TARGET_NR_mlock
6072 case TARGET_NR_mlock:
6073 ret = get_errno(mlock(g2h(arg1), arg2));
6074 break;
6075 #endif
6076 #ifdef TARGET_NR_munlock
6077 case TARGET_NR_munlock:
6078 ret = get_errno(munlock(g2h(arg1), arg2));
6079 break;
6080 #endif
6081 #ifdef TARGET_NR_mlockall
6082 case TARGET_NR_mlockall:
6083 ret = get_errno(mlockall(arg1));
6084 break;
6085 #endif
6086 #ifdef TARGET_NR_munlockall
6087 case TARGET_NR_munlockall:
6088 ret = get_errno(munlockall());
6089 break;
6090 #endif
6091 case TARGET_NR_truncate:
6092 if (!(p = lock_user_string(arg1)))
6093 goto efault;
6094 ret = get_errno(truncate(p, arg2));
6095 unlock_user(p, arg1, 0);
6096 break;
6097 case TARGET_NR_ftruncate:
6098 ret = get_errno(ftruncate(arg1, arg2));
6099 break;
6100 case TARGET_NR_fchmod:
6101 ret = get_errno(fchmod(arg1, arg2));
6102 break;
6103 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6104 case TARGET_NR_fchmodat:
6105 if (!(p = lock_user_string(arg2)))
6106 goto efault;
6107 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6108 unlock_user(p, arg2, 0);
6109 break;
6110 #endif
6111 case TARGET_NR_getpriority:
6112 /* libc does special remapping of the return value of
6113 * sys_getpriority() so it's just easiest to call
6114 * sys_getpriority() directly rather than through libc. */
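/* (The raw syscall returns the priority biased as 20 - nice so that
 * it is never negative; the guest's libc undoes that bias itself, so
 * the biased value is exactly what must be handed back here.) */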
6115 ret = get_errno(sys_getpriority(arg1, arg2));
6116 break;
6117 case TARGET_NR_setpriority:
6118 ret = get_errno(setpriority(arg1, arg2, arg3));
6119 break;
6120 #ifdef TARGET_NR_profil
6121 case TARGET_NR_profil:
6122 goto unimplemented;
6123 #endif
6124 case TARGET_NR_statfs:
6125 if (!(p = lock_user_string(arg1)))
6126 goto efault;
6127 ret = get_errno(statfs(path(p), &stfs));
6128 unlock_user(p, arg1, 0);
6129 convert_statfs:
6130 if (!is_error(ret)) {
6131 struct target_statfs *target_stfs;
6133 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6134 goto efault;
6135 __put_user(stfs.f_type, &target_stfs->f_type);
6136 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6137 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6138 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6139 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6140 __put_user(stfs.f_files, &target_stfs->f_files);
6141 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6142 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6143 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6144 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6145 unlock_user_struct(target_stfs, arg2, 1);
6147 break;
6148 case TARGET_NR_fstatfs:
6149 ret = get_errno(fstatfs(arg1, &stfs));
6150 goto convert_statfs;
6151 #ifdef TARGET_NR_statfs64
6152 case TARGET_NR_statfs64:
6153 if (!(p = lock_user_string(arg1)))
6154 goto efault;
6155 ret = get_errno(statfs(path(p), &stfs));
6156 unlock_user(p, arg1, 0);
6157 convert_statfs64:
6158 if (!is_error(ret)) {
6159 struct target_statfs64 *target_stfs;
6161 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6162 goto efault;
6163 __put_user(stfs.f_type, &target_stfs->f_type);
6164 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6165 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6166 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6167 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6168 __put_user(stfs.f_files, &target_stfs->f_files);
6169 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6170 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6171 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6172 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6173 unlock_user_struct(target_stfs, arg3, 1);
6175 break;
6176 case TARGET_NR_fstatfs64:
6177 ret = get_errno(fstatfs(arg1, &stfs));
6178 goto convert_statfs64;
6179 #endif
6180 #ifdef TARGET_NR_ioperm
6181 case TARGET_NR_ioperm:
6182 goto unimplemented;
6183 #endif
6184 #ifdef TARGET_NR_socketcall
6185 case TARGET_NR_socketcall:
6186 ret = do_socketcall(arg1, arg2);
6187 break;
6188 #endif
6189 #ifdef TARGET_NR_accept
6190 case TARGET_NR_accept:
6191 ret = do_accept(arg1, arg2, arg3);
6192 break;
6193 #endif
6194 #ifdef TARGET_NR_bind
6195 case TARGET_NR_bind:
6196 ret = do_bind(arg1, arg2, arg3);
6197 break;
6198 #endif
6199 #ifdef TARGET_NR_connect
6200 case TARGET_NR_connect:
6201 ret = do_connect(arg1, arg2, arg3);
6202 break;
6203 #endif
6204 #ifdef TARGET_NR_getpeername
6205 case TARGET_NR_getpeername:
6206 ret = do_getpeername(arg1, arg2, arg3);
6207 break;
6208 #endif
6209 #ifdef TARGET_NR_getsockname
6210 case TARGET_NR_getsockname:
6211 ret = do_getsockname(arg1, arg2, arg3);
6212 break;
6213 #endif
6214 #ifdef TARGET_NR_getsockopt
6215 case TARGET_NR_getsockopt:
6216 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6217 break;
6218 #endif
6219 #ifdef TARGET_NR_listen
6220 case TARGET_NR_listen:
6221 ret = get_errno(listen(arg1, arg2));
6222 break;
6223 #endif
6224 #ifdef TARGET_NR_recv
6225 case TARGET_NR_recv:
6226 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6227 break;
6228 #endif
6229 #ifdef TARGET_NR_recvfrom
6230 case TARGET_NR_recvfrom:
6231 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6232 break;
6233 #endif
6234 #ifdef TARGET_NR_recvmsg
6235 case TARGET_NR_recvmsg:
6236 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6237 break;
6238 #endif
6239 #ifdef TARGET_NR_send
6240 case TARGET_NR_send:
6241 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6242 break;
6243 #endif
6244 #ifdef TARGET_NR_sendmsg
6245 case TARGET_NR_sendmsg:
6246 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6247 break;
6248 #endif
6249 #ifdef TARGET_NR_sendto
6250 case TARGET_NR_sendto:
6251 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6252 break;
6253 #endif
6254 #ifdef TARGET_NR_shutdown
6255 case TARGET_NR_shutdown:
6256 ret = get_errno(shutdown(arg1, arg2));
6257 break;
6258 #endif
6259 #ifdef TARGET_NR_socket
6260 case TARGET_NR_socket:
6261 ret = do_socket(arg1, arg2, arg3);
6262 break;
6263 #endif
6264 #ifdef TARGET_NR_socketpair
6265 case TARGET_NR_socketpair:
6266 ret = do_socketpair(arg1, arg2, arg3, arg4);
6267 break;
6268 #endif
6269 #ifdef TARGET_NR_setsockopt
6270 case TARGET_NR_setsockopt:
6271 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6272 break;
6273 #endif
6275 case TARGET_NR_syslog:
6276 if (!(p = lock_user_string(arg2)))
6277 goto efault;
6278 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6279 unlock_user(p, arg2, 0);
6280 break;
6282 case TARGET_NR_setitimer:
6284 struct itimerval value, ovalue, *pvalue;
6286 if (arg2) {
6287 pvalue = &value;
6288 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6289 || copy_from_user_timeval(&pvalue->it_value,
6290 arg2 + sizeof(struct target_timeval)))
6291 goto efault;
6292 } else {
6293 pvalue = NULL;
6295 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6296 if (!is_error(ret) && arg3) {
6297 if (copy_to_user_timeval(arg3,
6298 &ovalue.it_interval)
6299 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6300 &ovalue.it_value))
6301 goto efault;
6304 break;
6305 case TARGET_NR_getitimer:
6307 struct itimerval value;
6309 ret = get_errno(getitimer(arg1, &value));
6310 if (!is_error(ret) && arg2) {
6311 if (copy_to_user_timeval(arg2,
6312 &value.it_interval)
6313 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6314 &value.it_value))
6315 goto efault;
6318 break;
6319 case TARGET_NR_stat:
6320 if (!(p = lock_user_string(arg1)))
6321 goto efault;
6322 ret = get_errno(stat(path(p), &st));
6323 unlock_user(p, arg1, 0);
6324 goto do_stat;
6325 case TARGET_NR_lstat:
6326 if (!(p = lock_user_string(arg1)))
6327 goto efault;
6328 ret = get_errno(lstat(path(p), &st));
6329 unlock_user(p, arg1, 0);
6330 goto do_stat;
6331 case TARGET_NR_fstat:
6333 ret = get_errno(fstat(arg1, &st));
6334 do_stat:
6335 if (!is_error(ret)) {
6336 struct target_stat *target_st;
6338 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6339 goto efault;
6340 memset(target_st, 0, sizeof(*target_st));
6341 __put_user(st.st_dev, &target_st->st_dev);
6342 __put_user(st.st_ino, &target_st->st_ino);
6343 __put_user(st.st_mode, &target_st->st_mode);
6344 __put_user(st.st_uid, &target_st->st_uid);
6345 __put_user(st.st_gid, &target_st->st_gid);
6346 __put_user(st.st_nlink, &target_st->st_nlink);
6347 __put_user(st.st_rdev, &target_st->st_rdev);
6348 __put_user(st.st_size, &target_st->st_size);
6349 __put_user(st.st_blksize, &target_st->st_blksize);
6350 __put_user(st.st_blocks, &target_st->st_blocks);
6351 __put_user(st.st_atime, &target_st->target_st_atime);
6352 __put_user(st.st_mtime, &target_st->target_st_mtime);
6353 __put_user(st.st_ctime, &target_st->target_st_ctime);
6354 unlock_user_struct(target_st, arg2, 1);
6357 break;
6358 #ifdef TARGET_NR_olduname
6359 case TARGET_NR_olduname:
6360 goto unimplemented;
6361 #endif
6362 #ifdef TARGET_NR_iopl
6363 case TARGET_NR_iopl:
6364 goto unimplemented;
6365 #endif
6366 case TARGET_NR_vhangup:
6367 ret = get_errno(vhangup());
6368 break;
6369 #ifdef TARGET_NR_idle
6370 case TARGET_NR_idle:
6371 goto unimplemented;
6372 #endif
6373 #ifdef TARGET_NR_syscall
6374 case TARGET_NR_syscall:
6375 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6376 arg6, arg7, arg8, 0);
6377 break;
6378 #endif
6379 case TARGET_NR_wait4:
6381 int status;
6382 abi_long status_ptr = arg2;
6383 struct rusage rusage, *rusage_ptr;
6384 abi_ulong target_rusage = arg4;
6385 if (target_rusage)
6386 rusage_ptr = &rusage;
6387 else
6388 rusage_ptr = NULL;
6389 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6390 if (!is_error(ret)) {
6391 if (status_ptr) {
6392 status = host_to_target_waitstatus(status);
6393 if (put_user_s32(status, status_ptr))
6394 goto efault;
6396 if (target_rusage)
6397 host_to_target_rusage(target_rusage, &rusage);
6400 break;
6401 #ifdef TARGET_NR_swapoff
6402 case TARGET_NR_swapoff:
6403 if (!(p = lock_user_string(arg1)))
6404 goto efault;
6405 ret = get_errno(swapoff(p));
6406 unlock_user(p, arg1, 0);
6407 break;
6408 #endif
6409 case TARGET_NR_sysinfo:
6411 struct target_sysinfo *target_value;
6412 struct sysinfo value;
6413 ret = get_errno(sysinfo(&value));
6414 if (!is_error(ret) && arg1)
6416 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6417 goto efault;
6418 __put_user(value.uptime, &target_value->uptime);
6419 __put_user(value.loads[0], &target_value->loads[0]);
6420 __put_user(value.loads[1], &target_value->loads[1]);
6421 __put_user(value.loads[2], &target_value->loads[2]);
6422 __put_user(value.totalram, &target_value->totalram);
6423 __put_user(value.freeram, &target_value->freeram);
6424 __put_user(value.sharedram, &target_value->sharedram);
6425 __put_user(value.bufferram, &target_value->bufferram);
6426 __put_user(value.totalswap, &target_value->totalswap);
6427 __put_user(value.freeswap, &target_value->freeswap);
6428 __put_user(value.procs, &target_value->procs);
6429 __put_user(value.totalhigh, &target_value->totalhigh);
6430 __put_user(value.freehigh, &target_value->freehigh);
6431 __put_user(value.mem_unit, &target_value->mem_unit);
6432 unlock_user_struct(target_value, arg1, 1);
6435 break;
6436 #ifdef TARGET_NR_ipc
6437 case TARGET_NR_ipc:
6438 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6439 break;
6440 #endif
6441 #ifdef TARGET_NR_semget
6442 case TARGET_NR_semget:
6443 ret = get_errno(semget(arg1, arg2, arg3));
6444 break;
6445 #endif
6446 #ifdef TARGET_NR_semop
6447 case TARGET_NR_semop:
6448 ret = get_errno(do_semop(arg1, arg2, arg3));
6449 break;
6450 #endif
6451 #ifdef TARGET_NR_semctl
6452 case TARGET_NR_semctl:
6453 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6454 break;
6455 #endif
6456 #ifdef TARGET_NR_msgctl
6457 case TARGET_NR_msgctl:
6458 ret = do_msgctl(arg1, arg2, arg3);
6459 break;
6460 #endif
6461 #ifdef TARGET_NR_msgget
6462 case TARGET_NR_msgget:
6463 ret = get_errno(msgget(arg1, arg2));
6464 break;
6465 #endif
6466 #ifdef TARGET_NR_msgrcv
6467 case TARGET_NR_msgrcv:
6468 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6469 break;
6470 #endif
6471 #ifdef TARGET_NR_msgsnd
6472 case TARGET_NR_msgsnd:
6473 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6474 break;
6475 #endif
6476 #ifdef TARGET_NR_shmget
6477 case TARGET_NR_shmget:
6478 ret = get_errno(shmget(arg1, arg2, arg3));
6479 break;
6480 #endif
6481 #ifdef TARGET_NR_shmctl
6482 case TARGET_NR_shmctl:
6483 ret = do_shmctl(arg1, arg2, arg3);
6484 break;
6485 #endif
6486 #ifdef TARGET_NR_shmat
6487 case TARGET_NR_shmat:
6488 ret = do_shmat(arg1, arg2, arg3);
6489 break;
6490 #endif
6491 #ifdef TARGET_NR_shmdt
6492 case TARGET_NR_shmdt:
6493 ret = do_shmdt(arg1);
6494 break;
6495 #endif
6496 case TARGET_NR_fsync:
6497 ret = get_errno(fsync(arg1));
6498 break;
6499 case TARGET_NR_clone:
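/* Different targets pass the clone() arguments in different orders
 * (e.g. flags and the new stack pointer, or the tls and child tid
 * pointers, are swapped), so shuffle the raw args into do_fork()'s
 * expected order per target. */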
6500 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6501 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6502 #elif defined(TARGET_CRIS)
6503 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6504 #elif defined(TARGET_S390X)
6505 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6506 #else
6507 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6508 #endif
6509 break;
6510 #ifdef __NR_exit_group
6511 /* new thread calls */
6512 case TARGET_NR_exit_group:
6513 #ifdef TARGET_GPROF
6514 _mcleanup();
6515 #endif
6516 gdb_exit(cpu_env, arg1);
6517 ret = get_errno(exit_group(arg1));
6518 break;
6519 #endif
6520 case TARGET_NR_setdomainname:
6521 if (!(p = lock_user_string(arg1)))
6522 goto efault;
6523 ret = get_errno(setdomainname(p, arg2));
6524 unlock_user(p, arg1, 0);
6525 break;
6526 case TARGET_NR_uname:
6527 /* no need to transcode because we use the linux syscall */
6529 struct new_utsname * buf;
6531 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6532 goto efault;
6533 ret = get_errno(sys_uname(buf));
6534 if (!is_error(ret)) {
6535 /* Overwrite the native machine name with whatever is being
6536 emulated. */
6537 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6538 /* Allow the user to override the reported release. */
6539 if (qemu_uname_release && *qemu_uname_release)
6540 strcpy (buf->release, qemu_uname_release);
6542 unlock_user_struct(buf, arg1, 1);
6544 break;
6545 #ifdef TARGET_I386
6546 case TARGET_NR_modify_ldt:
6547 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6548 break;
6549 #if !defined(TARGET_X86_64)
6550 case TARGET_NR_vm86old:
6551 goto unimplemented;
6552 case TARGET_NR_vm86:
6553 ret = do_vm86(cpu_env, arg1, arg2);
6554 break;
6555 #endif
6556 #endif
6557 case TARGET_NR_adjtimex:
6558 goto unimplemented;
6559 #ifdef TARGET_NR_create_module
6560 case TARGET_NR_create_module:
6561 #endif
6562 case TARGET_NR_init_module:
6563 case TARGET_NR_delete_module:
6564 #ifdef TARGET_NR_get_kernel_syms
6565 case TARGET_NR_get_kernel_syms:
6566 #endif
6567 goto unimplemented;
6568 case TARGET_NR_quotactl:
6569 goto unimplemented;
6570 case TARGET_NR_getpgid:
6571 ret = get_errno(getpgid(arg1));
6572 break;
6573 case TARGET_NR_fchdir:
6574 ret = get_errno(fchdir(arg1));
6575 break;
6576 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6577 case TARGET_NR_bdflush:
6578 goto unimplemented;
6579 #endif
6580 #ifdef TARGET_NR_sysfs
6581 case TARGET_NR_sysfs:
6582 goto unimplemented;
6583 #endif
6584 case TARGET_NR_personality:
6585 ret = get_errno(personality(arg1));
6586 break;
6587 #ifdef TARGET_NR_afs_syscall
6588 case TARGET_NR_afs_syscall:
6589 goto unimplemented;
6590 #endif
6591 #ifdef TARGET_NR__llseek /* Not on alpha */
6592 case TARGET_NR__llseek:
6594 int64_t res;
6595 #if !defined(__NR_llseek)
6596 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6597 if (res == -1) {
6598 ret = get_errno(res);
6599 } else {
6600 ret = 0;
6602 #else
6603 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6604 #endif
6605 if ((ret == 0) && put_user_s64(res, arg4)) {
6606 goto efault;
6609 break;
6610 #endif
6611 case TARGET_NR_getdents:
6612 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6614 struct target_dirent *target_dirp;
6615 struct linux_dirent *dirp;
6616 abi_long count = arg3;
6618 dirp = malloc(count);
6619 if (!dirp) {
6620 ret = -TARGET_ENOMEM;
6621 goto fail;
6624 ret = get_errno(sys_getdents(arg1, dirp, count));
6625 if (!is_error(ret)) {
6626 struct linux_dirent *de;
6627 struct target_dirent *tde;
6628 int len = ret;
6629 int reclen, treclen;
6630 int count1, tnamelen;
6632 count1 = 0;
6633 de = dirp;
6634 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6635 goto efault;
6636 tde = target_dirp;
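/* Convert each host dirent into the target layout: d_ino and d_off
 * shrink from host long to abi_long, so every record gets smaller by
 * twice that size difference. */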
6637 while (len > 0) {
6638 reclen = de->d_reclen;
6639 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6640 tde->d_reclen = tswap16(treclen);
6641 tde->d_ino = tswapal(de->d_ino);
6642 tde->d_off = tswapal(de->d_off);
6643 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6644 if (tnamelen > 256)
6645 tnamelen = 256;
6646 /* XXX: may not be correct */
6647 pstrcpy(tde->d_name, tnamelen, de->d_name);
6648 de = (struct linux_dirent *)((char *)de + reclen);
6649 len -= reclen;
6650 tde = (struct target_dirent *)((char *)tde + treclen);
6651 count1 += treclen;
6653 ret = count1;
6654 unlock_user(target_dirp, arg2, ret);
6656 free(dirp);
6658 #else
6660 struct linux_dirent *dirp;
6661 abi_long count = arg3;
6663 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6664 goto efault;
6665 ret = get_errno(sys_getdents(arg1, dirp, count));
6666 if (!is_error(ret)) {
6667 struct linux_dirent *de;
6668 int len = ret;
6669 int reclen;
6670 de = dirp;
6671 while (len > 0) {
6672 reclen = de->d_reclen;
6673 if (reclen > len)
6674 break;
6675 de->d_reclen = tswap16(reclen);
6676 tswapls(&de->d_ino);
6677 tswapls(&de->d_off);
6678 de = (struct linux_dirent *)((char *)de + reclen);
6679 len -= reclen;
6682 unlock_user(dirp, arg2, ret);
6684 #endif
6685 break;
6686 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6687 case TARGET_NR_getdents64:
6689 struct linux_dirent64 *dirp;
6690 abi_long count = arg3;
6691 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6692 goto efault;
6693 ret = get_errno(sys_getdents64(arg1, dirp, count));
6694 if (!is_error(ret)) {
6695 struct linux_dirent64 *de;
6696 int len = ret;
6697 int reclen;
6698 de = dirp;
6699 while (len > 0) {
6700 reclen = de->d_reclen;
6701 if (reclen > len)
6702 break;
6703 de->d_reclen = tswap16(reclen);
6704 tswap64s((uint64_t *)&de->d_ino);
6705 tswap64s((uint64_t *)&de->d_off);
6706 de = (struct linux_dirent64 *)((char *)de + reclen);
6707 len -= reclen;
6710 unlock_user(dirp, arg2, ret);
6712 break;
6713 #endif /* TARGET_NR_getdents64 */
6714 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6715 #ifdef TARGET_S390X
6716 case TARGET_NR_select:
6717 #else
6718 case TARGET_NR__newselect:
6719 #endif
6720 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6721 break;
6722 #endif
6723 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6724 # ifdef TARGET_NR_poll
6725 case TARGET_NR_poll:
6726 # endif
6727 # ifdef TARGET_NR_ppoll
6728 case TARGET_NR_ppoll:
6729 # endif
6731 struct target_pollfd *target_pfd;
6732 unsigned int nfds = arg2;
6733 int timeout = arg3;
6734 struct pollfd *pfd;
6735 unsigned int i;
6737 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6738 if (!target_pfd)
6739 goto efault;
6741 pfd = alloca(sizeof(struct pollfd) * nfds);
6742 for(i = 0; i < nfds; i++) {
6743 pfd[i].fd = tswap32(target_pfd[i].fd);
6744 pfd[i].events = tswap16(target_pfd[i].events);
6747 # ifdef TARGET_NR_ppoll
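/* ppoll additionally takes a timespec timeout and a signal mask that
 * is installed for the duration of the wait; both have to be
 * converted from their target representations before calling the
 * host. */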
6748 if (num == TARGET_NR_ppoll) {
6749 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6750 target_sigset_t *target_set;
6751 sigset_t _set, *set = &_set;
6753 if (arg3) {
6754 if (target_to_host_timespec(timeout_ts, arg3)) {
6755 unlock_user(target_pfd, arg1, 0);
6756 goto efault;
6758 } else {
6759 timeout_ts = NULL;
6762 if (arg4) {
6763 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6764 if (!target_set) {
6765 unlock_user(target_pfd, arg1, 0);
6766 goto efault;
6768 target_to_host_sigset(set, target_set);
6769 } else {
6770 set = NULL;
6773 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6775 if (!is_error(ret) && arg3) {
6776 host_to_target_timespec(arg3, timeout_ts);
6778 if (arg4) {
6779 unlock_user(target_set, arg4, 0);
6781 } else
6782 # endif
6783 ret = get_errno(poll(pfd, nfds, timeout));
6785 if (!is_error(ret)) {
6786 for(i = 0; i < nfds; i++) {
6787 target_pfd[i].revents = tswap16(pfd[i].revents);
6790 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6792 break;
6793 #endif
6794 case TARGET_NR_flock:
6795 /* NOTE: the flock constant seems to be the same for every
6796 Linux platform */
6797 ret = get_errno(flock(arg1, arg2));
6798 break;
6799 case TARGET_NR_readv:
6801 int count = arg3;
6802 struct iovec *vec;
6804 vec = alloca(count * sizeof(struct iovec));
6805 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6806 goto efault;
6807 ret = get_errno(readv(arg1, vec, count));
6808 unlock_iovec(vec, arg2, count, 1);
6810 break;
6811 case TARGET_NR_writev:
6813 int count = arg3;
6814 struct iovec *vec;
6816 vec = alloca(count * sizeof(struct iovec));
6817 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6818 goto efault;
6819 ret = get_errno(writev(arg1, vec, count));
6820 unlock_iovec(vec, arg2, count, 0);
6822 break;
6823 case TARGET_NR_getsid:
6824 ret = get_errno(getsid(arg1));
6825 break;
6826 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6827 case TARGET_NR_fdatasync:
6828 ret = get_errno(fdatasync(arg1));
6829 break;
6830 #endif
6831 case TARGET_NR__sysctl:
6832 /* We don't implement this, but ENOTDIR is always a safe
6833 return value. */
6834 ret = -TARGET_ENOTDIR;
6835 break;
6836 case TARGET_NR_sched_getaffinity:
6838 unsigned int mask_size;
6839 unsigned long *mask;
6842 * sched_getaffinity needs multiples of ulong, so we need to take
6843 * care of mismatches between target ulong and host ulong sizes.
6845 if (arg2 & (sizeof(abi_ulong) - 1)) {
6846 ret = -TARGET_EINVAL;
6847 break;
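/* Round the byte count up to whole host unsigned longs; that is the
 * granularity the host sched_getaffinity syscall works in. */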
6849 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6851 mask = alloca(mask_size);
6852 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6854 if (!is_error(ret)) {
6855 if (copy_to_user(arg3, mask, ret)) {
6856 goto efault;
6860 break;
6861 case TARGET_NR_sched_setaffinity:
6863 unsigned int mask_size;
6864 unsigned long *mask;
6867 * sched_setaffinity needs multiples of ulong, so we need to take
6868 * care of mismatches between target ulong and host ulong sizes.
6870 if (arg2 & (sizeof(abi_ulong) - 1)) {
6871 ret = -TARGET_EINVAL;
6872 break;
6874 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6876 mask = alloca(mask_size);
6877 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6878 goto efault;
6880 memcpy(mask, p, arg2);
6881 unlock_user_struct(p, arg3, 0);
6883 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6885 break;
6886 case TARGET_NR_sched_setparam:
6888 struct sched_param *target_schp;
6889 struct sched_param schp;
6891 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6892 goto efault;
6893 schp.sched_priority = tswap32(target_schp->sched_priority);
6894 unlock_user_struct(target_schp, arg2, 0);
6895 ret = get_errno(sched_setparam(arg1, &schp));
6897 break;
6898 case TARGET_NR_sched_getparam:
6900 struct sched_param *target_schp;
6901 struct sched_param schp;
6902 ret = get_errno(sched_getparam(arg1, &schp));
6903 if (!is_error(ret)) {
6904 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6905 goto efault;
6906 target_schp->sched_priority = tswap32(schp.sched_priority);
6907 unlock_user_struct(target_schp, arg2, 1);
6910 break;
6911 case TARGET_NR_sched_setscheduler:
6913 struct sched_param *target_schp;
6914 struct sched_param schp;
6915 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6916 goto efault;
6917 schp.sched_priority = tswap32(target_schp->sched_priority);
6918 unlock_user_struct(target_schp, arg3, 0);
6919 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6921 break;
6922 case TARGET_NR_sched_getscheduler:
6923 ret = get_errno(sched_getscheduler(arg1));
6924 break;
6925 case TARGET_NR_sched_yield:
6926 ret = get_errno(sched_yield());
6927 break;
6928 case TARGET_NR_sched_get_priority_max:
6929 ret = get_errno(sched_get_priority_max(arg1));
6930 break;
6931 case TARGET_NR_sched_get_priority_min:
6932 ret = get_errno(sched_get_priority_min(arg1));
6933 break;
6934 case TARGET_NR_sched_rr_get_interval:
6936 struct timespec ts;
6937 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6938 if (!is_error(ret)) {
6939 host_to_target_timespec(arg2, &ts);
6942 break;
6943 case TARGET_NR_nanosleep:
6945 struct timespec req, rem;
6946 target_to_host_timespec(&req, arg1);
6947 ret = get_errno(nanosleep(&req, &rem));
6948 if (is_error(ret) && arg2) {
6949 host_to_target_timespec(arg2, &rem);
6952 break;
6953 #ifdef TARGET_NR_query_module
6954 case TARGET_NR_query_module:
6955 goto unimplemented;
6956 #endif
6957 #ifdef TARGET_NR_nfsservctl
6958 case TARGET_NR_nfsservctl:
6959 goto unimplemented;
6960 #endif
6961 case TARGET_NR_prctl:
6962 switch (arg1)
6964 case PR_GET_PDEATHSIG:
6966 int deathsig;
6967 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6968 if (!is_error(ret) && arg2
6969 && put_user_ual(deathsig, arg2))
6970 goto efault;
6972 break;
6973 default:
6974 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6975 break;
6977 break;
6978 #ifdef TARGET_NR_arch_prctl
6979 case TARGET_NR_arch_prctl:
6980 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6981 ret = do_arch_prctl(cpu_env, arg1, arg2);
6982 break;
6983 #else
6984 goto unimplemented;
6985 #endif
6986 #endif
6987 #ifdef TARGET_NR_pread
6988 case TARGET_NR_pread:
6989 if (regpairs_aligned(cpu_env))
6990 arg4 = arg5;
6991 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6992 goto efault;
6993 ret = get_errno(pread(arg1, p, arg3, arg4));
6994 unlock_user(p, arg2, ret);
6995 break;
6996 case TARGET_NR_pwrite:
6997 if (regpairs_aligned(cpu_env))
6998 arg4 = arg5;
6999 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7000 goto efault;
7001 ret = get_errno(pwrite(arg1, p, arg3, arg4));
7002 unlock_user(p, arg2, 0);
7003 break;
7004 #endif
7005 #ifdef TARGET_NR_pread64
7006 case TARGET_NR_pread64:
7007 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7008 goto efault;
7009 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7010 unlock_user(p, arg2, ret);
7011 break;
7012 case TARGET_NR_pwrite64:
7013 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7014 goto efault;
7015 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7016 unlock_user(p, arg2, 0);
7017 break;
7018 #endif
7019 case TARGET_NR_getcwd:
7020 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7021 goto efault;
7022 ret = get_errno(sys_getcwd1(p, arg2));
7023 unlock_user(p, arg1, ret);
7024 break;
7025 case TARGET_NR_capget:
7026 goto unimplemented;
7027 case TARGET_NR_capset:
7028 goto unimplemented;
7029 case TARGET_NR_sigaltstack:
7030 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7031 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7032 defined(TARGET_M68K) || defined(TARGET_S390X)
7033 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
7034 break;
7035 #else
7036 goto unimplemented;
7037 #endif
7038 case TARGET_NR_sendfile:
7039 goto unimplemented;
7040 #ifdef TARGET_NR_getpmsg
7041 case TARGET_NR_getpmsg:
7042 goto unimplemented;
7043 #endif
7044 #ifdef TARGET_NR_putpmsg
7045 case TARGET_NR_putpmsg:
7046 goto unimplemented;
7047 #endif
7048 #ifdef TARGET_NR_vfork
7049 case TARGET_NR_vfork:
7050 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7051 0, 0, 0, 0));
7052 break;
7053 #endif
7054 #ifdef TARGET_NR_ugetrlimit
7055 case TARGET_NR_ugetrlimit:
7057 struct rlimit rlim;
7058 int resource = target_to_host_resource(arg1);
7059 ret = get_errno(getrlimit(resource, &rlim));
7060 if (!is_error(ret)) {
7061 struct target_rlimit *target_rlim;
7062 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7063 goto efault;
7064 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7065 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7066 unlock_user_struct(target_rlim, arg2, 1);
7068 break;
7070 #endif
7071 #ifdef TARGET_NR_truncate64
7072 case TARGET_NR_truncate64:
7073 if (!(p = lock_user_string(arg1)))
7074 goto efault;
7075 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7076 unlock_user(p, arg1, 0);
7077 break;
7078 #endif
7079 #ifdef TARGET_NR_ftruncate64
7080 case TARGET_NR_ftruncate64:
7081 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7082 break;
7083 #endif
7084 #ifdef TARGET_NR_stat64
7085 case TARGET_NR_stat64:
7086 if (!(p = lock_user_string(arg1)))
7087 goto efault;
7088 ret = get_errno(stat(path(p), &st));
7089 unlock_user(p, arg1, 0);
7090 if (!is_error(ret))
7091 ret = host_to_target_stat64(cpu_env, arg2, &st);
7092 break;
7093 #endif
7094 #ifdef TARGET_NR_lstat64
7095 case TARGET_NR_lstat64:
7096 if (!(p = lock_user_string(arg1)))
7097 goto efault;
7098 ret = get_errno(lstat(path(p), &st));
7099 unlock_user(p, arg1, 0);
7100 if (!is_error(ret))
7101 ret = host_to_target_stat64(cpu_env, arg2, &st);
7102 break;
7103 #endif
7104 #ifdef TARGET_NR_fstat64
7105 case TARGET_NR_fstat64:
7106 ret = get_errno(fstat(arg1, &st));
7107 if (!is_error(ret))
7108 ret = host_to_target_stat64(cpu_env, arg2, &st);
7109 break;
7110 #endif
7111 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7112 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7113 #ifdef TARGET_NR_fstatat64
7114 case TARGET_NR_fstatat64:
7115 #endif
7116 #ifdef TARGET_NR_newfstatat
7117 case TARGET_NR_newfstatat:
7118 #endif
7119 if (!(p = lock_user_string(arg2)))
7120 goto efault;
7121 #ifdef __NR_fstatat64
7122 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7123 #else
7124 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7125 #endif
7126 if (!is_error(ret))
7127 ret = host_to_target_stat64(cpu_env, arg3, &st);
7128 break;
7129 #endif
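/* From here down to the *32 variants are the legacy 16-bit uid/gid
 * syscalls: host ids are narrowed with high2lowuid()/high2lowgid() on
 * the way out and widened with low2highuid()/low2highgid() on the way
 * in. The *32 variants further below pass 32-bit ids through
 * unchanged. */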
7130 case TARGET_NR_lchown:
7131 if (!(p = lock_user_string(arg1)))
7132 goto efault;
7133 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7134 unlock_user(p, arg1, 0);
7135 break;
7136 #ifdef TARGET_NR_getuid
7137 case TARGET_NR_getuid:
7138 ret = get_errno(high2lowuid(getuid()));
7139 break;
7140 #endif
7141 #ifdef TARGET_NR_getgid
7142 case TARGET_NR_getgid:
7143 ret = get_errno(high2lowgid(getgid()));
7144 break;
7145 #endif
7146 #ifdef TARGET_NR_geteuid
7147 case TARGET_NR_geteuid:
7148 ret = get_errno(high2lowuid(geteuid()));
7149 break;
7150 #endif
7151 #ifdef TARGET_NR_getegid
7152 case TARGET_NR_getegid:
7153 ret = get_errno(high2lowgid(getegid()));
7154 break;
7155 #endif
7156 case TARGET_NR_setreuid:
7157 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7158 break;
7159 case TARGET_NR_setregid:
7160 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7161 break;
7162 case TARGET_NR_getgroups:
7164 int gidsetsize = arg1;
7165 target_id *target_grouplist;
7166 gid_t *grouplist;
7167 int i;
7169 grouplist = alloca(gidsetsize * sizeof(gid_t));
7170 ret = get_errno(getgroups(gidsetsize, grouplist));
7171 if (gidsetsize == 0)
7172 break;
7173 if (!is_error(ret)) {
7174 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7175 if (!target_grouplist)
7176 goto efault;
7177 for(i = 0;i < ret; i++)
7178 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7179 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7182 break;
7183 case TARGET_NR_setgroups:
7185 int gidsetsize = arg1;
7186 target_id *target_grouplist;
7187 gid_t *grouplist;
7188 int i;
7190 grouplist = alloca(gidsetsize * sizeof(gid_t));
7191 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7192 if (!target_grouplist) {
7193 ret = -TARGET_EFAULT;
7194 goto fail;
7196 for(i = 0;i < gidsetsize; i++)
7197 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7198 unlock_user(target_grouplist, arg2, 0);
7199 ret = get_errno(setgroups(gidsetsize, grouplist));
7201 break;
7202 case TARGET_NR_fchown:
7203 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7204 break;
7205 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7206 case TARGET_NR_fchownat:
7207 if (!(p = lock_user_string(arg2)))
7208 goto efault;
7209 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7210 unlock_user(p, arg2, 0);
7211 break;
7212 #endif
7213 #ifdef TARGET_NR_setresuid
7214 case TARGET_NR_setresuid:
7215 ret = get_errno(setresuid(low2highuid(arg1),
7216 low2highuid(arg2),
7217 low2highuid(arg3)));
7218 break;
7219 #endif
7220 #ifdef TARGET_NR_getresuid
7221 case TARGET_NR_getresuid:
7223 uid_t ruid, euid, suid;
7224 ret = get_errno(getresuid(&ruid, &euid, &suid));
7225 if (!is_error(ret)) {
7226 if (put_user_u16(high2lowuid(ruid), arg1)
7227 || put_user_u16(high2lowuid(euid), arg2)
7228 || put_user_u16(high2lowuid(suid), arg3))
7229 goto efault;
7232 break;
7233 #endif
7234 #ifdef TARGET_NR_setresgid
7235 case TARGET_NR_setresgid:
7236 ret = get_errno(setresgid(low2highgid(arg1),
7237 low2highgid(arg2),
7238 low2highgid(arg3)));
7239 break;
7240 #endif
7241 #ifdef TARGET_NR_getresgid
7242 case TARGET_NR_getresgid:
7244 gid_t rgid, egid, sgid;
7245 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7246 if (!is_error(ret)) {
7247 if (put_user_u16(high2lowgid(rgid), arg1)
7248 || put_user_u16(high2lowgid(egid), arg2)
7249 || put_user_u16(high2lowgid(sgid), arg3))
7250 goto efault;
7253 break;
7254 #endif
7255 case TARGET_NR_chown:
7256 if (!(p = lock_user_string(arg1)))
7257 goto efault;
7258 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7259 unlock_user(p, arg1, 0);
7260 break;
7261 case TARGET_NR_setuid:
7262 ret = get_errno(setuid(low2highuid(arg1)));
7263 break;
7264 case TARGET_NR_setgid:
7265 ret = get_errno(setgid(low2highgid(arg1)));
7266 break;
7267 case TARGET_NR_setfsuid:
7268 ret = get_errno(setfsuid(arg1));
7269 break;
7270 case TARGET_NR_setfsgid:
7271 ret = get_errno(setfsgid(arg1));
7272 break;
7274 #ifdef TARGET_NR_lchown32
7275 case TARGET_NR_lchown32:
7276 if (!(p = lock_user_string(arg1)))
7277 goto efault;
7278 ret = get_errno(lchown(p, arg2, arg3));
7279 unlock_user(p, arg1, 0);
7280 break;
7281 #endif
7282 #ifdef TARGET_NR_getuid32
7283 case TARGET_NR_getuid32:
7284 ret = get_errno(getuid());
7285 break;
7286 #endif
7288 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7289 /* Alpha specific */
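/* getxuid returns the real uid as the normal syscall result and, per
 * the OSF/1 convention, hands back the effective uid in register a4. */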
7290 case TARGET_NR_getxuid:
7292 uid_t euid;
7293 euid=geteuid();
7294 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7296 ret = get_errno(getuid());
7297 break;
7298 #endif
7299 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7300 /* Alpha specific */
7301 case TARGET_NR_getxgid:
7303 gid_t egid;
7304 egid=getegid();
7305 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7307 ret = get_errno(getgid());
7308 break;
7309 #endif
7310 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7311 /* Alpha specific */
7312 case TARGET_NR_osf_getsysinfo:
7313 ret = -TARGET_EOPNOTSUPP;
7314 switch (arg1) {
7315 case TARGET_GSI_IEEE_FP_CONTROL:
7317 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7319 /* Copied from linux ieee_fpcr_to_swcr. */
7320 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7321 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7322 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7323 | SWCR_TRAP_ENABLE_DZE
7324 | SWCR_TRAP_ENABLE_OVF);
7325 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7326 | SWCR_TRAP_ENABLE_INE);
7327 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7328 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7330 if (put_user_u64 (swcr, arg2))
7331 goto efault;
7332 ret = 0;
7334 break;
7336 /* case GSI_IEEE_STATE_AT_SIGNAL:
7337 -- Not implemented in linux kernel.
7338 case GSI_UACPROC:
7339 -- Retrieves current unaligned access state; not much used.
7340 case GSI_PROC_TYPE:
7341 -- Retrieves implver information; surely not used.
7342 case GSI_GET_HWRPB:
7343 -- Grabs a copy of the HWRPB; surely not used.
7346 break;
7347 #endif
7348 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7349 /* Alpha specific */
7350 case TARGET_NR_osf_setsysinfo:
7351 ret = -TARGET_EOPNOTSUPP;
7352 switch (arg1) {
7353 case TARGET_SSI_IEEE_FP_CONTROL:
7354 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7356 uint64_t swcr, fpcr, orig_fpcr;
7358 if (get_user_u64 (swcr, arg2))
7359 goto efault;
7360 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7361 fpcr = orig_fpcr & FPCR_DYN_MASK;
7363 /* Copied from linux ieee_swcr_to_fpcr. */
7364 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7365 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7366 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7367 | SWCR_TRAP_ENABLE_DZE
7368 | SWCR_TRAP_ENABLE_OVF)) << 48;
7369 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7370 | SWCR_TRAP_ENABLE_INE)) << 57;
7371 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7372 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7374 cpu_alpha_store_fpcr (cpu_env, fpcr);
7375 ret = 0;
7377 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7378 /* Old exceptions are not signaled. */
7379 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7381 /* If any exceptions were set by this call and are unmasked,
7382 send a signal. */
7383 /* ??? FIXME */
7386 break;
7388 /* case SSI_NVPAIRS:
7389 -- Used with SSIN_UACPROC to enable unaligned accesses.
7390 case SSI_IEEE_STATE_AT_SIGNAL:
7391 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7392 -- Not implemented in linux kernel
7395 break;
7396 #endif
7397 #ifdef TARGET_NR_osf_sigprocmask
7398 /* Alpha specific. */
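/* The OSF/1-style sigprocmask takes the new mask by value in arg2 and
 * returns the previous mask as the syscall result instead of writing
 * it through a pointer. */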
7399 case TARGET_NR_osf_sigprocmask:
7401 abi_ulong mask;
7402 int how;
7403 sigset_t set, oldset;
7405 switch(arg1) {
7406 case TARGET_SIG_BLOCK:
7407 how = SIG_BLOCK;
7408 break;
7409 case TARGET_SIG_UNBLOCK:
7410 how = SIG_UNBLOCK;
7411 break;
7412 case TARGET_SIG_SETMASK:
7413 how = SIG_SETMASK;
7414 break;
7415 default:
7416 ret = -TARGET_EINVAL;
7417 goto fail;
7419 mask = arg2;
7420 target_to_host_old_sigset(&set, &mask);
7421 sigprocmask(how, &set, &oldset);
7422 host_to_target_old_sigset(&mask, &oldset);
7423 ret = mask;
7425 break;
7426 #endif
7428 #ifdef TARGET_NR_getgid32
7429 case TARGET_NR_getgid32:
7430 ret = get_errno(getgid());
7431 break;
7432 #endif
7433 #ifdef TARGET_NR_geteuid32
7434 case TARGET_NR_geteuid32:
7435 ret = get_errno(geteuid());
7436 break;
7437 #endif
7438 #ifdef TARGET_NR_getegid32
7439 case TARGET_NR_getegid32:
7440 ret = get_errno(getegid());
7441 break;
7442 #endif
7443 #ifdef TARGET_NR_setreuid32
7444 case TARGET_NR_setreuid32:
7445 ret = get_errno(setreuid(arg1, arg2));
7446 break;
7447 #endif
7448 #ifdef TARGET_NR_setregid32
7449 case TARGET_NR_setregid32:
7450 ret = get_errno(setregid(arg1, arg2));
7451 break;
7452 #endif
7453 #ifdef TARGET_NR_getgroups32
7454 case TARGET_NR_getgroups32:
7456 int gidsetsize = arg1;
7457 uint32_t *target_grouplist;
7458 gid_t *grouplist;
7459 int i;
7461 grouplist = alloca(gidsetsize * sizeof(gid_t));
7462 ret = get_errno(getgroups(gidsetsize, grouplist));
7463 if (gidsetsize == 0)
7464 break;
7465 if (!is_error(ret)) {
7466 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7467 if (!target_grouplist) {
7468 ret = -TARGET_EFAULT;
7469 goto fail;
7471 for(i = 0;i < ret; i++)
7472 target_grouplist[i] = tswap32(grouplist[i]);
7473 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7476 break;
7477 #endif
7478 #ifdef TARGET_NR_setgroups32
7479 case TARGET_NR_setgroups32:
7481 int gidsetsize = arg1;
7482 uint32_t *target_grouplist;
7483 gid_t *grouplist;
7484 int i;
7486 grouplist = alloca(gidsetsize * sizeof(gid_t));
7487 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7488 if (!target_grouplist) {
7489 ret = -TARGET_EFAULT;
7490 goto fail;
7492 for(i = 0;i < gidsetsize; i++)
7493 grouplist[i] = tswap32(target_grouplist[i]);
7494 unlock_user(target_grouplist, arg2, 0);
7495 ret = get_errno(setgroups(gidsetsize, grouplist));
7497 break;
7498 #endif
7499 #ifdef TARGET_NR_fchown32
7500 case TARGET_NR_fchown32:
7501 ret = get_errno(fchown(arg1, arg2, arg3));
7502 break;
7503 #endif
7504 #ifdef TARGET_NR_setresuid32
7505 case TARGET_NR_setresuid32:
7506 ret = get_errno(setresuid(arg1, arg2, arg3));
7507 break;
7508 #endif
7509 #ifdef TARGET_NR_getresuid32
7510 case TARGET_NR_getresuid32:
7512 uid_t ruid, euid, suid;
7513 ret = get_errno(getresuid(&ruid, &euid, &suid));
7514 if (!is_error(ret)) {
7515 if (put_user_u32(ruid, arg1)
7516 || put_user_u32(euid, arg2)
7517 || put_user_u32(suid, arg3))
7518 goto efault;
7521 break;
7522 #endif
7523 #ifdef TARGET_NR_setresgid32
7524 case TARGET_NR_setresgid32:
7525 ret = get_errno(setresgid(arg1, arg2, arg3));
7526 break;
7527 #endif
7528 #ifdef TARGET_NR_getresgid32
7529 case TARGET_NR_getresgid32:
7531 gid_t rgid, egid, sgid;
7532 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7533 if (!is_error(ret)) {
7534 if (put_user_u32(rgid, arg1)
7535 || put_user_u32(egid, arg2)
7536 || put_user_u32(sgid, arg3))
7537 goto efault;
7540 break;
7541 #endif
7542 #ifdef TARGET_NR_chown32
7543 case TARGET_NR_chown32:
7544 if (!(p = lock_user_string(arg1)))
7545 goto efault;
7546 ret = get_errno(chown(p, arg2, arg3));
7547 unlock_user(p, arg1, 0);
7548 break;
7549 #endif
7550 #ifdef TARGET_NR_setuid32
7551 case TARGET_NR_setuid32:
7552 ret = get_errno(setuid(arg1));
7553 break;
7554 #endif
7555 #ifdef TARGET_NR_setgid32
7556 case TARGET_NR_setgid32:
7557 ret = get_errno(setgid(arg1));
7558 break;
7559 #endif
7560 #ifdef TARGET_NR_setfsuid32
7561 case TARGET_NR_setfsuid32:
7562 ret = get_errno(setfsuid(arg1));
7563 break;
7564 #endif
7565 #ifdef TARGET_NR_setfsgid32
7566 case TARGET_NR_setfsgid32:
7567 ret = get_errno(setfsgid(arg1));
7568 break;
7569 #endif
7571 case TARGET_NR_pivot_root:
7572 goto unimplemented;
7573 #ifdef TARGET_NR_mincore
7574 case TARGET_NR_mincore:
7576 void *a;
7577 ret = -TARGET_EFAULT;
7578 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7579 goto efault;
7580 if (!(p = lock_user_string(arg3)))
7581 goto mincore_fail;
7582 ret = get_errno(mincore(a, arg2, p));
7583 unlock_user(p, arg3, ret);
7584 mincore_fail:
7585 unlock_user(a, arg1, 0);
7587 break;
7588 #endif
7589 #ifdef TARGET_NR_arm_fadvise64_64
7590 case TARGET_NR_arm_fadvise64_64:
7593 * arm_fadvise64_64 looks like fadvise64_64 but
7594 * with different argument order
7596 abi_long temp;
7597 temp = arg3;
7598 arg3 = arg4;
7599 arg4 = temp;
7601 #endif
7602 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7603 #ifdef TARGET_NR_fadvise64_64
7604 case TARGET_NR_fadvise64_64:
7605 #endif
7606 #ifdef TARGET_NR_fadvise64
7607 case TARGET_NR_fadvise64:
7608 #endif
7609 #ifdef TARGET_S390X
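/* The s390x ABI numbers POSIX_FADV_DONTNEED and POSIX_FADV_NOREUSE as
 * 6 and 7 rather than the generic 4 and 5, so translate those to the
 * host values and turn the generic numbers, which are not valid
 * advice on this target, into deliberately invalid ones. */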
7610 switch (arg4) {
7611 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7612 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7613 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7614 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7615 default: break;
7617 #endif
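/* posix_fadvise() reports failure by returning the error number
 * directly rather than via errno, hence the negation instead of the
 * usual get_errno() wrapper. */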
7618 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7619 break;
7620 #endif
7621 #ifdef TARGET_NR_madvise
7622 case TARGET_NR_madvise:
7623 /* A straight passthrough may not be safe because qemu sometimes
7624 turns private file-backed mappings into anonymous mappings.
7625 This will break MADV_DONTNEED.
7626 This is a hint, so ignoring and returning success is ok. */
7627 ret = get_errno(0);
7628 break;
7629 #endif
7630 #if TARGET_ABI_BITS == 32
7631 case TARGET_NR_fcntl64:
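/* On 32-bit targets F_GETLK64/F_SETLK64/F_SETLKW64 operate on a
 * 64-bit flock layout that has to be converted field by field; ARM
 * EABI additionally pads struct flock64 differently, hence the
 * separate target_eabi_flock64 path. */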
7633 int cmd;
7634 struct flock64 fl;
7635 struct target_flock64 *target_fl;
7636 #ifdef TARGET_ARM
7637 struct target_eabi_flock64 *target_efl;
7638 #endif
7640 cmd = target_to_host_fcntl_cmd(arg2);
7641 if (cmd == -TARGET_EINVAL) {
7642 ret = cmd;
7643 break;
7646 switch(arg2) {
7647 case TARGET_F_GETLK64:
7648 #ifdef TARGET_ARM
7649 if (((CPUARMState *)cpu_env)->eabi) {
7650 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7651 goto efault;
7652 fl.l_type = tswap16(target_efl->l_type);
7653 fl.l_whence = tswap16(target_efl->l_whence);
7654 fl.l_start = tswap64(target_efl->l_start);
7655 fl.l_len = tswap64(target_efl->l_len);
7656 fl.l_pid = tswap32(target_efl->l_pid);
7657 unlock_user_struct(target_efl, arg3, 0);
7658 } else
7659 #endif
7661 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7662 goto efault;
7663 fl.l_type = tswap16(target_fl->l_type);
7664 fl.l_whence = tswap16(target_fl->l_whence);
7665 fl.l_start = tswap64(target_fl->l_start);
7666 fl.l_len = tswap64(target_fl->l_len);
7667 fl.l_pid = tswap32(target_fl->l_pid);
7668 unlock_user_struct(target_fl, arg3, 0);
7670 ret = get_errno(fcntl(arg1, cmd, &fl));
7671 if (ret == 0) {
7672 #ifdef TARGET_ARM
7673 if (((CPUARMState *)cpu_env)->eabi) {
7674 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7675 goto efault;
7676 target_efl->l_type = tswap16(fl.l_type);
7677 target_efl->l_whence = tswap16(fl.l_whence);
7678 target_efl->l_start = tswap64(fl.l_start);
7679 target_efl->l_len = tswap64(fl.l_len);
7680 target_efl->l_pid = tswap32(fl.l_pid);
7681 unlock_user_struct(target_efl, arg3, 1);
7682 } else
7683 #endif
7685 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7686 goto efault;
7687 target_fl->l_type = tswap16(fl.l_type);
7688 target_fl->l_whence = tswap16(fl.l_whence);
7689 target_fl->l_start = tswap64(fl.l_start);
7690 target_fl->l_len = tswap64(fl.l_len);
7691 target_fl->l_pid = tswap32(fl.l_pid);
7692 unlock_user_struct(target_fl, arg3, 1);
7695 break;
7697 case TARGET_F_SETLK64:
7698 case TARGET_F_SETLKW64:
7699 #ifdef TARGET_ARM
7700 if (((CPUARMState *)cpu_env)->eabi) {
7701 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7702 goto efault;
7703 fl.l_type = tswap16(target_efl->l_type);
7704 fl.l_whence = tswap16(target_efl->l_whence);
7705 fl.l_start = tswap64(target_efl->l_start);
7706 fl.l_len = tswap64(target_efl->l_len);
7707 fl.l_pid = tswap32(target_efl->l_pid);
7708 unlock_user_struct(target_efl, arg3, 0);
7709 } else
7710 #endif
7712 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7713 goto efault;
7714 fl.l_type = tswap16(target_fl->l_type);
7715 fl.l_whence = tswap16(target_fl->l_whence);
7716 fl.l_start = tswap64(target_fl->l_start);
7717 fl.l_len = tswap64(target_fl->l_len);
7718 fl.l_pid = tswap32(target_fl->l_pid);
7719 unlock_user_struct(target_fl, arg3, 0);
7721 ret = get_errno(fcntl(arg1, cmd, &fl));
7722 break;
7723 default:
7724 ret = do_fcntl(arg1, arg2, arg3);
7725 break;
7727 break;
7729 #endif
7730 #ifdef TARGET_NR_cacheflush
7731 case TARGET_NR_cacheflush:
7732 /* self-modifying code is handled automatically, so nothing needed */
7733 ret = 0;
7734 break;
7735 #endif
7736 #ifdef TARGET_NR_security
7737 case TARGET_NR_security:
7738 goto unimplemented;
7739 #endif
7740 #ifdef TARGET_NR_getpagesize
7741 case TARGET_NR_getpagesize:
7742 ret = TARGET_PAGE_SIZE;
7743 break;
7744 #endif
7745 case TARGET_NR_gettid:
7746 ret = get_errno(gettid());
7747 break;
7748 #ifdef TARGET_NR_readahead
7749 case TARGET_NR_readahead:
7750 #if TARGET_ABI_BITS == 32
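/* On 32-bit ABIs the 64-bit offset arrives split across a register
 * pair; ABIs that require such pairs to start on an even register
 * insert a padding slot, which shifts the remaining arguments up by
 * one (that is what regpairs_aligned() detects). */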
7751 if (regpairs_aligned(cpu_env)) {
7752 arg2 = arg3;
7753 arg3 = arg4;
7754 arg4 = arg5;
7756 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7757 #else
7758 ret = get_errno(readahead(arg1, arg2, arg3));
7759 #endif
7760 break;
7761 #endif
7762 #ifdef CONFIG_ATTR
7763 #ifdef TARGET_NR_setxattr
7764 case TARGET_NR_lsetxattr:
7765 case TARGET_NR_fsetxattr:
7766 case TARGET_NR_lgetxattr:
7767 case TARGET_NR_fgetxattr:
7768 case TARGET_NR_listxattr:
7769 case TARGET_NR_llistxattr:
7770 case TARGET_NR_flistxattr:
7771 case TARGET_NR_lremovexattr:
7772 case TARGET_NR_fremovexattr:
7773 ret = -TARGET_EOPNOTSUPP;
7774 break;
7775 case TARGET_NR_setxattr:
7777 void *p, *n, *v;
7778 p = lock_user_string(arg1);
7779 n = lock_user_string(arg2);
7780 v = lock_user(VERIFY_READ, arg3, arg4, 1);
7781 if (p && n && v) {
7782 ret = get_errno(setxattr(p, n, v, arg4, arg5));
7783 } else {
7784 ret = -TARGET_EFAULT;
7786 unlock_user(p, arg1, 0);
7787 unlock_user(n, arg2, 0);
7788 unlock_user(v, arg3, 0);
7790 break;
7791 case TARGET_NR_getxattr:
7793 void *p, *n, *v;
7794 p = lock_user_string(arg1);
7795 n = lock_user_string(arg2);
7796 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7797 if (p && n && v) {
7798 ret = get_errno(getxattr(p, n, v, arg4));
7799 } else {
7800 ret = -TARGET_EFAULT;
7802 unlock_user(p, arg1, 0);
7803 unlock_user(n, arg2, 0);
7804 unlock_user(v, arg3, arg4);
7806 break;
7807 case TARGET_NR_removexattr:
7809 void *p, *n;
7810 p = lock_user_string(arg1);
7811 n = lock_user_string(arg2);
7812 if (p && n) {
7813 ret = get_errno(removexattr(p, n));
7814 } else {
7815 ret = -TARGET_EFAULT;
7817 unlock_user(p, arg1, 0);
7818 unlock_user(n, arg2, 0);
7820 break;
7821 #endif
7822 #endif /* CONFIG_ATTR */
7823 #ifdef TARGET_NR_set_thread_area
7824 case TARGET_NR_set_thread_area:
7825 #if defined(TARGET_MIPS)
7826 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7827 ret = 0;
7828 break;
7829 #elif defined(TARGET_CRIS)
7830 if (arg1 & 0xff)
7831 ret = -TARGET_EINVAL;
7832 else {
7833 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7834 ret = 0;
7836 break;
7837 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7838 ret = do_set_thread_area(cpu_env, arg1);
7839 break;
7840 #else
7841 goto unimplemented_nowarn;
7842 #endif
7843 #endif
7844 #ifdef TARGET_NR_get_thread_area
7845 case TARGET_NR_get_thread_area:
7846 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7847 ret = do_get_thread_area(cpu_env, arg1);
7848 #else
7849 goto unimplemented_nowarn;
7850 #endif
7851 #endif
7852 #ifdef TARGET_NR_getdomainname
7853 case TARGET_NR_getdomainname:
7854 goto unimplemented_nowarn;
7855 #endif
7857 #ifdef TARGET_NR_clock_gettime
7858 case TARGET_NR_clock_gettime:
7860 struct timespec ts;
7861 ret = get_errno(clock_gettime(arg1, &ts));
7862 if (!is_error(ret)) {
7863 host_to_target_timespec(arg2, &ts);
7865 break;
7867 #endif
7868 #ifdef TARGET_NR_clock_getres
7869 case TARGET_NR_clock_getres:
7871 struct timespec ts;
7872 ret = get_errno(clock_getres(arg1, &ts));
7873 if (!is_error(ret)) {
7874 host_to_target_timespec(arg2, &ts);
7876 break;
7878 #endif
7879 #ifdef TARGET_NR_clock_nanosleep
7880 case TARGET_NR_clock_nanosleep:
7882 struct timespec ts;
7883 target_to_host_timespec(&ts, arg3);
7884 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7885 if (arg4)
7886 host_to_target_timespec(arg4, &ts);
7887 break;
7889 #endif
7891 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7892 case TARGET_NR_set_tid_address:
7893 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7894 break;
7895 #endif
7897 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7898 case TARGET_NR_tkill:
7899 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7900 break;
7901 #endif
7903 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7904 case TARGET_NR_tgkill:
7905 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7906 target_to_host_signal(arg3)));
7907 break;
7908 #endif
7910 #ifdef TARGET_NR_set_robust_list
7911 case TARGET_NR_set_robust_list:
7912 goto unimplemented_nowarn;
7913 #endif
7915 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7916 case TARGET_NR_utimensat:
7918 struct timespec *tsp, ts[2];
7919 if (!arg3) {
7920 tsp = NULL;
7921 } else {
7922 target_to_host_timespec(ts, arg3);
7923 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
7924 tsp = ts;
7926 if (!arg2)
7927 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7928 else {
7929 if (!(p = lock_user_string(arg2))) {
7930 ret = -TARGET_EFAULT;
7931 goto fail;
7933 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7934 unlock_user(p, arg2, 0);
7937 break;
7938 #endif
7939 #if defined(CONFIG_USE_NPTL)
7940 case TARGET_NR_futex:
7941 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7942 break;
7943 #endif
7944 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7945 case TARGET_NR_inotify_init:
7946 ret = get_errno(sys_inotify_init());
7947 break;
7948 #endif
7949 #ifdef CONFIG_INOTIFY1
7950 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7951 case TARGET_NR_inotify_init1:
7952 ret = get_errno(sys_inotify_init1(arg1));
7953 break;
7954 #endif
7955 #endif
7956 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7957 case TARGET_NR_inotify_add_watch:
7958 p = lock_user_string(arg2);
7959 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7960 unlock_user(p, arg2, 0);
7961 break;
7962 #endif
7963 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7964 case TARGET_NR_inotify_rm_watch:
7965 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7966 break;
7967 #endif
7969 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7970 case TARGET_NR_mq_open:
7972 struct mq_attr posix_mq_attr;
7974 p = lock_user_string(arg1 - 1);
7975 if (arg4 != 0)
7976 copy_from_user_mq_attr (&posix_mq_attr, arg4);
7977 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
7978 unlock_user (p, arg1, 0);
7980 break;
7982 case TARGET_NR_mq_unlink:
7983 p = lock_user_string(arg1 - 1);
7984 ret = get_errno(mq_unlink(p));
7985 unlock_user (p, arg1, 0);
7986 break;
7988 case TARGET_NR_mq_timedsend:
7990 struct timespec ts;
7992 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7993 if (arg5 != 0) {
7994 target_to_host_timespec(&ts, arg5);
7995 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7996 host_to_target_timespec(arg5, &ts);
7998 else
7999 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8000 unlock_user (p, arg2, arg3);
8002 break;
8004 case TARGET_NR_mq_timedreceive:
8006 struct timespec ts;
8007 unsigned int prio;
8009 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8010 if (arg5 != 0) {
8011 target_to_host_timespec(&ts, arg5);
8012 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8013 host_to_target_timespec(arg5, &ts);
8015 else
8016 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8017 unlock_user (p, arg2, arg3);
8018 if (arg4 != 0)
8019 put_user_u32(prio, arg4);
8021 break;
8023 /* Not implemented for now... */
8024 /* case TARGET_NR_mq_notify: */
8025 /* break; */
8027 case TARGET_NR_mq_getsetattr:
8029 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8030 ret = 0;
8031 if (arg3 != 0) {
8032 ret = mq_getattr(arg1, &posix_mq_attr_out);
8033 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8035 if (arg2 != 0) {
8036 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8037 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8041 break;
8042 #endif
8044 #ifdef CONFIG_SPLICE
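/* tee/splice/vmsplice operate on host file descriptors; only the loff_t
 * offsets and the iovec description need translating for the guest. */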
8045 #ifdef TARGET_NR_tee
8046 case TARGET_NR_tee:
8047 {
8048 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8049 }
8050 break;
8051 #endif
8052 #ifdef TARGET_NR_splice
8053 case TARGET_NR_splice:
8054 {
8055 loff_t loff_in, loff_out;
8056 loff_t *ploff_in = NULL, *ploff_out = NULL;
8057 if (arg2) {
8058 get_user_u64(loff_in, arg2);
8059 ploff_in = &loff_in;
8060 }
8061 if (arg4) {
8062 get_user_u64(loff_out, arg4);
8063 ploff_out = &loff_out;
8064 }
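/* Note: the offsets updated by the host splice() are not copied back to
 * guest memory here, so the guest never sees the new file positions. */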
8065 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8066 }
8067 break;
8068 #endif
8069 #ifdef TARGET_NR_vmsplice
8070 case TARGET_NR_vmsplice:
8071 {
8072 int count = arg3;
8073 struct iovec *vec;
8075 vec = alloca(count * sizeof(struct iovec));
8076 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
8077 goto efault;
8078 ret = get_errno(vmsplice(arg1, vec, count, arg4));
8079 unlock_iovec(vec, arg2, count, 0);
8080 }
8081 break;
8082 #endif
8083 #endif /* CONFIG_SPLICE */
8084 #ifdef CONFIG_EVENTFD
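/* The flag-less eventfd syscall is emulated with the two-argument host
 * eventfd() and flags forced to zero; eventfd2 passes flags through. */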
8085 #if defined(TARGET_NR_eventfd)
8086 case TARGET_NR_eventfd:
8087 ret = get_errno(eventfd(arg1, 0));
8088 break;
8089 #endif
8090 #if defined(TARGET_NR_eventfd2)
8091 case TARGET_NR_eventfd2:
8092 ret = get_errno(eventfd(arg1, arg2));
8093 break;
8094 #endif
8095 #endif /* CONFIG_EVENTFD */
8096 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8097 case TARGET_NR_fallocate:
8098 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8099 break;
8100 #endif
8101 #if defined(CONFIG_SYNC_FILE_RANGE)
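/* On 32-bit ABIs the two 64-bit offsets arrive split across register
 * pairs; MIPS shifts every argument up by one slot (presumably for
 * register-pair alignment), hence the TARGET_MIPS special case below. */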
8102 #if defined(TARGET_NR_sync_file_range)
8103 case TARGET_NR_sync_file_range:
8104 #if TARGET_ABI_BITS == 32
8105 #if defined(TARGET_MIPS)
8106 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8107 target_offset64(arg5, arg6), arg7));
8108 #else
8109 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8110 target_offset64(arg4, arg5), arg6));
8111 #endif /* !TARGET_MIPS */
8112 #else
8113 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8114 #endif
8115 break;
8116 #endif
8117 #if defined(TARGET_NR_sync_file_range2)
8118 case TARGET_NR_sync_file_range2:
8119 /* This is like sync_file_range but the arguments are reordered */
8120 #if TARGET_ABI_BITS == 32
8121 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8122 target_offset64(arg5, arg6), arg2));
8123 #else
8124 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8125 #endif
8126 break;
8127 #endif
8128 #endif
8129 #if defined(CONFIG_EPOLL)
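/* struct epoll_event is converted field by field because the guest and
 * host layouts and byte order may differ. */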
8130 #if defined(TARGET_NR_epoll_create)
8131 case TARGET_NR_epoll_create:
8132 ret = get_errno(epoll_create(arg1));
8133 break;
8134 #endif
8135 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8136 case TARGET_NR_epoll_create1:
8137 ret = get_errno(epoll_create1(arg1));
8138 break;
8139 #endif
8140 #if defined(TARGET_NR_epoll_ctl)
8141 case TARGET_NR_epoll_ctl:
8142 {
8143 struct epoll_event ep;
8144 struct epoll_event *epp = 0;
8145 if (arg4) {
8146 struct target_epoll_event *target_ep;
8147 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8148 goto efault;
8149 }
8150 ep.events = tswap32(target_ep->events);
8151 /* The epoll_data_t union is just opaque data to the kernel,
8152 * so we transfer all 64 bits across and need not worry what
8153 * actual data type it is.
8154 */
8155 ep.data.u64 = tswap64(target_ep->data.u64);
8156 unlock_user_struct(target_ep, arg4, 0);
8157 epp = &ep;
8158 }
8159 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8160 break;
8161 }
8162 #endif
8164 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8165 #define IMPLEMENT_EPOLL_PWAIT
8166 #endif
8167 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8168 #if defined(TARGET_NR_epoll_wait)
8169 case TARGET_NR_epoll_wait:
8170 #endif
8171 #if defined(IMPLEMENT_EPOLL_PWAIT)
8172 case TARGET_NR_epoll_pwait:
8173 #endif
8174 {
8175 struct target_epoll_event *target_ep;
8176 struct epoll_event *ep;
8177 int epfd = arg1;
8178 int maxevents = arg3;
8179 int timeout = arg4;
8181 target_ep = lock_user(VERIFY_WRITE, arg2,
8182 maxevents * sizeof(struct target_epoll_event), 1);
8183 if (!target_ep) {
8184 goto efault;
8185 }
8187 ep = alloca(maxevents * sizeof(struct epoll_event));
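/* epoll_wait and epoll_pwait share the copy-out code below; only the host
 * call differs, selected on the original syscall number. */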
8189 switch (num) {
8190 #if defined(IMPLEMENT_EPOLL_PWAIT)
8191 case TARGET_NR_epoll_pwait:
8192 {
8193 target_sigset_t *target_set;
8194 sigset_t _set, *set = &_set;
8196 if (arg5) {
8197 target_set = lock_user(VERIFY_READ, arg5,
8198 sizeof(target_sigset_t), 1);
8199 if (!target_set) {
8200 unlock_user(target_ep, arg2, 0);
8201 goto efault;
8202 }
8203 target_to_host_sigset(set, target_set);
8204 unlock_user(target_set, arg5, 0);
8205 } else {
8206 set = NULL;
8207 }
8209 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8210 break;
8211 }
8212 #endif
8213 #if defined(TARGET_NR_epoll_wait)
8214 case TARGET_NR_epoll_wait:
8215 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8216 break;
8217 #endif
8218 default:
8219 ret = -TARGET_ENOSYS;
8220 }
8221 if (!is_error(ret)) {
8222 int i;
8223 for (i = 0; i < ret; i++) {
8224 target_ep[i].events = tswap32(ep[i].events);
8225 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8226 }
8227 }
8228 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8229 break;
8230 }
8231 #endif
8232 #endif
8233 #ifdef TARGET_NR_prlimit64
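/* prlimit64: rlim_cur/rlim_max are plain 64-bit values, so a tswap64 in
 * each direction is the only conversion required. */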
8234 case TARGET_NR_prlimit64:
8235 {
8236 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8237 struct target_rlimit64 *target_rnew, *target_rold;
8238 struct host_rlimit64 rnew, rold, *rnewp = 0;
8239 if (arg3) {
8240 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8241 goto efault;
8242 }
8243 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8244 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8245 unlock_user_struct(target_rnew, arg3, 0);
8246 rnewp = &rnew;
8247 }
8249 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8250 if (!is_error(ret) && arg4) {
8251 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8252 goto efault;
8253 }
8254 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8255 target_rold->rlim_max = tswap64(rold.rlim_max);
8256 unlock_user_struct(target_rold, arg4, 1);
8257 }
8258 break;
8259 }
8260 #endif
8261 default:
8262 unimplemented:
8263 gemu_log("qemu: Unsupported syscall: %d\n", num);
8264 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8265 unimplemented_nowarn:
8266 #endif
8267 ret = -TARGET_ENOSYS;
8268 break;
8269 }
8270 fail:
8271 #ifdef DEBUG
8272 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8273 #endif
8274 if(do_strace)
8275 print_syscall_ret(num, ret);
8276 return ret;
8277 efault:
8278 ret = -TARGET_EFAULT;
8279 goto fail;