virtio-net: pass host features to virtio_net_init
[qemu/agraf.git] / linux-user / syscall.c
blob 9e31ea72004ae3bc4b98f51a22255056c03af82a
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
82 #define termios host_termios
83 #define winsize host_winsize
84 #define termio host_termio
85 #define sgttyb host_sgttyb /* same as target */
86 #define tchars host_tchars /* same as target */
87 #define ltchars host_ltchars /* same as target */
89 #include <linux/termios.h>
90 #include <linux/unistd.h>
91 #include <linux/utsname.h>
92 #include <linux/cdrom.h>
93 #include <linux/hdreg.h>
94 #include <linux/soundcard.h>
95 #include <linux/kd.h>
96 #include <linux/mtio.h>
97 #include <linux/fs.h>
98 #if defined(CONFIG_FIEMAP)
99 #include <linux/fiemap.h>
100 #endif
101 #include <linux/fb.h>
102 #include <linux/vt.h>
103 #include <linux/dm-ioctl.h>
104 #include <linux/reboot.h>
105 #include "linux_loop.h"
106 #include "cpu-uname.h"
108 #include "qemu.h"
110 #if defined(CONFIG_USE_NPTL)
111 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
112 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
113 #else
114 /* XXX: Hardcode the above values. */
115 #define CLONE_NPTL_FLAGS2 0
116 #endif
118 //#define DEBUG
120 //#include <linux/msdos_fs.h>
121 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
122 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
125 #undef _syscall0
126 #undef _syscall1
127 #undef _syscall2
128 #undef _syscall3
129 #undef _syscall4
130 #undef _syscall5
131 #undef _syscall6
#define _syscall0(type,name)		\
static type name (void)			\
{					\
    return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
    return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
    return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
    return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
		  type6 arg6)							\
{										\
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}
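/*
 * For illustration: a later use such as
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
 * expands to roughly
 *     static int sys_getdents (uint fd, struct linux_dirent * dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 * with __NR_sys_getdents #defined below to the host's __NR_getdents, so each
 * wrapper issues the raw host syscall and leaves errno handling to the caller.
 */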
180 #define __NR_sys_uname __NR_uname
181 #define __NR_sys_faccessat __NR_faccessat
182 #define __NR_sys_fchmodat __NR_fchmodat
183 #define __NR_sys_fchownat __NR_fchownat
184 #define __NR_sys_fstatat64 __NR_fstatat64
185 #define __NR_sys_futimesat __NR_futimesat
186 #define __NR_sys_getcwd1 __NR_getcwd
187 #define __NR_sys_getdents __NR_getdents
188 #define __NR_sys_getdents64 __NR_getdents64
189 #define __NR_sys_getpriority __NR_getpriority
190 #define __NR_sys_linkat __NR_linkat
191 #define __NR_sys_mkdirat __NR_mkdirat
192 #define __NR_sys_mknodat __NR_mknodat
193 #define __NR_sys_newfstatat __NR_newfstatat
194 #define __NR_sys_openat __NR_openat
195 #define __NR_sys_readlinkat __NR_readlinkat
196 #define __NR_sys_renameat __NR_renameat
197 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
198 #define __NR_sys_symlinkat __NR_symlinkat
199 #define __NR_sys_syslog __NR_syslog
200 #define __NR_sys_tgkill __NR_tgkill
201 #define __NR_sys_tkill __NR_tkill
202 #define __NR_sys_unlinkat __NR_unlinkat
203 #define __NR_sys_utimensat __NR_utimensat
204 #define __NR_sys_futex __NR_futex
205 #define __NR_sys_inotify_init __NR_inotify_init
206 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
207 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
209 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
210 defined(__s390x__)
211 #define __NR__llseek __NR_lseek
212 #endif
214 #ifdef __NR_gettid
215 _syscall0(int, gettid)
216 #else
217 /* This is a replacement for the host gettid() and must return a host
218 errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
223 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
224 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
225 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
226 #endif
227 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
228 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
229 loff_t *, res, uint, wh);
230 #endif
231 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
232 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
233 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
234 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
235 #endif
236 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
237 _syscall2(int,sys_tkill,int,tid,int,sig)
238 #endif
239 #ifdef __NR_exit_group
240 _syscall1(int,exit_group,int,error_code)
241 #endif
242 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
243 _syscall1(int,set_tid_address,int *,tidptr)
244 #endif
245 #if defined(CONFIG_USE_NPTL)
246 #if defined(TARGET_NR_futex) && defined(__NR_futex)
247 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
248 const struct timespec *,timeout,int *,uaddr2,int,val3)
249 #endif
250 #endif
251 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
252 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
253 unsigned long *, user_mask_ptr);
254 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
255 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
256 unsigned long *, user_mask_ptr);
257 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
258 void *, arg);
260 static bitmask_transtbl fcntl_flags_tbl[] = {
261 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
262 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
263 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
264 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
265 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
266 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
267 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
268 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
269 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
270 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
271 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
272 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
273 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
274 #if defined(O_DIRECT)
275 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
276 #endif
277 #if defined(O_NOATIME)
278 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
279 #endif
280 #if defined(O_CLOEXEC)
281 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
282 #endif
283 #if defined(O_PATH)
284 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
285 #endif
286 /* Don't terminate the list prematurely on 64-bit host+guest. */
287 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
288 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
289 #endif
    { 0, 0, 0, 0 }
};
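/*
 * Each entry above is { target_mask, target_bits, host_mask, host_bits }:
 * a flag translates when (value & mask) == bits, and the matching bits for
 * the other side are OR'd in.  A rough sketch of how a table like this is
 * walked when converting a guest open(2) flag word to host flags (the real
 * translation helper lives elsewhere in the linux-user code; this is only
 * an illustration):
 *
 *     unsigned int host_flags = 0;
 *     for each entry { t_mask, t_bits, h_mask, h_bits } in fcntl_flags_tbl:
 *         if ((target_flags & t_mask) == t_bits)
 *             host_flags |= h_bits;
 */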
293 #define COPY_UTSNAME_FIELD(dest, src) \
294 do { \
295 /* __NEW_UTS_LEN doesn't include terminating null */ \
296 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
297 (dest)[__NEW_UTS_LEN] = '\0'; \
298 } while (0)
static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct the Linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);
}

#undef COPY_UTSNAME_FIELD
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
336 #ifdef CONFIG_ATFILE
338 * Host system seems to have atfile syscall stubs available. We
339 * now enable them one by one as specified by target syscall_nr.h.
342 #ifdef TARGET_NR_faccessat
343 static int sys_faccessat(int dirfd, const char *pathname, int mode)
345 return (faccessat(dirfd, pathname, mode, 0));
347 #endif
348 #ifdef TARGET_NR_fchmodat
349 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
351 return (fchmodat(dirfd, pathname, mode, 0));
353 #endif
354 #if defined(TARGET_NR_fchownat)
355 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
356 gid_t group, int flags)
358 return (fchownat(dirfd, pathname, owner, group, flags));
360 #endif
361 #ifdef __NR_fstatat64
362 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
363 int flags)
365 return (fstatat(dirfd, pathname, buf, flags));
367 #endif
368 #ifdef __NR_newfstatat
369 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
370 int flags)
372 return (fstatat(dirfd, pathname, buf, flags));
374 #endif
375 #ifdef TARGET_NR_futimesat
376 static int sys_futimesat(int dirfd, const char *pathname,
377 const struct timeval times[2])
379 return (futimesat(dirfd, pathname, times));
381 #endif
382 #ifdef TARGET_NR_linkat
383 static int sys_linkat(int olddirfd, const char *oldpath,
384 int newdirfd, const char *newpath, int flags)
386 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
388 #endif
389 #ifdef TARGET_NR_mkdirat
390 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
392 return (mkdirat(dirfd, pathname, mode));
394 #endif
395 #ifdef TARGET_NR_mknodat
396 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
397 dev_t dev)
399 return (mknodat(dirfd, pathname, mode, dev));
401 #endif
402 #ifdef TARGET_NR_openat
403 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
406 * open(2) has extra parameter 'mode' when called with
407 * flag O_CREAT.
409 if ((flags & O_CREAT) != 0) {
410 return (openat(dirfd, pathname, flags, mode));
412 return (openat(dirfd, pathname, flags));
414 #endif
415 #ifdef TARGET_NR_readlinkat
416 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
418 return (readlinkat(dirfd, pathname, buf, bufsiz));
420 #endif
421 #ifdef TARGET_NR_renameat
422 static int sys_renameat(int olddirfd, const char *oldpath,
423 int newdirfd, const char *newpath)
425 return (renameat(olddirfd, oldpath, newdirfd, newpath));
427 #endif
428 #ifdef TARGET_NR_symlinkat
429 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
431 return (symlinkat(oldpath, newdirfd, newpath));
433 #endif
434 #ifdef TARGET_NR_unlinkat
435 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
437 return (unlinkat(dirfd, pathname, flags));
439 #endif
440 #else /* !CONFIG_ATFILE */
443 * Try direct syscalls instead
445 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
446 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
447 #endif
448 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
449 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
450 #endif
451 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
452 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
453 uid_t,owner,gid_t,group,int,flags)
454 #endif
455 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
456 defined(__NR_fstatat64)
457 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
458 struct stat *,buf,int,flags)
459 #endif
460 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
461 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
462 const struct timeval *,times)
463 #endif
464 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
465 defined(__NR_newfstatat)
466 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
467 struct stat *,buf,int,flags)
468 #endif
469 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
470 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
471 int,newdirfd,const char *,newpath,int,flags)
472 #endif
473 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
474 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
475 #endif
476 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
477 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
478 mode_t,mode,dev_t,dev)
479 #endif
480 #if defined(TARGET_NR_openat) && defined(__NR_openat)
481 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
482 #endif
483 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
484 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
485 char *,buf,size_t,bufsize)
486 #endif
487 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
488 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
489 int,newdirfd,const char *,newpath)
490 #endif
491 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
492 _syscall3(int,sys_symlinkat,const char *,oldpath,
493 int,newdirfd,const char *,newpath)
494 #endif
495 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
496 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
497 #endif
499 #endif /* CONFIG_ATFILE */
501 #ifdef CONFIG_UTIMENSAT
502 static int sys_utimensat(int dirfd, const char *pathname,
503 const struct timespec times[2], int flags)
505 if (pathname == NULL)
506 return futimens(dirfd, times);
507 else
508 return utimensat(dirfd, pathname, times, flags);
510 #else
511 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
512 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
513 const struct timespec *,tsp,int,flags)
514 #endif
515 #endif /* CONFIG_UTIMENSAT */
517 #ifdef CONFIG_INOTIFY
518 #include <sys/inotify.h>
520 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
521 static int sys_inotify_init(void)
523 return (inotify_init());
525 #endif
526 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
527 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
529 return (inotify_add_watch(fd, pathname, mask));
531 #endif
532 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
533 static int sys_inotify_rm_watch(int fd, int32_t wd)
535 return (inotify_rm_watch(fd, wd));
537 #endif
538 #ifdef CONFIG_INOTIFY1
539 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
540 static int sys_inotify_init1(int flags)
542 return (inotify_init1(flags));
544 #endif
545 #endif
546 #else
547 /* Userspace can usually survive runtime without inotify */
548 #undef TARGET_NR_inotify_init
549 #undef TARGET_NR_inotify_init1
550 #undef TARGET_NR_inotify_add_watch
551 #undef TARGET_NR_inotify_rm_watch
552 #endif /* CONFIG_INOTIFY */
554 #if defined(TARGET_NR_ppoll)
555 #ifndef __NR_ppoll
556 # define __NR_ppoll -1
557 #endif
558 #define __NR_sys_ppoll __NR_ppoll
559 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
560 struct timespec *, timeout, const __sigset_t *, sigmask,
561 size_t, sigsetsize)
562 #endif
564 #if defined(TARGET_NR_pselect6)
565 #ifndef __NR_pselect6
566 # define __NR_pselect6 -1
567 #endif
568 #define __NR_sys_pselect6 __NR_pselect6
569 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
570 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
571 #endif
573 #if defined(TARGET_NR_prlimit64)
574 #ifndef __NR_prlimit64
575 # define __NR_prlimit64 -1
576 #endif
577 #define __NR_sys_prlimit64 __NR_prlimit64
578 /* The glibc rlimit structure may not be that used by the underlying syscall */
579 struct host_rlimit64 {
580 uint64_t rlim_cur;
    uint64_t rlim_max;
};
583 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
584 const struct host_rlimit64 *, new_limit,
585 struct host_rlimit64 *, old_limit)
586 #endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1);
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
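/*
 * regpairs_aligned() is consulted by the 64-bit-argument syscalls handled
 * later in this file (pread64/pwrite64, truncate64/ftruncate64, ...): when it
 * returns non-zero, one argument slot is skipped so that the low and high
 * halves of the 64-bit value land on an even/odd register pair.  Roughly:
 *
 *     if (regpairs_aligned(cpu_env)) {
 *         arg4 = arg5;
 *         arg5 = arg6;
 *     }
 *     ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
 *
 * (pread64 is just an example; a helper such as target_offset64() is what
 * reassembles the two halves.)
 */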
604 #define ERRNO_TABLE_SIZE 1200
606 /* target_to_host_errno_table[] is initialized from
607 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
616 [EIDRM] = TARGET_EIDRM,
617 [ECHRNG] = TARGET_ECHRNG,
618 [EL2NSYNC] = TARGET_EL2NSYNC,
619 [EL3HLT] = TARGET_EL3HLT,
620 [EL3RST] = TARGET_EL3RST,
621 [ELNRNG] = TARGET_ELNRNG,
622 [EUNATCH] = TARGET_EUNATCH,
623 [ENOCSI] = TARGET_ENOCSI,
624 [EL2HLT] = TARGET_EL2HLT,
625 [EDEADLK] = TARGET_EDEADLK,
626 [ENOLCK] = TARGET_ENOLCK,
627 [EBADE] = TARGET_EBADE,
628 [EBADR] = TARGET_EBADR,
629 [EXFULL] = TARGET_EXFULL,
630 [ENOANO] = TARGET_ENOANO,
631 [EBADRQC] = TARGET_EBADRQC,
632 [EBADSLT] = TARGET_EBADSLT,
633 [EBFONT] = TARGET_EBFONT,
634 [ENOSTR] = TARGET_ENOSTR,
635 [ENODATA] = TARGET_ENODATA,
636 [ETIME] = TARGET_ETIME,
637 [ENOSR] = TARGET_ENOSR,
638 [ENONET] = TARGET_ENONET,
639 [ENOPKG] = TARGET_ENOPKG,
640 [EREMOTE] = TARGET_EREMOTE,
641 [ENOLINK] = TARGET_ENOLINK,
642 [EADV] = TARGET_EADV,
643 [ESRMNT] = TARGET_ESRMNT,
644 [ECOMM] = TARGET_ECOMM,
645 [EPROTO] = TARGET_EPROTO,
646 [EDOTDOT] = TARGET_EDOTDOT,
647 [EMULTIHOP] = TARGET_EMULTIHOP,
648 [EBADMSG] = TARGET_EBADMSG,
649 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
650 [EOVERFLOW] = TARGET_EOVERFLOW,
651 [ENOTUNIQ] = TARGET_ENOTUNIQ,
652 [EBADFD] = TARGET_EBADFD,
653 [EREMCHG] = TARGET_EREMCHG,
654 [ELIBACC] = TARGET_ELIBACC,
655 [ELIBBAD] = TARGET_ELIBBAD,
656 [ELIBSCN] = TARGET_ELIBSCN,
657 [ELIBMAX] = TARGET_ELIBMAX,
658 [ELIBEXEC] = TARGET_ELIBEXEC,
659 [EILSEQ] = TARGET_EILSEQ,
660 [ENOSYS] = TARGET_ENOSYS,
661 [ELOOP] = TARGET_ELOOP,
662 [ERESTART] = TARGET_ERESTART,
663 [ESTRPIPE] = TARGET_ESTRPIPE,
664 [ENOTEMPTY] = TARGET_ENOTEMPTY,
665 [EUSERS] = TARGET_EUSERS,
666 [ENOTSOCK] = TARGET_ENOTSOCK,
667 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
668 [EMSGSIZE] = TARGET_EMSGSIZE,
669 [EPROTOTYPE] = TARGET_EPROTOTYPE,
670 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
671 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
672 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
673 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
674 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
675 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
676 [EADDRINUSE] = TARGET_EADDRINUSE,
677 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
678 [ENETDOWN] = TARGET_ENETDOWN,
679 [ENETUNREACH] = TARGET_ENETUNREACH,
680 [ENETRESET] = TARGET_ENETRESET,
681 [ECONNABORTED] = TARGET_ECONNABORTED,
682 [ECONNRESET] = TARGET_ECONNRESET,
683 [ENOBUFS] = TARGET_ENOBUFS,
684 [EISCONN] = TARGET_EISCONN,
685 [ENOTCONN] = TARGET_ENOTCONN,
686 [EUCLEAN] = TARGET_EUCLEAN,
687 [ENOTNAM] = TARGET_ENOTNAM,
688 [ENAVAIL] = TARGET_ENAVAIL,
689 [EISNAM] = TARGET_EISNAM,
690 [EREMOTEIO] = TARGET_EREMOTEIO,
691 [ESHUTDOWN] = TARGET_ESHUTDOWN,
692 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
693 [ETIMEDOUT] = TARGET_ETIMEDOUT,
694 [ECONNREFUSED] = TARGET_ECONNREFUSED,
695 [EHOSTDOWN] = TARGET_EHOSTDOWN,
696 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
697 [EALREADY] = TARGET_EALREADY,
698 [EINPROGRESS] = TARGET_EINPROGRESS,
699 [ESTALE] = TARGET_ESTALE,
700 [ECANCELED] = TARGET_ECANCELED,
701 [ENOMEDIUM] = TARGET_ENOMEDIUM,
702 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
703 #ifdef ENOKEY
704 [ENOKEY] = TARGET_ENOKEY,
705 #endif
706 #ifdef EKEYEXPIRED
707 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
708 #endif
709 #ifdef EKEYREVOKED
710 [EKEYREVOKED] = TARGET_EKEYREVOKED,
711 #endif
712 #ifdef EKEYREJECTED
713 [EKEYREJECTED] = TARGET_EKEYREJECTED,
714 #endif
715 #ifdef EOWNERDEAD
716 [EOWNERDEAD] = TARGET_EOWNERDEAD,
717 #endif
718 #ifdef ENOTRECOVERABLE
719 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
};

static inline int host_to_target_errno(int err)
{
    if (host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}
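/*
 * Typical usage pattern throughout this file: wrap the host call in
 * get_errno() so that a failure is encoded as a negative target errno, then
 * test with is_error() before copying results back to guest memory, e.g.
 *
 *     ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
 *     if (!is_error(ret)) {
 *         copy the out-parameters back to the guest
 *     }
 */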
750 char *target_strerror(int err)
752 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
753 return NULL;
755 return strerror(target_to_host_errno(err));
758 static abi_ulong target_brk;
759 static abi_ulong target_original_brk;
760 static abi_ulong brk_page;
762 void target_set_brk(abi_ulong new_brk)
764 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
765 brk_page = HOST_PAGE_ALIGN(target_brk);
768 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
769 #define DEBUGF_BRK(message, args...)
771 /* do_brk() must return target values and target errnos. */
772 abi_long do_brk(abi_ulong new_brk)
774 abi_long mapped_addr;
775 int new_alloc_size;
777 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
779 if (!new_brk) {
780 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
781 return target_brk;
783 if (new_brk < target_original_brk) {
784 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
785 target_brk);
786 return target_brk;
789 /* If the new brk is less than the highest page reserved to the
790 * target heap allocation, set it and we're almost done... */
791 if (new_brk <= brk_page) {
792 /* Heap contents are initialized to zero, as for anonymous
793 * mapped pages. */
794 if (new_brk > target_brk) {
795 memset(g2h(target_brk), 0, new_brk - target_brk);
797 target_brk = new_brk;
798 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
799 return target_brk;
802 /* We need to allocate more memory after the brk... Note that
803 * we don't use MAP_FIXED because that will map over the top of
804 * any existing mapping (like the one with the host libc or qemu
805 * itself); instead we treat "mapped but at wrong address" as
806 * a failure and unmap again.
808 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
809 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
810 PROT_READ|PROT_WRITE,
811 MAP_ANON|MAP_PRIVATE, 0, 0));
813 if (mapped_addr == brk_page) {
814 /* Heap contents are initialized to zero, as for anonymous
815 * mapped pages. Technically the new pages are already
816 * initialized to zero since they *are* anonymous mapped
817 * pages, however we have to take care with the contents that
818 * come from the remaining part of the previous page: it may
 * contain garbage data due to a previous heap usage (grown
820 * then shrunken). */
821 memset(g2h(target_brk), 0, brk_page - target_brk);
823 target_brk = new_brk;
824 brk_page = HOST_PAGE_ALIGN(target_brk);
825 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
826 target_brk);
827 return target_brk;
828 } else if (mapped_addr != -1) {
829 /* Mapped but at wrong address, meaning there wasn't actually
830 * enough space for this brk.
832 target_munmap(mapped_addr, new_alloc_size);
833 mapped_addr = -1;
834 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
836 else {
837 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
840 #if defined(TARGET_ALPHA)
841 /* We (partially) emulate OSF/1 on Alpha, which requires we
842 return a proper errno, not an unchanged brk value. */
843 return -TARGET_ENOMEM;
844 #endif
845 /* For everything else, return the previous break. */
846 return target_brk;
849 static inline abi_long copy_from_user_fdset(fd_set *fds,
850 abi_ulong target_fds_addr,
851 int n)
853 int i, nw, j, k;
854 abi_ulong b, *target_fds;
856 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
857 if (!(target_fds = lock_user(VERIFY_READ,
858 target_fds_addr,
859 sizeof(abi_ulong) * nw,
860 1)))
861 return -TARGET_EFAULT;
863 FD_ZERO(fds);
864 k = 0;
865 for (i = 0; i < nw; i++) {
866 /* grab the abi_ulong */
867 __get_user(b, &target_fds[i]);
868 for (j = 0; j < TARGET_ABI_BITS; j++) {
869 /* check the bit inside the abi_ulong */
870 if ((b >> j) & 1)
871 FD_SET(k, fds);
872 k++;
876 unlock_user(target_fds, target_fds_addr, 0);
878 return 0;
881 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
882 abi_ulong target_fds_addr,
883 int n)
885 if (target_fds_addr) {
886 if (copy_from_user_fdset(fds, target_fds_addr, n))
887 return -TARGET_EFAULT;
888 *fds_ptr = fds;
889 } else {
890 *fds_ptr = NULL;
892 return 0;
895 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
896 const fd_set *fds,
897 int n)
899 int i, nw, j, k;
900 abi_long v;
901 abi_ulong *target_fds;
903 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
904 if (!(target_fds = lock_user(VERIFY_WRITE,
905 target_fds_addr,
906 sizeof(abi_ulong) * nw,
907 0)))
908 return -TARGET_EFAULT;
910 k = 0;
911 for (i = 0; i < nw; i++) {
912 v = 0;
913 for (j = 0; j < TARGET_ABI_BITS; j++) {
914 v |= ((FD_ISSET(k, fds) != 0) << j);
915 k++;
917 __put_user(v, &target_fds[i]);
920 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
    return 0;
}
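/*
 * Worked example of the repacking above: with TARGET_ABI_BITS == 32, guest
 * descriptor 35 is bit 3 of target_fds[1] (k = i * TARGET_ABI_BITS + j, so
 * i = 1 and j = 3), independent of how the host libc lays out its fd_set.
 */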
925 #if defined(__alpha__)
926 #define HOST_HZ 1024
927 #else
928 #define HOST_HZ 100
929 #endif
931 static inline abi_long host_to_target_clock_t(long ticks)
933 #if HOST_HZ == TARGET_HZ
934 return ticks;
935 #else
936 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
937 #endif
940 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
941 const struct rusage *rusage)
943 struct target_rusage *target_rusage;
945 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
946 return -TARGET_EFAULT;
947 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
948 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
949 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
950 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
951 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
952 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
953 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
954 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
955 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
956 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
957 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
958 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
959 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
960 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
961 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
962 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
963 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
964 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
965 unlock_user_struct(target_rusage, target_addr, 1);
967 return 0;
970 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
972 abi_ulong target_rlim_swap;
973 rlim_t result;
975 target_rlim_swap = tswapal(target_rlim);
976 if (target_rlim_swap == TARGET_RLIM_INFINITY)
977 return RLIM_INFINITY;
979 result = target_rlim_swap;
980 if (target_rlim_swap != (rlim_t)result)
981 return RLIM_INFINITY;
983 return result;
986 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
988 abi_ulong target_rlim_swap;
989 abi_ulong result;
991 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
992 target_rlim_swap = TARGET_RLIM_INFINITY;
993 else
994 target_rlim_swap = rlim;
995 result = tswapal(target_rlim_swap);
    return result;
}
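/*
 * Example of the clamping above: on a 32-bit target running on a 64-bit host,
 * a host rlim_cur of 0x100000000 does not fit in an abi_ulong, so
 * host_to_target_rlim() reports TARGET_RLIM_INFINITY rather than silently
 * truncating the value.
 */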
1000 static inline int target_to_host_resource(int code)
1002 switch (code) {
1003 case TARGET_RLIMIT_AS:
1004 return RLIMIT_AS;
1005 case TARGET_RLIMIT_CORE:
1006 return RLIMIT_CORE;
1007 case TARGET_RLIMIT_CPU:
1008 return RLIMIT_CPU;
1009 case TARGET_RLIMIT_DATA:
1010 return RLIMIT_DATA;
1011 case TARGET_RLIMIT_FSIZE:
1012 return RLIMIT_FSIZE;
1013 case TARGET_RLIMIT_LOCKS:
1014 return RLIMIT_LOCKS;
1015 case TARGET_RLIMIT_MEMLOCK:
1016 return RLIMIT_MEMLOCK;
1017 case TARGET_RLIMIT_MSGQUEUE:
1018 return RLIMIT_MSGQUEUE;
1019 case TARGET_RLIMIT_NICE:
1020 return RLIMIT_NICE;
1021 case TARGET_RLIMIT_NOFILE:
1022 return RLIMIT_NOFILE;
1023 case TARGET_RLIMIT_NPROC:
1024 return RLIMIT_NPROC;
1025 case TARGET_RLIMIT_RSS:
1026 return RLIMIT_RSS;
1027 case TARGET_RLIMIT_RTPRIO:
1028 return RLIMIT_RTPRIO;
1029 case TARGET_RLIMIT_SIGPENDING:
1030 return RLIMIT_SIGPENDING;
1031 case TARGET_RLIMIT_STACK:
1032 return RLIMIT_STACK;
1033 default:
1034 return code;
1038 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1039 abi_ulong target_tv_addr)
1041 struct target_timeval *target_tv;
1043 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1044 return -TARGET_EFAULT;
1046 __get_user(tv->tv_sec, &target_tv->tv_sec);
1047 __get_user(tv->tv_usec, &target_tv->tv_usec);
1049 unlock_user_struct(target_tv, target_tv_addr, 0);
1051 return 0;
1054 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1055 const struct timeval *tv)
1057 struct target_timeval *target_tv;
1059 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1060 return -TARGET_EFAULT;
1062 __put_user(tv->tv_sec, &target_tv->tv_sec);
1063 __put_user(tv->tv_usec, &target_tv->tv_usec);
1065 unlock_user_struct(target_tv, target_tv_addr, 1);
1067 return 0;
1070 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1071 #include <mqueue.h>
1073 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1074 abi_ulong target_mq_attr_addr)
1076 struct target_mq_attr *target_mq_attr;
1078 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1079 target_mq_attr_addr, 1))
1080 return -TARGET_EFAULT;
1082 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1083 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1084 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1085 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1087 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1089 return 0;
1092 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1093 const struct mq_attr *attr)
1095 struct target_mq_attr *target_mq_attr;
1097 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1098 target_mq_attr_addr, 0))
1099 return -TARGET_EFAULT;
1101 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1102 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1103 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1104 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1106 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1108 return 0;
1110 #endif
1112 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1113 /* do_select() must return target values and target errnos. */
1114 static abi_long do_select(int n,
1115 abi_ulong rfd_addr, abi_ulong wfd_addr,
1116 abi_ulong efd_addr, abi_ulong target_tv_addr)
1118 fd_set rfds, wfds, efds;
1119 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1120 struct timeval tv, *tv_ptr;
1121 abi_long ret;
1123 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1124 if (ret) {
1125 return ret;
1127 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1128 if (ret) {
1129 return ret;
1131 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1132 if (ret) {
1133 return ret;
1136 if (target_tv_addr) {
1137 if (copy_from_user_timeval(&tv, target_tv_addr))
1138 return -TARGET_EFAULT;
1139 tv_ptr = &tv;
1140 } else {
1141 tv_ptr = NULL;
1144 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1146 if (!is_error(ret)) {
1147 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1148 return -TARGET_EFAULT;
1149 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1150 return -TARGET_EFAULT;
1151 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1152 return -TARGET_EFAULT;
1154 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1155 return -TARGET_EFAULT;
1158 return ret;
1160 #endif
1162 static abi_long do_pipe2(int host_pipe[], int flags)
1164 #ifdef CONFIG_PIPE2
1165 return pipe2(host_pipe, flags);
1166 #else
1167 return -ENOSYS;
1168 #endif
1171 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1172 int flags, int is_pipe2)
1174 int host_pipe[2];
1175 abi_long ret;
1176 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1178 if (is_error(ret))
1179 return get_errno(ret);
1181 /* Several targets have special calling conventions for the original
1182 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1183 if (!is_pipe2) {
1184 #if defined(TARGET_ALPHA)
1185 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1186 return host_pipe[0];
1187 #elif defined(TARGET_MIPS)
1188 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1189 return host_pipe[0];
1190 #elif defined(TARGET_SH4)
1191 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1192 return host_pipe[0];
1193 #endif
1196 if (put_user_s32(host_pipe[0], pipedes)
1197 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1198 return -TARGET_EFAULT;
1199 return get_errno(ret);
1202 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1203 abi_ulong target_addr,
1204 socklen_t len)
1206 struct target_ip_mreqn *target_smreqn;
1208 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1209 if (!target_smreqn)
1210 return -TARGET_EFAULT;
1211 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1212 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1213 if (len == sizeof(struct target_ip_mreqn))
1214 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1215 unlock_user(target_smreqn, target_addr, 0);
1217 return 0;
1220 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1221 abi_ulong target_addr,
1222 socklen_t len)
1224 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1225 sa_family_t sa_family;
1226 struct target_sockaddr *target_saddr;
1228 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1229 if (!target_saddr)
1230 return -TARGET_EFAULT;
1232 sa_family = tswap16(target_saddr->sa_family);
    /* Oops. The caller might send an incomplete sun_path; sun_path
1235 * must be terminated by \0 (see the manual page), but
1236 * unfortunately it is quite common to specify sockaddr_un
1237 * length as "strlen(x->sun_path)" while it should be
1238 * "strlen(...) + 1". We'll fix that here if needed.
1239 * Linux kernel has a similar feature.
1242 if (sa_family == AF_UNIX) {
1243 if (len < unix_maxlen && len > 0) {
1244 char *cp = (char*)target_saddr;
1246 if ( cp[len-1] && !cp[len] )
1247 len++;
1249 if (len > unix_maxlen)
1250 len = unix_maxlen;
1253 memcpy(addr, target_saddr, len);
1254 addr->sa_family = sa_family;
1255 unlock_user(target_saddr, target_addr, 0);
1257 return 0;
1260 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1261 struct sockaddr *addr,
1262 socklen_t len)
1264 struct target_sockaddr *target_saddr;
1266 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1267 if (!target_saddr)
1268 return -TARGET_EFAULT;
1269 memcpy(target_saddr, addr, len);
1270 target_saddr->sa_family = tswap16(addr->sa_family);
1271 unlock_user(target_saddr, target_addr, len);
1273 return 0;
1276 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1277 struct target_msghdr *target_msgh)
1279 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1280 abi_long msg_controllen;
1281 abi_ulong target_cmsg_addr;
1282 struct target_cmsghdr *target_cmsg;
1283 socklen_t space = 0;
1285 msg_controllen = tswapal(target_msgh->msg_controllen);
1286 if (msg_controllen < sizeof (struct target_cmsghdr))
1287 goto the_end;
1288 target_cmsg_addr = tswapal(target_msgh->msg_control);
1289 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1290 if (!target_cmsg)
1291 return -TARGET_EFAULT;
1293 while (cmsg && target_cmsg) {
1294 void *data = CMSG_DATA(cmsg);
1295 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1297 int len = tswapal(target_cmsg->cmsg_len)
1298 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1300 space += CMSG_SPACE(len);
1301 if (space > msgh->msg_controllen) {
1302 space -= CMSG_SPACE(len);
1303 gemu_log("Host cmsg overflow\n");
1304 break;
1307 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1308 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1309 cmsg->cmsg_len = CMSG_LEN(len);
1311 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1312 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1313 memcpy(data, target_data, len);
1314 } else {
1315 int *fd = (int *)data;
1316 int *target_fd = (int *)target_data;
1317 int i, numfds = len / sizeof(int);
1319 for (i = 0; i < numfds; i++)
1320 fd[i] = tswap32(target_fd[i]);
1323 cmsg = CMSG_NXTHDR(msgh, cmsg);
1324 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1326 unlock_user(target_cmsg, target_cmsg_addr, 0);
1327 the_end:
1328 msgh->msg_controllen = space;
1329 return 0;
1332 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1333 struct msghdr *msgh)
1335 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1336 abi_long msg_controllen;
1337 abi_ulong target_cmsg_addr;
1338 struct target_cmsghdr *target_cmsg;
1339 socklen_t space = 0;
1341 msg_controllen = tswapal(target_msgh->msg_controllen);
1342 if (msg_controllen < sizeof (struct target_cmsghdr))
1343 goto the_end;
1344 target_cmsg_addr = tswapal(target_msgh->msg_control);
1345 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1346 if (!target_cmsg)
1347 return -TARGET_EFAULT;
1349 while (cmsg && target_cmsg) {
1350 void *data = CMSG_DATA(cmsg);
1351 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1353 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1355 space += TARGET_CMSG_SPACE(len);
1356 if (space > msg_controllen) {
1357 space -= TARGET_CMSG_SPACE(len);
1358 gemu_log("Target cmsg overflow\n");
1359 break;
1362 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1363 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1364 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1366 if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1367 (cmsg->cmsg_type == SCM_RIGHTS)) {
1368 int *fd = (int *)data;
1369 int *target_fd = (int *)target_data;
1370 int i, numfds = len / sizeof(int);
1372 for (i = 0; i < numfds; i++)
1373 target_fd[i] = tswap32(fd[i]);
1374 } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1375 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1376 (len == sizeof(struct timeval))) {
1377 /* copy struct timeval to target */
1378 struct timeval *tv = (struct timeval *)data;
1379 struct target_timeval *target_tv =
1380 (struct target_timeval *)target_data;
1382 target_tv->tv_sec = tswapal(tv->tv_sec);
1383 target_tv->tv_usec = tswapal(tv->tv_usec);
1384 } else {
1385 gemu_log("Unsupported ancillary data: %d/%d\n",
1386 cmsg->cmsg_level, cmsg->cmsg_type);
1387 memcpy(target_data, data, len);
1390 cmsg = CMSG_NXTHDR(msgh, cmsg);
1391 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1393 unlock_user(target_cmsg, target_cmsg_addr, space);
1394 the_end:
1395 target_msgh->msg_controllen = tswapal(space);
1396 return 0;
1399 /* do_setsockopt() Must return target values and target errnos. */
1400 static abi_long do_setsockopt(int sockfd, int level, int optname,
1401 abi_ulong optval_addr, socklen_t optlen)
1403 abi_long ret;
1404 int val;
1405 struct ip_mreqn *ip_mreq;
1406 struct ip_mreq_source *ip_mreq_source;
1408 switch(level) {
1409 case SOL_TCP:
1410 /* TCP options all take an 'int' value. */
1411 if (optlen < sizeof(uint32_t))
1412 return -TARGET_EINVAL;
1414 if (get_user_u32(val, optval_addr))
1415 return -TARGET_EFAULT;
1416 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1417 break;
1418 case SOL_IP:
1419 switch(optname) {
1420 case IP_TOS:
1421 case IP_TTL:
1422 case IP_HDRINCL:
1423 case IP_ROUTER_ALERT:
1424 case IP_RECVOPTS:
1425 case IP_RETOPTS:
1426 case IP_PKTINFO:
1427 case IP_MTU_DISCOVER:
1428 case IP_RECVERR:
1429 case IP_RECVTOS:
1430 #ifdef IP_FREEBIND
1431 case IP_FREEBIND:
1432 #endif
1433 case IP_MULTICAST_TTL:
1434 case IP_MULTICAST_LOOP:
1435 val = 0;
1436 if (optlen >= sizeof(uint32_t)) {
1437 if (get_user_u32(val, optval_addr))
1438 return -TARGET_EFAULT;
1439 } else if (optlen >= 1) {
1440 if (get_user_u8(val, optval_addr))
1441 return -TARGET_EFAULT;
1443 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1444 break;
1445 case IP_ADD_MEMBERSHIP:
1446 case IP_DROP_MEMBERSHIP:
1447 if (optlen < sizeof (struct target_ip_mreq) ||
1448 optlen > sizeof (struct target_ip_mreqn))
1449 return -TARGET_EINVAL;
1451 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1452 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1453 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1454 break;
1456 case IP_BLOCK_SOURCE:
1457 case IP_UNBLOCK_SOURCE:
1458 case IP_ADD_SOURCE_MEMBERSHIP:
1459 case IP_DROP_SOURCE_MEMBERSHIP:
1460 if (optlen != sizeof (struct target_ip_mreq_source))
1461 return -TARGET_EINVAL;
1463 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1464 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1465 unlock_user (ip_mreq_source, optval_addr, 0);
1466 break;
1468 default:
1469 goto unimplemented;
1471 break;
1472 case SOL_RAW:
1473 switch (optname) {
1474 case ICMP_FILTER:
        /* struct icmp_filter takes a u32 value */
1476 if (optlen < sizeof(uint32_t)) {
1477 return -TARGET_EINVAL;
1480 if (get_user_u32(val, optval_addr)) {
1481 return -TARGET_EFAULT;
1483 ret = get_errno(setsockopt(sockfd, level, optname,
1484 &val, sizeof(val)));
1485 break;
1487 default:
1488 goto unimplemented;
1490 break;
1491 case TARGET_SOL_SOCKET:
1492 switch (optname) {
1493 case TARGET_SO_RCVTIMEO:
1495 struct timeval tv;
1497 optname = SO_RCVTIMEO;
1499 set_timeout:
1500 if (optlen != sizeof(struct target_timeval)) {
1501 return -TARGET_EINVAL;
1504 if (copy_from_user_timeval(&tv, optval_addr)) {
1505 return -TARGET_EFAULT;
1508 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1509 &tv, sizeof(tv)));
1510 return ret;
1512 case TARGET_SO_SNDTIMEO:
1513 optname = SO_SNDTIMEO;
1514 goto set_timeout;
1515 /* Options with 'int' argument. */
1516 case TARGET_SO_DEBUG:
1517 optname = SO_DEBUG;
1518 break;
1519 case TARGET_SO_REUSEADDR:
1520 optname = SO_REUSEADDR;
1521 break;
1522 case TARGET_SO_TYPE:
1523 optname = SO_TYPE;
1524 break;
1525 case TARGET_SO_ERROR:
1526 optname = SO_ERROR;
1527 break;
1528 case TARGET_SO_DONTROUTE:
1529 optname = SO_DONTROUTE;
1530 break;
1531 case TARGET_SO_BROADCAST:
1532 optname = SO_BROADCAST;
1533 break;
1534 case TARGET_SO_SNDBUF:
1535 optname = SO_SNDBUF;
1536 break;
1537 case TARGET_SO_RCVBUF:
1538 optname = SO_RCVBUF;
1539 break;
1540 case TARGET_SO_KEEPALIVE:
1541 optname = SO_KEEPALIVE;
1542 break;
1543 case TARGET_SO_OOBINLINE:
1544 optname = SO_OOBINLINE;
1545 break;
1546 case TARGET_SO_NO_CHECK:
1547 optname = SO_NO_CHECK;
1548 break;
1549 case TARGET_SO_PRIORITY:
1550 optname = SO_PRIORITY;
1551 break;
1552 #ifdef SO_BSDCOMPAT
1553 case TARGET_SO_BSDCOMPAT:
1554 optname = SO_BSDCOMPAT;
1555 break;
1556 #endif
1557 case TARGET_SO_PASSCRED:
1558 optname = SO_PASSCRED;
1559 break;
1560 case TARGET_SO_TIMESTAMP:
1561 optname = SO_TIMESTAMP;
1562 break;
1563 case TARGET_SO_RCVLOWAT:
1564 optname = SO_RCVLOWAT;
1565 break;
1566 break;
1567 default:
1568 goto unimplemented;
1570 if (optlen < sizeof(uint32_t))
1571 return -TARGET_EINVAL;
1573 if (get_user_u32(val, optval_addr))
1574 return -TARGET_EFAULT;
1575 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1576 break;
1577 default:
1578 unimplemented:
1579 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1580 ret = -TARGET_ENOPROTOOPT;
1582 return ret;
1585 /* do_getsockopt() Must return target values and target errnos. */
1586 static abi_long do_getsockopt(int sockfd, int level, int optname,
1587 abi_ulong optval_addr, abi_ulong optlen)
1589 abi_long ret;
1590 int len, val;
1591 socklen_t lv;
1593 switch(level) {
1594 case TARGET_SOL_SOCKET:
1595 level = SOL_SOCKET;
1596 switch (optname) {
1597 /* These don't just return a single integer */
1598 case TARGET_SO_LINGER:
1599 case TARGET_SO_RCVTIMEO:
1600 case TARGET_SO_SNDTIMEO:
1601 case TARGET_SO_PEERNAME:
1602 goto unimplemented;
1603 case TARGET_SO_PEERCRED: {
1604 struct ucred cr;
1605 socklen_t crlen;
1606 struct target_ucred *tcr;
1608 if (get_user_u32(len, optlen)) {
1609 return -TARGET_EFAULT;
1611 if (len < 0) {
1612 return -TARGET_EINVAL;
1615 crlen = sizeof(cr);
1616 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1617 &cr, &crlen));
1618 if (ret < 0) {
1619 return ret;
1621 if (len > crlen) {
1622 len = crlen;
1624 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1625 return -TARGET_EFAULT;
1627 __put_user(cr.pid, &tcr->pid);
1628 __put_user(cr.uid, &tcr->uid);
1629 __put_user(cr.gid, &tcr->gid);
1630 unlock_user_struct(tcr, optval_addr, 1);
1631 if (put_user_u32(len, optlen)) {
1632 return -TARGET_EFAULT;
1634 break;
1636 /* Options with 'int' argument. */
1637 case TARGET_SO_DEBUG:
1638 optname = SO_DEBUG;
1639 goto int_case;
1640 case TARGET_SO_REUSEADDR:
1641 optname = SO_REUSEADDR;
1642 goto int_case;
1643 case TARGET_SO_TYPE:
1644 optname = SO_TYPE;
1645 goto int_case;
1646 case TARGET_SO_ERROR:
1647 optname = SO_ERROR;
1648 goto int_case;
1649 case TARGET_SO_DONTROUTE:
1650 optname = SO_DONTROUTE;
1651 goto int_case;
1652 case TARGET_SO_BROADCAST:
1653 optname = SO_BROADCAST;
1654 goto int_case;
1655 case TARGET_SO_SNDBUF:
1656 optname = SO_SNDBUF;
1657 goto int_case;
1658 case TARGET_SO_RCVBUF:
1659 optname = SO_RCVBUF;
1660 goto int_case;
1661 case TARGET_SO_KEEPALIVE:
1662 optname = SO_KEEPALIVE;
1663 goto int_case;
1664 case TARGET_SO_OOBINLINE:
1665 optname = SO_OOBINLINE;
1666 goto int_case;
1667 case TARGET_SO_NO_CHECK:
1668 optname = SO_NO_CHECK;
1669 goto int_case;
1670 case TARGET_SO_PRIORITY:
1671 optname = SO_PRIORITY;
1672 goto int_case;
1673 #ifdef SO_BSDCOMPAT
1674 case TARGET_SO_BSDCOMPAT:
1675 optname = SO_BSDCOMPAT;
1676 goto int_case;
1677 #endif
1678 case TARGET_SO_PASSCRED:
1679 optname = SO_PASSCRED;
1680 goto int_case;
1681 case TARGET_SO_TIMESTAMP:
1682 optname = SO_TIMESTAMP;
1683 goto int_case;
1684 case TARGET_SO_RCVLOWAT:
1685 optname = SO_RCVLOWAT;
1686 goto int_case;
1687 default:
1688 goto int_case;
1690 break;
1691 case SOL_TCP:
1692 /* TCP options all take an 'int' value. */
1693 int_case:
1694 if (get_user_u32(len, optlen))
1695 return -TARGET_EFAULT;
1696 if (len < 0)
1697 return -TARGET_EINVAL;
1698 lv = sizeof(lv);
1699 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1700 if (ret < 0)
1701 return ret;
1702 if (len > lv)
1703 len = lv;
1704 if (len == 4) {
1705 if (put_user_u32(val, optval_addr))
1706 return -TARGET_EFAULT;
1707 } else {
1708 if (put_user_u8(val, optval_addr))
1709 return -TARGET_EFAULT;
1711 if (put_user_u32(len, optlen))
1712 return -TARGET_EFAULT;
1713 break;
1714 case SOL_IP:
1715 switch(optname) {
1716 case IP_TOS:
1717 case IP_TTL:
1718 case IP_HDRINCL:
1719 case IP_ROUTER_ALERT:
1720 case IP_RECVOPTS:
1721 case IP_RETOPTS:
1722 case IP_PKTINFO:
1723 case IP_MTU_DISCOVER:
1724 case IP_RECVERR:
1725 case IP_RECVTOS:
1726 #ifdef IP_FREEBIND
1727 case IP_FREEBIND:
1728 #endif
1729 case IP_MULTICAST_TTL:
1730 case IP_MULTICAST_LOOP:
1731 if (get_user_u32(len, optlen))
1732 return -TARGET_EFAULT;
1733 if (len < 0)
1734 return -TARGET_EINVAL;
1735 lv = sizeof(lv);
1736 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1737 if (ret < 0)
1738 return ret;
1739 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1740 len = 1;
1741 if (put_user_u32(len, optlen)
1742 || put_user_u8(val, optval_addr))
1743 return -TARGET_EFAULT;
1744 } else {
1745 if (len > sizeof(int))
1746 len = sizeof(int);
1747 if (put_user_u32(len, optlen)
1748 || put_user_u32(val, optval_addr))
1749 return -TARGET_EFAULT;
1751 break;
1752 default:
1753 ret = -TARGET_ENOPROTOOPT;
1754 break;
1756 break;
1757 default:
1758 unimplemented:
1759 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1760 level, optname);
1761 ret = -TARGET_EOPNOTSUPP;
1762 break;
1764 return ret;
1767 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1768 int count, int copy)
1770 struct target_iovec *target_vec;
1771 struct iovec *vec;
1772 abi_ulong total_len, max_len;
1773 int i;
1775 if (count == 0) {
1776 errno = 0;
1777 return NULL;
1779 if (count > IOV_MAX) {
1780 errno = EINVAL;
1781 return NULL;
1784 vec = calloc(count, sizeof(struct iovec));
1785 if (vec == NULL) {
1786 errno = ENOMEM;
1787 return NULL;
1790 target_vec = lock_user(VERIFY_READ, target_addr,
1791 count * sizeof(struct target_iovec), 1);
1792 if (target_vec == NULL) {
1793 errno = EFAULT;
1794 goto fail2;
1797 /* ??? If host page size > target page size, this will result in a
1798 value larger than what we can actually support. */
1799 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1800 total_len = 0;
1802 for (i = 0; i < count; i++) {
1803 abi_ulong base = tswapal(target_vec[i].iov_base);
1804 abi_long len = tswapal(target_vec[i].iov_len);
1806 if (len < 0) {
1807 errno = EINVAL;
1808 goto fail;
1809 } else if (len == 0) {
1810 /* Zero length pointer is ignored. */
1811 vec[i].iov_base = 0;
1812 } else {
1813 vec[i].iov_base = lock_user(type, base, len, copy);
1814 if (!vec[i].iov_base) {
1815 errno = EFAULT;
1816 goto fail;
1818 if (len > max_len - total_len) {
1819 len = max_len - total_len;
1822 vec[i].iov_len = len;
1823 total_len += len;
1826 unlock_user(target_vec, target_addr, 0);
1827 return vec;
1829 fail:
1830 free(vec);
1831 fail2:
1832 unlock_user(target_vec, target_addr, 0);
1833 return NULL;
1836 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1837 int count, int copy)
1839 struct target_iovec *target_vec;
1840 int i;
1842 target_vec = lock_user(VERIFY_READ, target_addr,
1843 count * sizeof(struct target_iovec), 1);
1844 if (target_vec) {
1845 for (i = 0; i < count; i++) {
1846 abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
1848 if (len < 0) {
1849 break;
1851 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1853 unlock_user(target_vec, target_addr, 0);
1856 free(vec);
1859 /* do_socket() Must return target values and target errnos. */
1860 static abi_long do_socket(int domain, int type, int protocol)
1862 #if defined(TARGET_MIPS)
1863 switch(type) {
1864 case TARGET_SOCK_DGRAM:
1865 type = SOCK_DGRAM;
1866 break;
1867 case TARGET_SOCK_STREAM:
1868 type = SOCK_STREAM;
1869 break;
1870 case TARGET_SOCK_RAW:
1871 type = SOCK_RAW;
1872 break;
1873 case TARGET_SOCK_RDM:
1874 type = SOCK_RDM;
1875 break;
1876 case TARGET_SOCK_SEQPACKET:
1877 type = SOCK_SEQPACKET;
1878 break;
1879 case TARGET_SOCK_PACKET:
1880 type = SOCK_PACKET;
1881 break;
1883 #endif
1884 if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* we do not support NETLINK socket connections */
1886 return get_errno(socket(domain, type, protocol));
1889 /* do_bind() Must return target values and target errnos. */
1890 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1891 socklen_t addrlen)
1893 void *addr;
1894 abi_long ret;
1896 if ((int)addrlen < 0) {
1897 return -TARGET_EINVAL;
1900 addr = alloca(addrlen+1);
1902 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1903 if (ret)
1904 return ret;
1906 return get_errno(bind(sockfd, addr, addrlen));
1909 /* do_connect() Must return target values and target errnos. */
1910 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1911 socklen_t addrlen)
1913 void *addr;
1914 abi_long ret;
1916 if ((int)addrlen < 0) {
1917 return -TARGET_EINVAL;
1920 addr = alloca(addrlen);
1922 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1923 if (ret)
1924 return ret;
1926 return get_errno(connect(sockfd, addr, addrlen));
1929 /* do_sendrecvmsg() Must return target values and target errnos. */
1930 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1931 int flags, int send)
1933 abi_long ret, len;
1934 struct target_msghdr *msgp;
1935 struct msghdr msg;
1936 int count;
1937 struct iovec *vec;
1938 abi_ulong target_vec;
1940 /* FIXME */
1941 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1942 msgp,
1943 target_msg,
1944 send ? 1 : 0))
1945 return -TARGET_EFAULT;
1946 if (msgp->msg_name) {
1947 msg.msg_namelen = tswap32(msgp->msg_namelen);
1948 msg.msg_name = alloca(msg.msg_namelen);
1949 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1950 msg.msg_namelen);
1951 if (ret) {
1952 goto out2;
1954 } else {
1955 msg.msg_name = NULL;
1956 msg.msg_namelen = 0;
1958 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1959 msg.msg_control = alloca(msg.msg_controllen);
1960 msg.msg_flags = tswap32(msgp->msg_flags);
1962 count = tswapal(msgp->msg_iovlen);
1963 target_vec = tswapal(msgp->msg_iov);
1964 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1965 target_vec, count, send);
1966 if (vec == NULL) {
1967 ret = -host_to_target_errno(errno);
1968 goto out2;
1970 msg.msg_iovlen = count;
1971 msg.msg_iov = vec;
1973 if (send) {
1974 ret = target_to_host_cmsg(&msg, msgp);
1975 if (ret == 0)
1976 ret = get_errno(sendmsg(fd, &msg, flags));
1977 } else {
1978 ret = get_errno(recvmsg(fd, &msg, flags));
1979 if (!is_error(ret)) {
1980 len = ret;
1981 ret = host_to_target_cmsg(msgp, &msg);
1982 if (!is_error(ret)) {
1983 msgp->msg_namelen = tswap32(msg.msg_namelen);
1984 if (msg.msg_name != NULL) {
1985 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1986 msg.msg_name, msg.msg_namelen);
1987 if (ret) {
1988 goto out;
1992 ret = len;
1997 out:
1998 unlock_iovec(vec, target_vec, count, !send);
1999 out2:
2000 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2001 return ret;
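/*
 * Conversion flow sketch for do_sendrecvmsg(): the guest msghdr's name,
 * control (cmsg) and iovec fields are each converted to host form before
 * the call -- sockaddr via target_to_host_sockaddr(), ancillary data via
 * target_to_host_cmsg(), and the iovec via lock_iovec().  On the recvmsg()
 * side the same three pieces are converted back to guest form afterwards,
 * and the byte count returned by the host call is what the guest sees.
 */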
2004 /* do_accept() Must return target values and target errnos. */
2005 static abi_long do_accept(int fd, abi_ulong target_addr,
2006 abi_ulong target_addrlen_addr)
2008 socklen_t addrlen;
2009 void *addr;
2010 abi_long ret;
2012 if (target_addr == 0)
2013 return get_errno(accept(fd, NULL, NULL));
2015 /* linux returns EINVAL if addrlen pointer is invalid */
2016 if (get_user_u32(addrlen, target_addrlen_addr))
2017 return -TARGET_EINVAL;
2019 if ((int)addrlen < 0) {
2020 return -TARGET_EINVAL;
2023 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2024 return -TARGET_EINVAL;
2026 addr = alloca(addrlen);
2028 ret = get_errno(accept(fd, addr, &addrlen));
2029 if (!is_error(ret)) {
2030 host_to_target_sockaddr(target_addr, addr, addrlen);
2031 if (put_user_u32(addrlen, target_addrlen_addr))
2032 ret = -TARGET_EFAULT;
2034 return ret;
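/*
 * The addrlen handling above follows the usual accept() contract: the
 * guest-supplied length is read with get_user_u32(), the host call updates
 * it to the real address size, and the new value is written back with
 * put_user_u32() alongside the converted sockaddr.
 */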
2037 /* do_getpeername() Must return target values and target errnos. */
2038 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2039 abi_ulong target_addrlen_addr)
2041 socklen_t addrlen;
2042 void *addr;
2043 abi_long ret;
2045 if (get_user_u32(addrlen, target_addrlen_addr))
2046 return -TARGET_EFAULT;
2048 if ((int)addrlen < 0) {
2049 return -TARGET_EINVAL;
2052 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2053 return -TARGET_EFAULT;
2055 addr = alloca(addrlen);
2057 ret = get_errno(getpeername(fd, addr, &addrlen));
2058 if (!is_error(ret)) {
2059 host_to_target_sockaddr(target_addr, addr, addrlen);
2060 if (put_user_u32(addrlen, target_addrlen_addr))
2061 ret = -TARGET_EFAULT;
2063 return ret;
2066 /* do_getsockname() Must return target values and target errnos. */
2067 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2068 abi_ulong target_addrlen_addr)
2070 socklen_t addrlen;
2071 void *addr;
2072 abi_long ret;
2074 if (get_user_u32(addrlen, target_addrlen_addr))
2075 return -TARGET_EFAULT;
2077 if ((int)addrlen < 0) {
2078 return -TARGET_EINVAL;
2081 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2082 return -TARGET_EFAULT;
2084 addr = alloca(addrlen);
2086 ret = get_errno(getsockname(fd, addr, &addrlen));
2087 if (!is_error(ret)) {
2088 host_to_target_sockaddr(target_addr, addr, addrlen);
2089 if (put_user_u32(addrlen, target_addrlen_addr))
2090 ret = -TARGET_EFAULT;
2092 return ret;
2095 /* do_socketpair() Must return target values and target errnos. */
2096 static abi_long do_socketpair(int domain, int type, int protocol,
2097 abi_ulong target_tab_addr)
2099 int tab[2];
2100 abi_long ret;
2102 ret = get_errno(socketpair(domain, type, protocol, tab));
2103 if (!is_error(ret)) {
2104 if (put_user_s32(tab[0], target_tab_addr)
2105 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2106 ret = -TARGET_EFAULT;
2108 return ret;
2111 /* do_sendto() Must return target values and target errnos. */
2112 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2113 abi_ulong target_addr, socklen_t addrlen)
2115 void *addr;
2116 void *host_msg;
2117 abi_long ret;
2119 if ((int)addrlen < 0) {
2120 return -TARGET_EINVAL;
2123 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2124 if (!host_msg)
2125 return -TARGET_EFAULT;
2126 if (target_addr) {
2127 addr = alloca(addrlen);
2128 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2129 if (ret) {
2130 unlock_user(host_msg, msg, 0);
2131 return ret;
2133 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2134 } else {
2135 ret = get_errno(send(fd, host_msg, len, flags));
2137 unlock_user(host_msg, msg, 0);
2138 return ret;
2141 /* do_recvfrom() Must return target values and target errnos. */
2142 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2143 abi_ulong target_addr,
2144 abi_ulong target_addrlen)
2146 socklen_t addrlen;
2147 void *addr;
2148 void *host_msg;
2149 abi_long ret;
2151 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2152 if (!host_msg)
2153 return -TARGET_EFAULT;
2154 if (target_addr) {
2155 if (get_user_u32(addrlen, target_addrlen)) {
2156 ret = -TARGET_EFAULT;
2157 goto fail;
2159 if ((int)addrlen < 0) {
2160 ret = -TARGET_EINVAL;
2161 goto fail;
2163 addr = alloca(addrlen);
2164 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2165 } else {
2166 addr = NULL; /* To keep compiler quiet. */
2167 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2169 if (!is_error(ret)) {
2170 if (target_addr) {
2171 host_to_target_sockaddr(target_addr, addr, addrlen);
2172 if (put_user_u32(addrlen, target_addrlen)) {
2173 ret = -TARGET_EFAULT;
2174 goto fail;
2177 unlock_user(host_msg, msg, len);
2178 } else {
2179 fail:
2180 unlock_user(host_msg, msg, 0);
2182 return ret;
2185 #ifdef TARGET_NR_socketcall
2186 /* do_socketcall() Must return target values and target errnos. */
2187 static abi_long do_socketcall(int num, abi_ulong vptr)
2189 abi_long ret;
2190 const int n = sizeof(abi_ulong);
2192 switch(num) {
2193 case SOCKOP_socket:
2195 abi_ulong domain, type, protocol;
2197 if (get_user_ual(domain, vptr)
2198 || get_user_ual(type, vptr + n)
2199 || get_user_ual(protocol, vptr + 2 * n))
2200 return -TARGET_EFAULT;
2202 ret = do_socket(domain, type, protocol);
2204 break;
2205 case SOCKOP_bind:
2207 abi_ulong sockfd;
2208 abi_ulong target_addr;
2209 socklen_t addrlen;
2211 if (get_user_ual(sockfd, vptr)
2212 || get_user_ual(target_addr, vptr + n)
2213 || get_user_ual(addrlen, vptr + 2 * n))
2214 return -TARGET_EFAULT;
2216 ret = do_bind(sockfd, target_addr, addrlen);
2218 break;
2219 case SOCKOP_connect:
2221 abi_ulong sockfd;
2222 abi_ulong target_addr;
2223 socklen_t addrlen;
2225 if (get_user_ual(sockfd, vptr)
2226 || get_user_ual(target_addr, vptr + n)
2227 || get_user_ual(addrlen, vptr + 2 * n))
2228 return -TARGET_EFAULT;
2230 ret = do_connect(sockfd, target_addr, addrlen);
2232 break;
2233 case SOCKOP_listen:
2235 abi_ulong sockfd, backlog;
2237 if (get_user_ual(sockfd, vptr)
2238 || get_user_ual(backlog, vptr + n))
2239 return -TARGET_EFAULT;
2241 ret = get_errno(listen(sockfd, backlog));
2243 break;
2244 case SOCKOP_accept:
2246 abi_ulong sockfd;
2247 abi_ulong target_addr, target_addrlen;
2249 if (get_user_ual(sockfd, vptr)
2250 || get_user_ual(target_addr, vptr + n)
2251 || get_user_ual(target_addrlen, vptr + 2 * n))
2252 return -TARGET_EFAULT;
2254 ret = do_accept(sockfd, target_addr, target_addrlen);
2256 break;
2257 case SOCKOP_getsockname:
2259 abi_ulong sockfd;
2260 abi_ulong target_addr, target_addrlen;
2262 if (get_user_ual(sockfd, vptr)
2263 || get_user_ual(target_addr, vptr + n)
2264 || get_user_ual(target_addrlen, vptr + 2 * n))
2265 return -TARGET_EFAULT;
2267 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2269 break;
2270 case SOCKOP_getpeername:
2272 abi_ulong sockfd;
2273 abi_ulong target_addr, target_addrlen;
2275 if (get_user_ual(sockfd, vptr)
2276 || get_user_ual(target_addr, vptr + n)
2277 || get_user_ual(target_addrlen, vptr + 2 * n))
2278 return -TARGET_EFAULT;
2280 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2282 break;
2283 case SOCKOP_socketpair:
2285 abi_ulong domain, type, protocol;
2286 abi_ulong tab;
2288 if (get_user_ual(domain, vptr)
2289 || get_user_ual(type, vptr + n)
2290 || get_user_ual(protocol, vptr + 2 * n)
2291 || get_user_ual(tab, vptr + 3 * n))
2292 return -TARGET_EFAULT;
2294 ret = do_socketpair(domain, type, protocol, tab);
2296 break;
2297 case SOCKOP_send:
2299 abi_ulong sockfd;
2300 abi_ulong msg;
2301 size_t len;
2302 abi_ulong flags;
2304 if (get_user_ual(sockfd, vptr)
2305 || get_user_ual(msg, vptr + n)
2306 || get_user_ual(len, vptr + 2 * n)
2307 || get_user_ual(flags, vptr + 3 * n))
2308 return -TARGET_EFAULT;
2310 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2312 break;
2313 case SOCKOP_recv:
2315 abi_ulong sockfd;
2316 abi_ulong msg;
2317 size_t len;
2318 abi_ulong flags;
2320 if (get_user_ual(sockfd, vptr)
2321 || get_user_ual(msg, vptr + n)
2322 || get_user_ual(len, vptr + 2 * n)
2323 || get_user_ual(flags, vptr + 3 * n))
2324 return -TARGET_EFAULT;
2326 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2328 break;
2329 case SOCKOP_sendto:
2331 abi_ulong sockfd;
2332 abi_ulong msg;
2333 size_t len;
2334 abi_ulong flags;
2335 abi_ulong addr;
2336 socklen_t addrlen;
2338 if (get_user_ual(sockfd, vptr)
2339 || get_user_ual(msg, vptr + n)
2340 || get_user_ual(len, vptr + 2 * n)
2341 || get_user_ual(flags, vptr + 3 * n)
2342 || get_user_ual(addr, vptr + 4 * n)
2343 || get_user_ual(addrlen, vptr + 5 * n))
2344 return -TARGET_EFAULT;
2346 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2348 break;
2349 case SOCKOP_recvfrom:
2351 abi_ulong sockfd;
2352 abi_ulong msg;
2353 size_t len;
2354 abi_ulong flags;
2355 abi_ulong addr;
2356 socklen_t addrlen;
2358 if (get_user_ual(sockfd, vptr)
2359 || get_user_ual(msg, vptr + n)
2360 || get_user_ual(len, vptr + 2 * n)
2361 || get_user_ual(flags, vptr + 3 * n)
2362 || get_user_ual(addr, vptr + 4 * n)
2363 || get_user_ual(addrlen, vptr + 5 * n))
2364 return -TARGET_EFAULT;
2366 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2368 break;
2369 case SOCKOP_shutdown:
2371 abi_ulong sockfd, how;
2373 if (get_user_ual(sockfd, vptr)
2374 || get_user_ual(how, vptr + n))
2375 return -TARGET_EFAULT;
2377 ret = get_errno(shutdown(sockfd, how));
2379 break;
2380 case SOCKOP_sendmsg:
2381 case SOCKOP_recvmsg:
2383 abi_ulong fd;
2384 abi_ulong target_msg;
2385 abi_ulong flags;
2387 if (get_user_ual(fd, vptr)
2388 || get_user_ual(target_msg, vptr + n)
2389 || get_user_ual(flags, vptr + 2 * n))
2390 return -TARGET_EFAULT;
2392 ret = do_sendrecvmsg(fd, target_msg, flags,
2393 (num == SOCKOP_sendmsg));
2395 break;
2396 case SOCKOP_setsockopt:
2398 abi_ulong sockfd;
2399 abi_ulong level;
2400 abi_ulong optname;
2401 abi_ulong optval;
2402 socklen_t optlen;
2404 if (get_user_ual(sockfd, vptr)
2405 || get_user_ual(level, vptr + n)
2406 || get_user_ual(optname, vptr + 2 * n)
2407 || get_user_ual(optval, vptr + 3 * n)
2408 || get_user_ual(optlen, vptr + 4 * n))
2409 return -TARGET_EFAULT;
2411 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2413 break;
2414 case SOCKOP_getsockopt:
2416 abi_ulong sockfd;
2417 abi_ulong level;
2418 abi_ulong optname;
2419 abi_ulong optval;
2420 socklen_t optlen;
2422 if (get_user_ual(sockfd, vptr)
2423 || get_user_ual(level, vptr + n)
2424 || get_user_ual(optname, vptr + 2 * n)
2425 || get_user_ual(optval, vptr + 3 * n)
2426 || get_user_ual(optlen, vptr + 4 * n))
2427 return -TARGET_EFAULT;
2429 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2431 break;
2432 default:
2433 gemu_log("Unsupported socketcall: %d\n", num);
2434 ret = -TARGET_ENOSYS;
2435 break;
2437 return ret;
2439 #endif
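/*
 * Layout assumed by do_socketcall() (illustrative): the guest passes a
 * single pointer to an array of abi_ulong arguments, so for e.g.
 *
 *     socketcall(SOCKOP_connect, args)
 *
 * the handler reads args[0] = sockfd, args[1] = target sockaddr pointer and
 * args[2] = addrlen at vptr, vptr + n and vptr + 2 * n, where
 * n = sizeof(abi_ulong).
 */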
2441 #define N_SHM_REGIONS 32
2443 static struct shm_region {
2444 abi_ulong start;
2445 abi_ulong size;
2446 } shm_regions[N_SHM_REGIONS];
2448 struct target_ipc_perm
2450 abi_long __key;
2451 abi_ulong uid;
2452 abi_ulong gid;
2453 abi_ulong cuid;
2454 abi_ulong cgid;
2455 unsigned short int mode;
2456 unsigned short int __pad1;
2457 unsigned short int __seq;
2458 unsigned short int __pad2;
2459 abi_ulong __unused1;
2460 abi_ulong __unused2;
2463 struct target_semid_ds
2465 struct target_ipc_perm sem_perm;
2466 abi_ulong sem_otime;
2467 abi_ulong __unused1;
2468 abi_ulong sem_ctime;
2469 abi_ulong __unused2;
2470 abi_ulong sem_nsems;
2471 abi_ulong __unused3;
2472 abi_ulong __unused4;
2475 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2476 abi_ulong target_addr)
2478 struct target_ipc_perm *target_ip;
2479 struct target_semid_ds *target_sd;
2481 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2482 return -TARGET_EFAULT;
2483 target_ip = &(target_sd->sem_perm);
2484 host_ip->__key = tswapal(target_ip->__key);
2485 host_ip->uid = tswapal(target_ip->uid);
2486 host_ip->gid = tswapal(target_ip->gid);
2487 host_ip->cuid = tswapal(target_ip->cuid);
2488 host_ip->cgid = tswapal(target_ip->cgid);
2489 host_ip->mode = tswap16(target_ip->mode);
2490 unlock_user_struct(target_sd, target_addr, 0);
2491 return 0;
2494 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2495 struct ipc_perm *host_ip)
2497 struct target_ipc_perm *target_ip;
2498 struct target_semid_ds *target_sd;
2500 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2501 return -TARGET_EFAULT;
2502 target_ip = &(target_sd->sem_perm);
2503 target_ip->__key = tswapal(host_ip->__key);
2504 target_ip->uid = tswapal(host_ip->uid);
2505 target_ip->gid = tswapal(host_ip->gid);
2506 target_ip->cuid = tswapal(host_ip->cuid);
2507 target_ip->cgid = tswapal(host_ip->cgid);
2508 target_ip->mode = tswap16(host_ip->mode);
2509 unlock_user_struct(target_sd, target_addr, 1);
2510 return 0;
2513 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2514 abi_ulong target_addr)
2516 struct target_semid_ds *target_sd;
2518 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2519 return -TARGET_EFAULT;
2520 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2521 return -TARGET_EFAULT;
2522 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2523 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2524 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2525 unlock_user_struct(target_sd, target_addr, 0);
2526 return 0;
2529 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2530 struct semid_ds *host_sd)
2532 struct target_semid_ds *target_sd;
2534 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2535 return -TARGET_EFAULT;
2536 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2537 return -TARGET_EFAULT;
2538 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2539 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2540 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2541 unlock_user_struct(target_sd, target_addr, 1);
2542 return 0;
2545 struct target_seminfo {
2546 int semmap;
2547 int semmni;
2548 int semmns;
2549 int semmnu;
2550 int semmsl;
2551 int semopm;
2552 int semume;
2553 int semusz;
2554 int semvmx;
2555 int semaem;
2558 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2559 struct seminfo *host_seminfo)
2561 struct target_seminfo *target_seminfo;
2562 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2563 return -TARGET_EFAULT;
2564 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2565 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2566 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2567 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2568 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2569 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2570 __put_user(host_seminfo->semume, &target_seminfo->semume);
2571 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2572 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2573 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2574 unlock_user_struct(target_seminfo, target_addr, 1);
2575 return 0;
2578 union semun {
2579 int val;
2580 struct semid_ds *buf;
2581 unsigned short *array;
2582 struct seminfo *__buf;
2585 union target_semun {
2586 int val;
2587 abi_ulong buf;
2588 abi_ulong array;
2589 abi_ulong __buf;
2592 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2593 abi_ulong target_addr)
2595 int nsems;
2596 unsigned short *array;
2597 union semun semun;
2598 struct semid_ds semid_ds;
2599 int i, ret;
2601 semun.buf = &semid_ds;
2603 ret = semctl(semid, 0, IPC_STAT, semun);
2604 if (ret == -1)
2605 return get_errno(ret);
2607 nsems = semid_ds.sem_nsems;
2609 *host_array = malloc(nsems*sizeof(unsigned short));
2610 array = lock_user(VERIFY_READ, target_addr,
2611 nsems*sizeof(unsigned short), 1);
2612 if (!array)
2613 return -TARGET_EFAULT;
2615 for(i=0; i<nsems; i++) {
2616 __get_user((*host_array)[i], &array[i]);
2618 unlock_user(array, target_addr, 0);
2620 return 0;
2623 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2624 unsigned short **host_array)
2626 int nsems;
2627 unsigned short *array;
2628 union semun semun;
2629 struct semid_ds semid_ds;
2630 int i, ret;
2632 semun.buf = &semid_ds;
2634 ret = semctl(semid, 0, IPC_STAT, semun);
2635 if (ret == -1)
2636 return get_errno(ret);
2638 nsems = semid_ds.sem_nsems;
2640 array = lock_user(VERIFY_WRITE, target_addr,
2641 nsems*sizeof(unsigned short), 0);
2642 if (!array)
2643 return -TARGET_EFAULT;
2645 for(i=0; i<nsems; i++) {
2646 __put_user((*host_array)[i], &array[i]);
2648 free(*host_array);
2649 unlock_user(array, target_addr, 1);
2651 return 0;
2654 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2655 union target_semun target_su)
2657 union semun arg;
2658 struct semid_ds dsarg;
2659 unsigned short *array = NULL;
2660 struct seminfo seminfo;
2661 abi_long ret = -TARGET_EINVAL;
2662 abi_long err;
2663 cmd &= 0xff;
2665 switch( cmd ) {
2666 case GETVAL:
2667 case SETVAL:
2668 arg.val = tswap32(target_su.val);
2669 ret = get_errno(semctl(semid, semnum, cmd, arg));
2670 target_su.val = tswap32(arg.val);
2671 break;
2672 case GETALL:
2673 case SETALL:
2674 err = target_to_host_semarray(semid, &array, target_su.array);
2675 if (err)
2676 return err;
2677 arg.array = array;
2678 ret = get_errno(semctl(semid, semnum, cmd, arg));
2679 err = host_to_target_semarray(semid, target_su.array, &array);
2680 if (err)
2681 return err;
2682 break;
2683 case IPC_STAT:
2684 case IPC_SET:
2685 case SEM_STAT:
2686 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2687 if (err)
2688 return err;
2689 arg.buf = &dsarg;
2690 ret = get_errno(semctl(semid, semnum, cmd, arg));
2691 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2692 if (err)
2693 return err;
2694 break;
2695 case IPC_INFO:
2696 case SEM_INFO:
2697 arg.__buf = &seminfo;
2698 ret = get_errno(semctl(semid, semnum, cmd, arg));
2699 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2700 if (err)
2701 return err;
2702 break;
2703 case IPC_RMID:
2704 case GETPID:
2705 case GETNCNT:
2706 case GETZCNT:
2707 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2708 break;
2711 return ret;
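/*
 * Worked example for the GETALL/SETALL path above (sketch): the guest's
 * semun.array is an abi_ulong pointer, so target_to_host_semarray() first
 * asks the kernel (IPC_STAT) how many semaphores the set holds, copies that
 * many unsigned shorts out of guest memory into a freshly malloc'd host
 * array, and host_to_target_semarray() copies the (possibly updated) values
 * back to the guest and frees the array after the semctl() call.
 */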
2714 struct target_sembuf {
2715 unsigned short sem_num;
2716 short sem_op;
2717 short sem_flg;
2720 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2721 abi_ulong target_addr,
2722 unsigned nsops)
2724 struct target_sembuf *target_sembuf;
2725 int i;
2727 target_sembuf = lock_user(VERIFY_READ, target_addr,
2728 nsops*sizeof(struct target_sembuf), 1);
2729 if (!target_sembuf)
2730 return -TARGET_EFAULT;
2732 for(i=0; i<nsops; i++) {
2733 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2734 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2735 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2738 unlock_user(target_sembuf, target_addr, 0);
2740 return 0;
2743 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2745 struct sembuf sops[nsops];
2747 if (target_to_host_sembuf(sops, ptr, nsops))
2748 return -TARGET_EFAULT;
2750 return get_errno(semop(semid, sops, nsops));
2753 struct target_msqid_ds
2755 struct target_ipc_perm msg_perm;
2756 abi_ulong msg_stime;
2757 #if TARGET_ABI_BITS == 32
2758 abi_ulong __unused1;
2759 #endif
2760 abi_ulong msg_rtime;
2761 #if TARGET_ABI_BITS == 32
2762 abi_ulong __unused2;
2763 #endif
2764 abi_ulong msg_ctime;
2765 #if TARGET_ABI_BITS == 32
2766 abi_ulong __unused3;
2767 #endif
2768 abi_ulong __msg_cbytes;
2769 abi_ulong msg_qnum;
2770 abi_ulong msg_qbytes;
2771 abi_ulong msg_lspid;
2772 abi_ulong msg_lrpid;
2773 abi_ulong __unused4;
2774 abi_ulong __unused5;
2777 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2778 abi_ulong target_addr)
2780 struct target_msqid_ds *target_md;
2782 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2783 return -TARGET_EFAULT;
2784 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2785 return -TARGET_EFAULT;
2786 host_md->msg_stime = tswapal(target_md->msg_stime);
2787 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2788 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2789 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2790 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2791 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2792 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2793 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2794 unlock_user_struct(target_md, target_addr, 0);
2795 return 0;
2798 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2799 struct msqid_ds *host_md)
2801 struct target_msqid_ds *target_md;
2803 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2804 return -TARGET_EFAULT;
2805 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2806 return -TARGET_EFAULT;
2807 target_md->msg_stime = tswapal(host_md->msg_stime);
2808 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2809 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2810 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2811 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2812 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2813 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2814 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2815 unlock_user_struct(target_md, target_addr, 1);
2816 return 0;
2819 struct target_msginfo {
2820 int msgpool;
2821 int msgmap;
2822 int msgmax;
2823 int msgmnb;
2824 int msgmni;
2825 int msgssz;
2826 int msgtql;
2827 unsigned short int msgseg;
2830 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2831 struct msginfo *host_msginfo)
2833 struct target_msginfo *target_msginfo;
2834 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2835 return -TARGET_EFAULT;
2836 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2837 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2838 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2839 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2840 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2841 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2842 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2843 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2844 unlock_user_struct(target_msginfo, target_addr, 1);
2845 return 0;
2848 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2850 struct msqid_ds dsarg;
2851 struct msginfo msginfo;
2852 abi_long ret = -TARGET_EINVAL;
2854 cmd &= 0xff;
2856 switch (cmd) {
2857 case IPC_STAT:
2858 case IPC_SET:
2859 case MSG_STAT:
2860 if (target_to_host_msqid_ds(&dsarg,ptr))
2861 return -TARGET_EFAULT;
2862 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2863 if (host_to_target_msqid_ds(ptr,&dsarg))
2864 return -TARGET_EFAULT;
2865 break;
2866 case IPC_RMID:
2867 ret = get_errno(msgctl(msgid, cmd, NULL));
2868 break;
2869 case IPC_INFO:
2870 case MSG_INFO:
2871 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2872 if (host_to_target_msginfo(ptr, &msginfo))
2873 return -TARGET_EFAULT;
2874 break;
2877 return ret;
2880 struct target_msgbuf {
2881 abi_long mtype;
2882 char mtext[1];
2885 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2886 unsigned int msgsz, int msgflg)
2888 struct target_msgbuf *target_mb;
2889 struct msgbuf *host_mb;
2890 abi_long ret = 0;
2892 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2893 return -TARGET_EFAULT;
2894 host_mb = malloc(msgsz+sizeof(long));
2895 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2896 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2897 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2898 free(host_mb);
2899 unlock_user_struct(target_mb, msgp, 0);
2901 return ret;
2904 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2905 unsigned int msgsz, abi_long msgtyp,
2906 int msgflg)
2908 struct target_msgbuf *target_mb;
2909 char *target_mtext;
2910 struct msgbuf *host_mb;
2911 abi_long ret = 0;
2913 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2914 return -TARGET_EFAULT;
2916 host_mb = g_malloc(msgsz+sizeof(long));
2917 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2919 if (ret > 0) {
2920 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2921 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2922 if (!target_mtext) {
2923 ret = -TARGET_EFAULT;
2924 goto end;
2926 memcpy(target_mb->mtext, host_mb->mtext, ret);
2927 unlock_user(target_mtext, target_mtext_addr, ret);
2930 target_mb->mtype = tswapal(host_mb->mtype);
2932 end:
2933 if (target_mb)
2934 unlock_user_struct(target_mb, msgp, 1);
2935 g_free(host_mb);
2936 return ret;
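/*
 * Note on the copy-back above: msgrcv() returns the number of bytes placed
 * in mtext, so only that many bytes are copied into the guest buffer at
 * msgp + sizeof(abi_ulong), and mtype is byte-swapped separately.  A failed
 * lock of the guest mtext region turns the whole call into -TARGET_EFAULT
 * even though the host msgrcv() has already consumed the message.
 */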
2939 struct target_shmid_ds
2941 struct target_ipc_perm shm_perm;
2942 abi_ulong shm_segsz;
2943 abi_ulong shm_atime;
2944 #if TARGET_ABI_BITS == 32
2945 abi_ulong __unused1;
2946 #endif
2947 abi_ulong shm_dtime;
2948 #if TARGET_ABI_BITS == 32
2949 abi_ulong __unused2;
2950 #endif
2951 abi_ulong shm_ctime;
2952 #if TARGET_ABI_BITS == 32
2953 abi_ulong __unused3;
2954 #endif
2955 int shm_cpid;
2956 int shm_lpid;
2957 abi_ulong shm_nattch;
2958 unsigned long int __unused4;
2959 unsigned long int __unused5;
2962 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2963 abi_ulong target_addr)
2965 struct target_shmid_ds *target_sd;
2967 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2968 return -TARGET_EFAULT;
2969 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2970 return -TARGET_EFAULT;
2971 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2972 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2973 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2974 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2975 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2976 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2977 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2978 unlock_user_struct(target_sd, target_addr, 0);
2979 return 0;
2982 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2983 struct shmid_ds *host_sd)
2985 struct target_shmid_ds *target_sd;
2987 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2988 return -TARGET_EFAULT;
2989 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2990 return -TARGET_EFAULT;
2991 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2992 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2993 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2994 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2995 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2996 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2997 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2998 unlock_user_struct(target_sd, target_addr, 1);
2999 return 0;
3002 struct target_shminfo {
3003 abi_ulong shmmax;
3004 abi_ulong shmmin;
3005 abi_ulong shmmni;
3006 abi_ulong shmseg;
3007 abi_ulong shmall;
3010 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3011 struct shminfo *host_shminfo)
3013 struct target_shminfo *target_shminfo;
3014 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3015 return -TARGET_EFAULT;
3016 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3017 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3018 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3019 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3020 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3021 unlock_user_struct(target_shminfo, target_addr, 1);
3022 return 0;
3025 struct target_shm_info {
3026 int used_ids;
3027 abi_ulong shm_tot;
3028 abi_ulong shm_rss;
3029 abi_ulong shm_swp;
3030 abi_ulong swap_attempts;
3031 abi_ulong swap_successes;
3034 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3035 struct shm_info *host_shm_info)
3037 struct target_shm_info *target_shm_info;
3038 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3039 return -TARGET_EFAULT;
3040 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3041 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3042 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3043 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3044 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3045 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3046 unlock_user_struct(target_shm_info, target_addr, 1);
3047 return 0;
3050 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3052 struct shmid_ds dsarg;
3053 struct shminfo shminfo;
3054 struct shm_info shm_info;
3055 abi_long ret = -TARGET_EINVAL;
3057 cmd &= 0xff;
3059 switch(cmd) {
3060 case IPC_STAT:
3061 case IPC_SET:
3062 case SHM_STAT:
3063 if (target_to_host_shmid_ds(&dsarg, buf))
3064 return -TARGET_EFAULT;
3065 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3066 if (host_to_target_shmid_ds(buf, &dsarg))
3067 return -TARGET_EFAULT;
3068 break;
3069 case IPC_INFO:
3070 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3071 if (host_to_target_shminfo(buf, &shminfo))
3072 return -TARGET_EFAULT;
3073 break;
3074 case SHM_INFO:
3075 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3076 if (host_to_target_shm_info(buf, &shm_info))
3077 return -TARGET_EFAULT;
3078 break;
3079 case IPC_RMID:
3080 case SHM_LOCK:
3081 case SHM_UNLOCK:
3082 ret = get_errno(shmctl(shmid, cmd, NULL));
3083 break;
3086 return ret;
3089 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3091 abi_long raddr;
3092 void *host_raddr;
3093 struct shmid_ds shm_info;
3094 int i,ret;
3096 /* find out the length of the shared memory segment */
3097 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3098 if (is_error(ret)) {
3099 /* can't get length, bail out */
3100 return ret;
3103 mmap_lock();
3105 if (shmaddr)
3106 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3107 else {
3108 abi_ulong mmap_start;
3110 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3112 if (mmap_start == -1) {
3113 errno = ENOMEM;
3114 host_raddr = (void *)-1;
3115 } else
3116 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3119 if (host_raddr == (void *)-1) {
3120 mmap_unlock();
3121 return get_errno((long)host_raddr);
3123 raddr=h2g((unsigned long)host_raddr);
3125 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3126 PAGE_VALID | PAGE_READ |
3127 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3129 for (i = 0; i < N_SHM_REGIONS; i++) {
3130 if (shm_regions[i].start == 0) {
3131 shm_regions[i].start = raddr;
3132 shm_regions[i].size = shm_info.shm_segsz;
3133 break;
3137 mmap_unlock();
3138 return raddr;
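/*
 * Address handling sketch for do_shmat(): when the guest supplies no
 * address, a free range of guest address space is found with
 * mmap_find_vma() and the host mapping is forced there with SHM_REMAP so
 * that g2h()/h2g() stay consistent.  The resulting guest address and
 * segment size are remembered in shm_regions[] so that a later shmdt()
 * can clear the page flags for exactly that range.
 */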
3142 static inline abi_long do_shmdt(abi_ulong shmaddr)
3144 int i;
3146 for (i = 0; i < N_SHM_REGIONS; ++i) {
3147 if (shm_regions[i].start == shmaddr) {
3148 shm_regions[i].start = 0;
3149 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3150 break;
3154 return get_errno(shmdt(g2h(shmaddr)));
3157 #ifdef TARGET_NR_ipc
3158 /* ??? This only works with linear mappings. */
3159 /* do_ipc() must return target values and target errnos. */
3160 static abi_long do_ipc(unsigned int call, int first,
3161 int second, int third,
3162 abi_long ptr, abi_long fifth)
3164 int version;
3165 abi_long ret = 0;
3167 version = call >> 16;
3168 call &= 0xffff;
3170 switch (call) {
3171 case IPCOP_semop:
3172 ret = do_semop(first, ptr, second);
3173 break;
3175 case IPCOP_semget:
3176 ret = get_errno(semget(first, second, third));
3177 break;
3179 case IPCOP_semctl:
3180 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3181 break;
3183 case IPCOP_msgget:
3184 ret = get_errno(msgget(first, second));
3185 break;
3187 case IPCOP_msgsnd:
3188 ret = do_msgsnd(first, ptr, second, third);
3189 break;
3191 case IPCOP_msgctl:
3192 ret = do_msgctl(first, second, ptr);
3193 break;
3195 case IPCOP_msgrcv:
3196 switch (version) {
3197 case 0:
3199 struct target_ipc_kludge {
3200 abi_long msgp;
3201 abi_long msgtyp;
3202 } *tmp;
3204 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3205 ret = -TARGET_EFAULT;
3206 break;
3209 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3211 unlock_user_struct(tmp, ptr, 0);
3212 break;
3214 default:
3215 ret = do_msgrcv(first, ptr, second, fifth, third);
3217 break;
3219 case IPCOP_shmat:
3220 switch (version) {
3221 default:
3223 abi_ulong raddr;
3224 raddr = do_shmat(first, ptr, second);
3225 if (is_error(raddr))
3226 return get_errno(raddr);
3227 if (put_user_ual(raddr, third))
3228 return -TARGET_EFAULT;
3229 break;
3231 case 1:
3232 ret = -TARGET_EINVAL;
3233 break;
3235 break;
3236 case IPCOP_shmdt:
3237 ret = do_shmdt(ptr);
3238 break;
3240 case IPCOP_shmget:
3241 /* IPC_* flag values are the same on all linux platforms */
3242 ret = get_errno(shmget(first, second, third));
3243 break;
3245 /* IPC_* and SHM_* command values are the same on all linux platforms */
3246 case IPCOP_shmctl:
3247 ret = do_shmctl(first, second, third);
3248 break;
3249 default:
3250 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3251 ret = -TARGET_ENOSYS;
3252 break;
3254 return ret;
3256 #endif
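/*
 * Illustrative mapping (for targets using the multiplexed ipc() call): a
 * guest semop(semid, sops, nsops) arrives here as
 *
 *     ipc(IPCOP_semop, semid, nsops, 0, sops, 0)
 *
 * i.e. call/first/second/third/ptr/fifth, and is dispatched to do_semop()
 * above.  The version field in the top 16 bits of 'call' only matters for
 * the msgrcv and shmat variants.
 */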
3258 /* kernel structure types definitions */
3260 #define STRUCT(name, ...) STRUCT_ ## name,
3261 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3262 enum {
3263 #include "syscall_types.h"
3265 #undef STRUCT
3266 #undef STRUCT_SPECIAL
3268 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3269 #define STRUCT_SPECIAL(name)
3270 #include "syscall_types.h"
3271 #undef STRUCT
3272 #undef STRUCT_SPECIAL
3274 typedef struct IOCTLEntry IOCTLEntry;
3276 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3277 int fd, abi_long cmd, abi_long arg);
3279 struct IOCTLEntry {
3280 unsigned int target_cmd;
3281 unsigned int host_cmd;
3282 const char *name;
3283 int access;
3284 do_ioctl_fn *do_ioctl;
3285 const argtype arg_type[5];
3288 #define IOC_R 0x0001
3289 #define IOC_W 0x0002
3290 #define IOC_RW (IOC_R | IOC_W)
3292 #define MAX_STRUCT_SIZE 4096
3294 #ifdef CONFIG_FIEMAP
3295 /* So fiemap access checks don't overflow on 32 bit systems.
3296 * This is very slightly smaller than the limit imposed by
3297 * the underlying kernel.
3299 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3300 / sizeof(struct fiemap_extent))
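/*
 * The bound works because the ioctl buffer is laid out as a struct fiemap
 * followed by fm_extent_count extents, so its total size is
 *
 *     sizeof(struct fiemap) + fm_extent_count * sizeof(struct fiemap_extent)
 *
 * Capping fm_extent_count at FIEMAP_MAX_EXTENTS keeps that sum from
 * exceeding UINT_MAX, so the size computation in do_ioctl_fs_ioc_fiemap()
 * below cannot overflow a 32-bit unsigned value.
 */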
3302 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3303 int fd, abi_long cmd, abi_long arg)
3305 /* The parameter for this ioctl is a struct fiemap followed
3306 * by an array of struct fiemap_extent whose size is set
3307 * in fiemap->fm_extent_count. The array is filled in by the
3308 * ioctl.
3310 int target_size_in, target_size_out;
3311 struct fiemap *fm;
3312 const argtype *arg_type = ie->arg_type;
3313 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3314 void *argptr, *p;
3315 abi_long ret;
3316 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3317 uint32_t outbufsz;
3318 int free_fm = 0;
3320 assert(arg_type[0] == TYPE_PTR);
3321 assert(ie->access == IOC_RW);
3322 arg_type++;
3323 target_size_in = thunk_type_size(arg_type, 0);
3324 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3325 if (!argptr) {
3326 return -TARGET_EFAULT;
3328 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3329 unlock_user(argptr, arg, 0);
3330 fm = (struct fiemap *)buf_temp;
3331 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3332 return -TARGET_EINVAL;
3335 outbufsz = sizeof (*fm) +
3336 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3338 if (outbufsz > MAX_STRUCT_SIZE) {
3339 /* We can't fit all the extents into the fixed size buffer.
3340 * Allocate one that is large enough and use it instead.
3342 fm = malloc(outbufsz);
3343 if (!fm) {
3344 return -TARGET_ENOMEM;
3346 memcpy(fm, buf_temp, sizeof(struct fiemap));
3347 free_fm = 1;
3349 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3350 if (!is_error(ret)) {
3351 target_size_out = target_size_in;
3352 /* An extent_count of 0 means we were only counting the extents
3353 * so there are no structs to copy
3355 if (fm->fm_extent_count != 0) {
3356 target_size_out += fm->fm_mapped_extents * extent_size;
3358 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3359 if (!argptr) {
3360 ret = -TARGET_EFAULT;
3361 } else {
3362 /* Convert the struct fiemap */
3363 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3364 if (fm->fm_extent_count != 0) {
3365 p = argptr + target_size_in;
3366 /* ...and then all the struct fiemap_extents */
3367 for (i = 0; i < fm->fm_mapped_extents; i++) {
3368 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3369 THUNK_TARGET);
3370 p += extent_size;
3373 unlock_user(argptr, arg, target_size_out);
3376 if (free_fm) {
3377 free(fm);
3379 return ret;
3381 #endif
3383 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3384 int fd, abi_long cmd, abi_long arg)
3386 const argtype *arg_type = ie->arg_type;
3387 int target_size;
3388 void *argptr;
3389 int ret;
3390 struct ifconf *host_ifconf;
3391 uint32_t outbufsz;
3392 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3393 int target_ifreq_size;
3394 int nb_ifreq;
3395 int free_buf = 0;
3396 int i;
3397 int target_ifc_len;
3398 abi_long target_ifc_buf;
3399 int host_ifc_len;
3400 char *host_ifc_buf;
3402 assert(arg_type[0] == TYPE_PTR);
3403 assert(ie->access == IOC_RW);
3405 arg_type++;
3406 target_size = thunk_type_size(arg_type, 0);
3408 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3409 if (!argptr)
3410 return -TARGET_EFAULT;
3411 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3412 unlock_user(argptr, arg, 0);
3414 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3415 target_ifc_len = host_ifconf->ifc_len;
3416 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3418 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3419 nb_ifreq = target_ifc_len / target_ifreq_size;
3420 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3422 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3423 if (outbufsz > MAX_STRUCT_SIZE) {
3424 /* We can't fit all the ifreq entries into the fixed size buffer.
3425 * Allocate one that is large enough and use it instead.
3427 host_ifconf = malloc(outbufsz);
3428 if (!host_ifconf) {
3429 return -TARGET_ENOMEM;
3431 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3432 free_buf = 1;
3434 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3436 host_ifconf->ifc_len = host_ifc_len;
3437 host_ifconf->ifc_buf = host_ifc_buf;
3439 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3440 if (!is_error(ret)) {
3441 /* convert host ifc_len to target ifc_len */
3443 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3444 target_ifc_len = nb_ifreq * target_ifreq_size;
3445 host_ifconf->ifc_len = target_ifc_len;
3447 /* restore target ifc_buf */
3449 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3451 /* copy struct ifconf to target user */
3453 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3454 if (!argptr)
3455 return -TARGET_EFAULT;
3456 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3457 unlock_user(argptr, arg, target_size);
3459 /* copy ifreq[] to target user */
3461 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3462 for (i = 0; i < nb_ifreq ; i++) {
3463 thunk_convert(argptr + i * target_ifreq_size,
3464 host_ifc_buf + i * sizeof(struct ifreq),
3465 ifreq_arg_type, THUNK_TARGET);
3467 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3470 if (free_buf) {
3471 free(host_ifconf);
3474 return ret;
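/*
 * Length conversion sketch: struct ifconf sizes its buffer in bytes, and
 * target and host struct ifreq may differ in size, so ifc_len is scaled
 * both ways -- the guest byte count divided by target_ifreq_size gives the
 * entry count used to size the host buffer, and on return the host entry
 * count is multiplied back by target_ifreq_size before being reported to
 * the guest.
 */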
3477 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3478 abi_long cmd, abi_long arg)
3480 void *argptr;
3481 struct dm_ioctl *host_dm;
3482 abi_long guest_data;
3483 uint32_t guest_data_size;
3484 int target_size;
3485 const argtype *arg_type = ie->arg_type;
3486 abi_long ret;
3487 void *big_buf = NULL;
3488 char *host_data;
3490 arg_type++;
3491 target_size = thunk_type_size(arg_type, 0);
3492 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3493 if (!argptr) {
3494 ret = -TARGET_EFAULT;
3495 goto out;
3497 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3498 unlock_user(argptr, arg, 0);
3500 /* buf_temp is too small, so fetch things into a bigger buffer */
3501 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3502 memcpy(big_buf, buf_temp, target_size);
3503 buf_temp = big_buf;
3504 host_dm = big_buf;
3506 guest_data = arg + host_dm->data_start;
3507 if ((guest_data - arg) < 0) {
3508 ret = -TARGET_EINVAL;
3509 goto out;
3511 guest_data_size = host_dm->data_size - host_dm->data_start;
3512 host_data = (char*)host_dm + host_dm->data_start;
3514 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3515 switch (ie->host_cmd) {
3516 case DM_REMOVE_ALL:
3517 case DM_LIST_DEVICES:
3518 case DM_DEV_CREATE:
3519 case DM_DEV_REMOVE:
3520 case DM_DEV_SUSPEND:
3521 case DM_DEV_STATUS:
3522 case DM_DEV_WAIT:
3523 case DM_TABLE_STATUS:
3524 case DM_TABLE_CLEAR:
3525 case DM_TABLE_DEPS:
3526 case DM_LIST_VERSIONS:
3527 /* no input data */
3528 break;
3529 case DM_DEV_RENAME:
3530 case DM_DEV_SET_GEOMETRY:
3531 /* data contains only strings */
3532 memcpy(host_data, argptr, guest_data_size);
3533 break;
3534 case DM_TARGET_MSG:
3535 memcpy(host_data, argptr, guest_data_size);
3536 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3537 break;
3538 case DM_TABLE_LOAD:
3540 void *gspec = argptr;
3541 void *cur_data = host_data;
3542 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3543 int spec_size = thunk_type_size(arg_type, 0);
3544 int i;
3546 for (i = 0; i < host_dm->target_count; i++) {
3547 struct dm_target_spec *spec = cur_data;
3548 uint32_t next;
3549 int slen;
3551 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3552 slen = strlen((char*)gspec + spec_size) + 1;
3553 next = spec->next;
3554 spec->next = sizeof(*spec) + slen;
3555 strcpy((char*)&spec[1], gspec + spec_size);
3556 gspec += next;
3557 cur_data += spec->next;
3559 break;
3561 default:
3562 ret = -TARGET_EINVAL;
3563 goto out;
3565 unlock_user(argptr, guest_data, 0);
3567 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3568 if (!is_error(ret)) {
3569 guest_data = arg + host_dm->data_start;
3570 guest_data_size = host_dm->data_size - host_dm->data_start;
3571 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3572 switch (ie->host_cmd) {
3573 case DM_REMOVE_ALL:
3574 case DM_DEV_CREATE:
3575 case DM_DEV_REMOVE:
3576 case DM_DEV_RENAME:
3577 case DM_DEV_SUSPEND:
3578 case DM_DEV_STATUS:
3579 case DM_TABLE_LOAD:
3580 case DM_TABLE_CLEAR:
3581 case DM_TARGET_MSG:
3582 case DM_DEV_SET_GEOMETRY:
3583 /* no return data */
3584 break;
3585 case DM_LIST_DEVICES:
3587 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3588 uint32_t remaining_data = guest_data_size;
3589 void *cur_data = argptr;
3590 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3591 int nl_size = 12; /* can't use thunk_size due to alignment */
3593 while (1) {
3594 uint32_t next = nl->next;
3595 if (next) {
3596 nl->next = nl_size + (strlen(nl->name) + 1);
3598 if (remaining_data < nl->next) {
3599 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3600 break;
3602 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3603 strcpy(cur_data + nl_size, nl->name);
3604 cur_data += nl->next;
3605 remaining_data -= nl->next;
3606 if (!next) {
3607 break;
3609 nl = (void*)nl + next;
3611 break;
3613 case DM_DEV_WAIT:
3614 case DM_TABLE_STATUS:
3616 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3617 void *cur_data = argptr;
3618 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3619 int spec_size = thunk_type_size(arg_type, 0);
3620 int i;
3622 for (i = 0; i < host_dm->target_count; i++) {
3623 uint32_t next = spec->next;
3624 int slen = strlen((char*)&spec[1]) + 1;
3625 spec->next = (cur_data - argptr) + spec_size + slen;
3626 if (guest_data_size < spec->next) {
3627 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3628 break;
3630 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3631 strcpy(cur_data + spec_size, (char*)&spec[1]);
3632 cur_data = argptr + spec->next;
3633 spec = (void*)host_dm + host_dm->data_start + next;
3635 break;
3637 case DM_TABLE_DEPS:
3639 void *hdata = (void*)host_dm + host_dm->data_start;
3640 int count = *(uint32_t*)hdata;
3641 uint64_t *hdev = hdata + 8;
3642 uint64_t *gdev = argptr + 8;
3643 int i;
3645 *(uint32_t*)argptr = tswap32(count);
3646 for (i = 0; i < count; i++) {
3647 *gdev = tswap64(*hdev);
3648 gdev++;
3649 hdev++;
3651 break;
3653 case DM_LIST_VERSIONS:
3655 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3656 uint32_t remaining_data = guest_data_size;
3657 void *cur_data = argptr;
3658 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3659 int vers_size = thunk_type_size(arg_type, 0);
3661 while (1) {
3662 uint32_t next = vers->next;
3663 if (next) {
3664 vers->next = vers_size + (strlen(vers->name) + 1);
3666 if (remaining_data < vers->next) {
3667 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3668 break;
3670 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3671 strcpy(cur_data + vers_size, vers->name);
3672 cur_data += vers->next;
3673 remaining_data -= vers->next;
3674 if (!next) {
3675 break;
3677 vers = (void*)vers + next;
3679 break;
3681 default:
3682 ret = -TARGET_EINVAL;
3683 goto out;
3685 unlock_user(argptr, guest_data, guest_data_size);
3687 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3688 if (!argptr) {
3689 ret = -TARGET_EFAULT;
3690 goto out;
3692 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3693 unlock_user(argptr, arg, target_size);
3695 out:
3696 g_free(big_buf);
3697 return ret;
3700 static IOCTLEntry ioctl_entries[] = {
3701 #define IOCTL(cmd, access, ...) \
3702 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3703 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3704 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3705 #include "ioctls.h"
3706 { 0, 0, },
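/*
 * Example expansion (hypothetical entry, for illustration only): a line
 * such as
 *
 *     IOCTL(FIONREAD, IOC_R, MK_PTR(TYPE_INT))
 *
 * in ioctls.h would expand to
 *
 *     { TARGET_FIONREAD, FIONREAD, "FIONREAD", IOC_R, 0, { MK_PTR(TYPE_INT) } },
 *
 * i.e. target command number, host command number, name, access mode, no
 * special handler, and the thunk type description of the argument.
 */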
3709 /* ??? Implement proper locking for ioctls. */
3710 /* do_ioctl() Must return target values and target errnos. */
3711 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3713 const IOCTLEntry *ie;
3714 const argtype *arg_type;
3715 abi_long ret;
3716 uint8_t buf_temp[MAX_STRUCT_SIZE];
3717 int target_size;
3718 void *argptr;
3720 ie = ioctl_entries;
3721 for(;;) {
3722 if (ie->target_cmd == 0) {
3723 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3724 return -TARGET_ENOSYS;
3726 if (ie->target_cmd == cmd)
3727 break;
3728 ie++;
3730 arg_type = ie->arg_type;
3731 #if defined(DEBUG)
3732 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3733 #endif
3734 if (ie->do_ioctl) {
3735 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3738 switch(arg_type[0]) {
3739 case TYPE_NULL:
3740 /* no argument */
3741 ret = get_errno(ioctl(fd, ie->host_cmd));
3742 break;
3743 case TYPE_PTRVOID:
3744 case TYPE_INT:
3745 /* int argument */
3746 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3747 break;
3748 case TYPE_PTR:
3749 arg_type++;
3750 target_size = thunk_type_size(arg_type, 0);
3751 switch(ie->access) {
3752 case IOC_R:
3753 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3754 if (!is_error(ret)) {
3755 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3756 if (!argptr)
3757 return -TARGET_EFAULT;
3758 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3759 unlock_user(argptr, arg, target_size);
3761 break;
3762 case IOC_W:
3763 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3764 if (!argptr)
3765 return -TARGET_EFAULT;
3766 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3767 unlock_user(argptr, arg, 0);
3768 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3769 break;
3770 default:
3771 case IOC_RW:
3772 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3773 if (!argptr)
3774 return -TARGET_EFAULT;
3775 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3776 unlock_user(argptr, arg, 0);
3777 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3778 if (!is_error(ret)) {
3779 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3780 if (!argptr)
3781 return -TARGET_EFAULT;
3782 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3783 unlock_user(argptr, arg, target_size);
3785 break;
3787 break;
3788 default:
3789 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3790 (long)cmd, arg_type[0]);
3791 ret = -TARGET_ENOSYS;
3792 break;
3794 return ret;
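/*
 * Dispatch summary (sketch): for TYPE_PTR arguments the access mode decides
 * the copy direction -- IOC_R runs the host ioctl first and converts the
 * result out to the guest, IOC_W converts the guest argument in before the
 * call, and IOC_RW does both.  Entries with a do_ioctl callback (such as
 * the fiemap, ifconf and dm handlers above) bypass this generic path
 * entirely.
 */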
3797 static const bitmask_transtbl iflag_tbl[] = {
3798 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3799 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3800 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3801 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3802 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3803 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3804 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3805 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3806 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3807 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3808 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3809 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3810 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3811 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3812 { 0, 0, 0, 0 }
3815 static const bitmask_transtbl oflag_tbl[] = {
3816 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3817 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3818 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3819 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3820 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3821 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3822 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3823 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3824 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3825 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3826 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3827 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3828 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3829 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3830 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3831 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3832 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3833 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3834 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3835 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3836 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3837 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3838 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3839 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3840 { 0, 0, 0, 0 }
3843 static const bitmask_transtbl cflag_tbl[] = {
3844 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3845 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3846 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3847 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3848 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3849 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3850 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3851 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3852 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3853 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3854 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3855 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3856 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3857 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3858 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3859 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3860 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3861 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3862 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3863 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3864 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3865 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3866 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3867 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3868 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3869 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3870 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3871 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3872 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3873 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3874 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3875 { 0, 0, 0, 0 }
3878 static const bitmask_transtbl lflag_tbl[] = {
3879 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3880 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3881 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3882 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3883 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3884 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3885 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3886 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3887 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3888 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3889 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3890 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3891 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3892 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3893 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3894 { 0, 0, 0, 0 }
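/*
 * Each bitmask_transtbl row above is assumed to read
 * { target_mask, target_bits, host_mask, host_bits }: when the masked
 * target value equals target_bits, host_bits is set in the converted flags
 * (and the reverse for host_to_target_bitmask).  Multi-bit fields such as
 * CBAUD or CSIZE therefore need one row per legal value, which is why the
 * cflag table enumerates every baud rate.
 */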
3897 static void target_to_host_termios (void *dst, const void *src)
3899 struct host_termios *host = dst;
3900 const struct target_termios *target = src;
3902 host->c_iflag =
3903 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3904 host->c_oflag =
3905 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3906 host->c_cflag =
3907 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3908 host->c_lflag =
3909 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3910 host->c_line = target->c_line;
3912 memset(host->c_cc, 0, sizeof(host->c_cc));
3913 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3914 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3915 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3916 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3917 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3918 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3919 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3920 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3921 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3922 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3923 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3924 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3925 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3926 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3927 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3928 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3929 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3932 static void host_to_target_termios (void *dst, const void *src)
3934 struct target_termios *target = dst;
3935 const struct host_termios *host = src;
3937 target->c_iflag =
3938 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3939 target->c_oflag =
3940 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3941 target->c_cflag =
3942 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3943 target->c_lflag =
3944 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3945 target->c_line = host->c_line;
3947 memset(target->c_cc, 0, sizeof(target->c_cc));
3948 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3949 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3950 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3951 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3952 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3953 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3954 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3955 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3956 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3957 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3958 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3959 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3960 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3961 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3962 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3963 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3964 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3967 static const StructEntry struct_termios_def = {
3968 .convert = { host_to_target_termios, target_to_host_termios },
3969 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3970 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3973 static bitmask_transtbl mmap_flags_tbl[] = {
3974 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3975 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3976 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3977 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3978 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3979 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3980 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3981 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3982 { 0, 0, 0, 0 }
3985 #if defined(TARGET_I386)
3987 /* NOTE: there is really only one LDT shared by all the threads */
3988 static uint8_t *ldt_table;
3990 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3992 int size;
3993 void *p;
3995 if (!ldt_table)
3996 return 0;
3997 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3998 if (size > bytecount)
3999 size = bytecount;
4000 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4001 if (!p)
4002 return -TARGET_EFAULT;
4003 /* ??? Should this be byteswapped? */
4004 memcpy(p, ldt_table, size);
4005 unlock_user(p, ptr, size);
4006 return size;
4009 /* XXX: add locking support */
4010 static abi_long write_ldt(CPUX86State *env,
4011 abi_ulong ptr, unsigned long bytecount, int oldmode)
4013 struct target_modify_ldt_ldt_s ldt_info;
4014 struct target_modify_ldt_ldt_s *target_ldt_info;
4015 int seg_32bit, contents, read_exec_only, limit_in_pages;
4016 int seg_not_present, useable, lm;
4017 uint32_t *lp, entry_1, entry_2;
4019 if (bytecount != sizeof(ldt_info))
4020 return -TARGET_EINVAL;
4021 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4022 return -TARGET_EFAULT;
4023 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4024 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4025 ldt_info.limit = tswap32(target_ldt_info->limit);
4026 ldt_info.flags = tswap32(target_ldt_info->flags);
4027 unlock_user_struct(target_ldt_info, ptr, 0);
4029 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4030 return -TARGET_EINVAL;
4031 seg_32bit = ldt_info.flags & 1;
4032 contents = (ldt_info.flags >> 1) & 3;
4033 read_exec_only = (ldt_info.flags >> 3) & 1;
4034 limit_in_pages = (ldt_info.flags >> 4) & 1;
4035 seg_not_present = (ldt_info.flags >> 5) & 1;
4036 useable = (ldt_info.flags >> 6) & 1;
4037 #ifdef TARGET_ABI32
4038 lm = 0;
4039 #else
4040 lm = (ldt_info.flags >> 7) & 1;
4041 #endif
4042 if (contents == 3) {
4043 if (oldmode)
4044 return -TARGET_EINVAL;
4045 if (seg_not_present == 0)
4046 return -TARGET_EINVAL;
4048 /* allocate the LDT */
4049 if (!ldt_table) {
4050 env->ldt.base = target_mmap(0,
4051 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4052 PROT_READ|PROT_WRITE,
4053 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4054 if (env->ldt.base == -1)
4055 return -TARGET_ENOMEM;
4056 memset(g2h(env->ldt.base), 0,
4057 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4058 env->ldt.limit = 0xffff;
4059 ldt_table = g2h(env->ldt.base);
4062 /* NOTE: same code as Linux kernel */
4063 /* Allow LDTs to be cleared by the user. */
4064 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4065 if (oldmode ||
4066 (contents == 0 &&
4067 read_exec_only == 1 &&
4068 seg_32bit == 0 &&
4069 limit_in_pages == 0 &&
4070 seg_not_present == 1 &&
4071 useable == 0 )) {
4072 entry_1 = 0;
4073 entry_2 = 0;
4074 goto install;
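/* Pack the base, limit and attribute bits into the two 32-bit words of an
   x86 segment descriptor, mirroring the layout the kernel builds for
   modify_ldt(). */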
4078 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4079 (ldt_info.limit & 0x0ffff);
4080 entry_2 = (ldt_info.base_addr & 0xff000000) |
4081 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4082 (ldt_info.limit & 0xf0000) |
4083 ((read_exec_only ^ 1) << 9) |
4084 (contents << 10) |
4085 ((seg_not_present ^ 1) << 15) |
4086 (seg_32bit << 22) |
4087 (limit_in_pages << 23) |
4088 (lm << 21) |
4089 0x7000;
4090 if (!oldmode)
4091 entry_2 |= (useable << 20);
4093 /* Install the new entry ... */
4094 install:
4095 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4096 lp[0] = tswap32(entry_1);
4097 lp[1] = tswap32(entry_2);
4098 return 0;
4101 /* specific and weird i386 syscalls */
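/* modify_ldt(2): func 0 reads the LDT, func 1 writes an entry with the old
   semantics, and func 0x11 writes an entry with the current semantics. */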
4102 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4103 unsigned long bytecount)
4105 abi_long ret;
4107 switch (func) {
4108 case 0:
4109 ret = read_ldt(ptr, bytecount);
4110 break;
4111 case 1:
4112 ret = write_ldt(env, ptr, bytecount, 1);
4113 break;
4114 case 0x11:
4115 ret = write_ldt(env, ptr, bytecount, 0);
4116 break;
4117 default:
4118 ret = -TARGET_ENOSYS;
4119 break;
4121 return ret;
4124 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4125 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4127 uint64_t *gdt_table = g2h(env->gdt.base);
4128 struct target_modify_ldt_ldt_s ldt_info;
4129 struct target_modify_ldt_ldt_s *target_ldt_info;
4130 int seg_32bit, contents, read_exec_only, limit_in_pages;
4131 int seg_not_present, useable, lm;
4132 uint32_t *lp, entry_1, entry_2;
4133 int i;
4135 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4136 if (!target_ldt_info)
4137 return -TARGET_EFAULT;
4138 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4139 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4140 ldt_info.limit = tswap32(target_ldt_info->limit);
4141 ldt_info.flags = tswap32(target_ldt_info->flags);
4142 if (ldt_info.entry_number == -1) {
4143 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4144 if (gdt_table[i] == 0) {
4145 ldt_info.entry_number = i;
4146 target_ldt_info->entry_number = tswap32(i);
4147 break;
4151 unlock_user_struct(target_ldt_info, ptr, 1);
4153 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4154 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4155 return -TARGET_EINVAL;
4156 seg_32bit = ldt_info.flags & 1;
4157 contents = (ldt_info.flags >> 1) & 3;
4158 read_exec_only = (ldt_info.flags >> 3) & 1;
4159 limit_in_pages = (ldt_info.flags >> 4) & 1;
4160 seg_not_present = (ldt_info.flags >> 5) & 1;
4161 useable = (ldt_info.flags >> 6) & 1;
4162 #ifdef TARGET_ABI32
4163 lm = 0;
4164 #else
4165 lm = (ldt_info.flags >> 7) & 1;
4166 #endif
4168 if (contents == 3) {
4169 if (seg_not_present == 0)
4170 return -TARGET_EINVAL;
4173 /* NOTE: same code as Linux kernel */
4174 /* Allow LDTs to be cleared by the user. */
4175 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4176 if ((contents == 0 &&
4177 read_exec_only == 1 &&
4178 seg_32bit == 0 &&
4179 limit_in_pages == 0 &&
4180 seg_not_present == 1 &&
4181 useable == 0 )) {
4182 entry_1 = 0;
4183 entry_2 = 0;
4184 goto install;
4188 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4189 (ldt_info.limit & 0x0ffff);
4190 entry_2 = (ldt_info.base_addr & 0xff000000) |
4191 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4192 (ldt_info.limit & 0xf0000) |
4193 ((read_exec_only ^ 1) << 9) |
4194 (contents << 10) |
4195 ((seg_not_present ^ 1) << 15) |
4196 (seg_32bit << 22) |
4197 (limit_in_pages << 23) |
4198 (useable << 20) |
4199 (lm << 21) |
4200 0x7000;
4202 /* Install the new entry ... */
4203 install:
4204 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4205 lp[0] = tswap32(entry_1);
4206 lp[1] = tswap32(entry_2);
4207 return 0;
4210 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4212 struct target_modify_ldt_ldt_s *target_ldt_info;
4213 uint64_t *gdt_table = g2h(env->gdt.base);
4214 uint32_t base_addr, limit, flags;
4215 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4216 int seg_not_present, useable, lm;
4217 uint32_t *lp, entry_1, entry_2;
4219 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4220 if (!target_ldt_info)
4221 return -TARGET_EFAULT;
4222 idx = tswap32(target_ldt_info->entry_number);
4223 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4224 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4225 unlock_user_struct(target_ldt_info, ptr, 1);
4226 return -TARGET_EINVAL;
4228 lp = (uint32_t *)(gdt_table + idx);
4229 entry_1 = tswap32(lp[0]);
4230 entry_2 = tswap32(lp[1]);
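    /* Unpack the descriptor attribute bits back into the flag layout used by
       set_thread_area (seg_32bit, contents, read_exec_only, ...). */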
4232 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4233 contents = (entry_2 >> 10) & 3;
4234 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4235 seg_32bit = (entry_2 >> 22) & 1;
4236 limit_in_pages = (entry_2 >> 23) & 1;
4237 useable = (entry_2 >> 20) & 1;
4238 #ifdef TARGET_ABI32
4239 lm = 0;
4240 #else
4241 lm = (entry_2 >> 21) & 1;
4242 #endif
4243 flags = (seg_32bit << 0) | (contents << 1) |
4244 (read_exec_only << 3) | (limit_in_pages << 4) |
4245 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4246 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4247 base_addr = (entry_1 >> 16) |
4248 (entry_2 & 0xff000000) |
4249 ((entry_2 & 0xff) << 16);
4250 target_ldt_info->base_addr = tswapal(base_addr);
4251 target_ldt_info->limit = tswap32(limit);
4252 target_ldt_info->flags = tswap32(flags);
4253 unlock_user_struct(target_ldt_info, ptr, 1);
4254 return 0;
4256 #endif /* TARGET_I386 && TARGET_ABI32 */
4258 #ifndef TARGET_ABI32
4259 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4261 abi_long ret = 0;
4262 abi_ulong val;
4263 int idx;
4265 switch(code) {
4266 case TARGET_ARCH_SET_GS:
4267 case TARGET_ARCH_SET_FS:
4268 if (code == TARGET_ARCH_SET_GS)
4269 idx = R_GS;
4270 else
4271 idx = R_FS;
4272 cpu_x86_load_seg(env, idx, 0);
4273 env->segs[idx].base = addr;
4274 break;
4275 case TARGET_ARCH_GET_GS:
4276 case TARGET_ARCH_GET_FS:
4277 if (code == TARGET_ARCH_GET_GS)
4278 idx = R_GS;
4279 else
4280 idx = R_FS;
4281 val = env->segs[idx].base;
4282 if (put_user(val, addr, abi_ulong))
4283 ret = -TARGET_EFAULT;
4284 break;
4285 default:
4286 ret = -TARGET_EINVAL;
4287 break;
4289 return ret;
4291 #endif
4293 #endif /* defined(TARGET_I386) */
4295 #define NEW_STACK_SIZE 0x40000
4297 #if defined(CONFIG_USE_NPTL)
4299 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
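/* Bookkeeping handed to the child thread by do_fork(): the parent fills in
   env and the tid pointers; the child publishes its tid and signals
   readiness through mutex/cond. */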
4300 typedef struct {
4301 CPUArchState *env;
4302 pthread_mutex_t mutex;
4303 pthread_cond_t cond;
4304 pthread_t thread;
4305 uint32_t tid;
4306 abi_ulong child_tidptr;
4307 abi_ulong parent_tidptr;
4308 sigset_t sigmask;
4309 } new_thread_info;
4311 static void *clone_func(void *arg)
4313 new_thread_info *info = arg;
4314 CPUArchState *env;
4315 TaskState *ts;
4317 env = info->env;
4318 thread_env = env;
4319 ts = (TaskState *)thread_env->opaque;
4320 info->tid = gettid();
4321 env->host_tid = info->tid;
4322 task_settid(ts);
4323 if (info->child_tidptr)
4324 put_user_u32(info->tid, info->child_tidptr);
4325 if (info->parent_tidptr)
4326 put_user_u32(info->tid, info->parent_tidptr);
4327 /* Enable signals. */
4328 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4329 /* Signal to the parent that we're ready. */
4330 pthread_mutex_lock(&info->mutex);
4331 pthread_cond_broadcast(&info->cond);
4332 pthread_mutex_unlock(&info->mutex);
4333 /* Wait until the parent has finished initializing the TLS state. */
4334 pthread_mutex_lock(&clone_lock);
4335 pthread_mutex_unlock(&clone_lock);
4336 cpu_loop(env);
4337 /* never exits */
4338 return NULL;
4340 #else
4342 static int clone_func(void *arg)
4344 CPUArchState *env = arg;
4345 cpu_loop(env);
4346 /* never exits */
4347 return 0;
4349 #endif
4351 /* do_fork() must return host values and target errnos (unlike most
4352 do_*() functions). */
4353 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4354 abi_ulong parent_tidptr, target_ulong newtls,
4355 abi_ulong child_tidptr)
4357 int ret;
4358 TaskState *ts;
4359 CPUArchState *new_env;
4360 #if defined(CONFIG_USE_NPTL)
4361 unsigned int nptl_flags;
4362 sigset_t sigmask;
4363 #else
4364 uint8_t *new_stack;
4365 #endif
4367 /* Emulate vfork() with fork() */
4368 if (flags & CLONE_VFORK)
4369 flags &= ~(CLONE_VFORK | CLONE_VM);
4371 if (flags & CLONE_VM) {
4372 TaskState *parent_ts = (TaskState *)env->opaque;
4373 #if defined(CONFIG_USE_NPTL)
4374 new_thread_info info;
4375 pthread_attr_t attr;
4376 #endif
4377 ts = g_malloc0(sizeof(TaskState));
4378 init_task_state(ts);
4379 /* we create a new CPU instance. */
4380 new_env = cpu_copy(env);
4381 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4382 cpu_reset(ENV_GET_CPU(new_env));
4383 #endif
4384 /* Init regs that differ from the parent. */
4385 cpu_clone_regs(new_env, newsp);
4386 new_env->opaque = ts;
4387 ts->bprm = parent_ts->bprm;
4388 ts->info = parent_ts->info;
4389 #if defined(CONFIG_USE_NPTL)
4390 nptl_flags = flags;
4391 flags &= ~CLONE_NPTL_FLAGS2;
4393 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4394 ts->child_tidptr = child_tidptr;
4397 if (nptl_flags & CLONE_SETTLS)
4398 cpu_set_tls (new_env, newtls);
4400 /* Grab a mutex so that thread setup appears atomic. */
4401 pthread_mutex_lock(&clone_lock);
4403 memset(&info, 0, sizeof(info));
4404 pthread_mutex_init(&info.mutex, NULL);
4405 pthread_mutex_lock(&info.mutex);
4406 pthread_cond_init(&info.cond, NULL);
4407 info.env = new_env;
4408 if (nptl_flags & CLONE_CHILD_SETTID)
4409 info.child_tidptr = child_tidptr;
4410 if (nptl_flags & CLONE_PARENT_SETTID)
4411 info.parent_tidptr = parent_tidptr;
4413 ret = pthread_attr_init(&attr);
4414 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4415 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4416 /* It is not safe to deliver signals until the child has finished
4417 initializing, so temporarily block all signals. */
4418 sigfillset(&sigmask);
4419 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4421 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4422 /* TODO: Free new CPU state if thread creation failed. */
4424 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4425 pthread_attr_destroy(&attr);
4426 if (ret == 0) {
4427 /* Wait for the child to initialize. */
4428 pthread_cond_wait(&info.cond, &info.mutex);
4429 ret = info.tid;
4430 if (flags & CLONE_PARENT_SETTID)
4431 put_user_u32(ret, parent_tidptr);
4432 } else {
4433 ret = -1;
4435 pthread_mutex_unlock(&info.mutex);
4436 pthread_cond_destroy(&info.cond);
4437 pthread_mutex_destroy(&info.mutex);
4438 pthread_mutex_unlock(&clone_lock);
4439 #else
4440 if (flags & CLONE_NPTL_FLAGS2)
4441 return -EINVAL;
4442 /* This is probably going to die very quickly, but do it anyway. */
4443 new_stack = g_malloc0 (NEW_STACK_SIZE);
4444 #ifdef __ia64__
4445 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4446 #else
4447 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4448 #endif
4449 #endif
4450 } else {
4451 /* if CLONE_VM is not set, treat it as a plain fork */
4452 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4453 return -EINVAL;
4454 fork_start();
4455 ret = fork();
4456 if (ret == 0) {
4457 /* Child Process. */
4458 cpu_clone_regs(env, newsp);
4459 fork_end(1);
4460 #if defined(CONFIG_USE_NPTL)
4461 /* There is a race condition here. The parent process could
4462 theoretically read the TID in the child process before the child
4463 tid is set. This would require using either ptrace
4464 (not implemented) or having *_tidptr point at a shared memory
4465 mapping. We can't repeat the spinlock hack used above because
4466 the child process gets its own copy of the lock. */
4467 if (flags & CLONE_CHILD_SETTID)
4468 put_user_u32(gettid(), child_tidptr);
4469 if (flags & CLONE_PARENT_SETTID)
4470 put_user_u32(gettid(), parent_tidptr);
4471 ts = (TaskState *)env->opaque;
4472 if (flags & CLONE_SETTLS)
4473 cpu_set_tls (env, newtls);
4474 if (flags & CLONE_CHILD_CLEARTID)
4475 ts->child_tidptr = child_tidptr;
4476 #endif
4477 } else {
4478 fork_end(0);
4481 return ret;
4484 /* warning: doesn't handle Linux-specific flags... */
4485 static int target_to_host_fcntl_cmd(int cmd)
4487 switch(cmd) {
4488 case TARGET_F_DUPFD:
4489 case TARGET_F_GETFD:
4490 case TARGET_F_SETFD:
4491 case TARGET_F_GETFL:
4492 case TARGET_F_SETFL:
4493 return cmd;
4494 case TARGET_F_GETLK:
4495 return F_GETLK;
4496 case TARGET_F_SETLK:
4497 return F_SETLK;
4498 case TARGET_F_SETLKW:
4499 return F_SETLKW;
4500 case TARGET_F_GETOWN:
4501 return F_GETOWN;
4502 case TARGET_F_SETOWN:
4503 return F_SETOWN;
4504 case TARGET_F_GETSIG:
4505 return F_GETSIG;
4506 case TARGET_F_SETSIG:
4507 return F_SETSIG;
4508 #if TARGET_ABI_BITS == 32
4509 case TARGET_F_GETLK64:
4510 return F_GETLK64;
4511 case TARGET_F_SETLK64:
4512 return F_SETLK64;
4513 case TARGET_F_SETLKW64:
4514 return F_SETLKW64;
4515 #endif
4516 case TARGET_F_SETLEASE:
4517 return F_SETLEASE;
4518 case TARGET_F_GETLEASE:
4519 return F_GETLEASE;
4520 #ifdef F_DUPFD_CLOEXEC
4521 case TARGET_F_DUPFD_CLOEXEC:
4522 return F_DUPFD_CLOEXEC;
4523 #endif
4524 case TARGET_F_NOTIFY:
4525 return F_NOTIFY;
4526 default:
4527 return -TARGET_EINVAL;
4529 return -TARGET_EINVAL;
4532 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4533 static const bitmask_transtbl flock_tbl[] = {
4534 TRANSTBL_CONVERT(F_RDLCK),
4535 TRANSTBL_CONVERT(F_WRLCK),
4536 TRANSTBL_CONVERT(F_UNLCK),
4537 TRANSTBL_CONVERT(F_EXLCK),
4538 TRANSTBL_CONVERT(F_SHLCK),
4539 { 0, 0, 0, 0 }
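/* fcntl(2): file-lock commands convert struct flock / flock64 between
   target and host layouts; flag-style commands (F_GETFL/F_SETFL) go through
   the usual fcntl flag bitmask table. */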
4542 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4544 struct flock fl;
4545 struct target_flock *target_fl;
4546 struct flock64 fl64;
4547 struct target_flock64 *target_fl64;
4548 abi_long ret;
4549 int host_cmd = target_to_host_fcntl_cmd(cmd);
4551 if (host_cmd == -TARGET_EINVAL)
4552 return host_cmd;
4554 switch(cmd) {
4555 case TARGET_F_GETLK:
4556 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4557 return -TARGET_EFAULT;
4558 fl.l_type =
4559 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4560 fl.l_whence = tswap16(target_fl->l_whence);
4561 fl.l_start = tswapal(target_fl->l_start);
4562 fl.l_len = tswapal(target_fl->l_len);
4563 fl.l_pid = tswap32(target_fl->l_pid);
4564 unlock_user_struct(target_fl, arg, 0);
4565 ret = get_errno(fcntl(fd, host_cmd, &fl));
4566 if (ret == 0) {
4567 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4568 return -TARGET_EFAULT;
4569 target_fl->l_type =
4570 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4571 target_fl->l_whence = tswap16(fl.l_whence);
4572 target_fl->l_start = tswapal(fl.l_start);
4573 target_fl->l_len = tswapal(fl.l_len);
4574 target_fl->l_pid = tswap32(fl.l_pid);
4575 unlock_user_struct(target_fl, arg, 1);
4577 break;
4579 case TARGET_F_SETLK:
4580 case TARGET_F_SETLKW:
4581 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4582 return -TARGET_EFAULT;
4583 fl.l_type =
4584 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4585 fl.l_whence = tswap16(target_fl->l_whence);
4586 fl.l_start = tswapal(target_fl->l_start);
4587 fl.l_len = tswapal(target_fl->l_len);
4588 fl.l_pid = tswap32(target_fl->l_pid);
4589 unlock_user_struct(target_fl, arg, 0);
4590 ret = get_errno(fcntl(fd, host_cmd, &fl));
4591 break;
4593 case TARGET_F_GETLK64:
4594 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4595 return -TARGET_EFAULT;
4596 fl64.l_type =
4597 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4598 fl64.l_whence = tswap16(target_fl64->l_whence);
4599 fl64.l_start = tswap64(target_fl64->l_start);
4600 fl64.l_len = tswap64(target_fl64->l_len);
4601 fl64.l_pid = tswap32(target_fl64->l_pid);
4602 unlock_user_struct(target_fl64, arg, 0);
4603 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4604 if (ret == 0) {
4605 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4606 return -TARGET_EFAULT;
4607 target_fl64->l_type =
4608 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4609 target_fl64->l_whence = tswap16(fl64.l_whence);
4610 target_fl64->l_start = tswap64(fl64.l_start);
4611 target_fl64->l_len = tswap64(fl64.l_len);
4612 target_fl64->l_pid = tswap32(fl64.l_pid);
4613 unlock_user_struct(target_fl64, arg, 1);
4615 break;
4616 case TARGET_F_SETLK64:
4617 case TARGET_F_SETLKW64:
4618 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4619 return -TARGET_EFAULT;
4620 fl64.l_type =
4621 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4622 fl64.l_whence = tswap16(target_fl64->l_whence);
4623 fl64.l_start = tswap64(target_fl64->l_start);
4624 fl64.l_len = tswap64(target_fl64->l_len);
4625 fl64.l_pid = tswap32(target_fl64->l_pid);
4626 unlock_user_struct(target_fl64, arg, 0);
4627 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4628 break;
4630 case TARGET_F_GETFL:
4631 ret = get_errno(fcntl(fd, host_cmd, arg));
4632 if (ret >= 0) {
4633 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4635 break;
4637 case TARGET_F_SETFL:
4638 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4639 break;
4641 case TARGET_F_SETOWN:
4642 case TARGET_F_GETOWN:
4643 case TARGET_F_SETSIG:
4644 case TARGET_F_GETSIG:
4645 case TARGET_F_SETLEASE:
4646 case TARGET_F_GETLEASE:
4647 ret = get_errno(fcntl(fd, host_cmd, arg));
4648 break;
4650 default:
4651 ret = get_errno(fcntl(fd, cmd, arg));
4652 break;
4654 return ret;
4657 #ifdef USE_UID16
4659 static inline int high2lowuid(int uid)
4661 if (uid > 65535)
4662 return 65534;
4663 else
4664 return uid;
4667 static inline int high2lowgid(int gid)
4669 if (gid > 65535)
4670 return 65534;
4671 else
4672 return gid;
4675 static inline int low2highuid(int uid)
4677 if ((int16_t)uid == -1)
4678 return -1;
4679 else
4680 return uid;
4683 static inline int low2highgid(int gid)
4685 if ((int16_t)gid == -1)
4686 return -1;
4687 else
4688 return gid;
4690 static inline int tswapid(int id)
4692 return tswap16(id);
4694 #else /* !USE_UID16 */
4695 static inline int high2lowuid(int uid)
4697 return uid;
4699 static inline int high2lowgid(int gid)
4701 return gid;
4703 static inline int low2highuid(int uid)
4705 return uid;
4707 static inline int low2highgid(int gid)
4709 return gid;
4711 static inline int tswapid(int id)
4713 return tswap32(id);
4715 #endif /* USE_UID16 */
4717 void syscall_init(void)
4719 IOCTLEntry *ie;
4720 const argtype *arg_type;
4721 int size;
4722 int i;
4724 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4725 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4726 #include "syscall_types.h"
4727 #undef STRUCT
4728 #undef STRUCT_SPECIAL
4730 /* Build target_to_host_errno_table[] from
4731 * host_to_target_errno_table[]. */
4732 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4733 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4736 /* We patch the ioctl size if necessary. We rely on the fact that
4737 no ioctl has all the bits of the size field set to '1'. */
4738 ie = ioctl_entries;
4739 while (ie->target_cmd != 0) {
4740 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4741 TARGET_IOC_SIZEMASK) {
4742 arg_type = ie->arg_type;
4743 if (arg_type[0] != TYPE_PTR) {
4744 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4745 ie->target_cmd);
4746 exit(1);
4748 arg_type++;
4749 size = thunk_type_size(arg_type, 0);
4750 ie->target_cmd = (ie->target_cmd &
4751 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4752 (size << TARGET_IOC_SIZESHIFT);
4755 /* automatic consistency check if same arch */
4756 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4757 (defined(__x86_64__) && defined(TARGET_X86_64))
4758 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4759 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4760 ie->name, ie->target_cmd, ie->host_cmd);
4762 #endif
4763 ie++;
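/* Reassemble a 64-bit offset that 32-bit guests pass as two 32-bit register
   words; the word order depends on the guest's endianness. */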
4767 #if TARGET_ABI_BITS == 32
4768 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4770 #ifdef TARGET_WORDS_BIGENDIAN
4771 return ((uint64_t)word0 << 32) | word1;
4772 #else
4773 return ((uint64_t)word1 << 32) | word0;
4774 #endif
4776 #else /* TARGET_ABI_BITS == 32 */
4777 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4779 return word0;
4781 #endif /* TARGET_ABI_BITS != 32 */
4783 #ifdef TARGET_NR_truncate64
4784 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4785 abi_long arg2,
4786 abi_long arg3,
4787 abi_long arg4)
4789 if (regpairs_aligned(cpu_env)) {
4790 arg2 = arg3;
4791 arg3 = arg4;
4793 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4795 #endif
4797 #ifdef TARGET_NR_ftruncate64
4798 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4799 abi_long arg2,
4800 abi_long arg3,
4801 abi_long arg4)
4803 if (regpairs_aligned(cpu_env)) {
4804 arg2 = arg3;
4805 arg3 = arg4;
4807 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4809 #endif
4811 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4812 abi_ulong target_addr)
4814 struct target_timespec *target_ts;
4816 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4817 return -TARGET_EFAULT;
4818 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4819 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4820 unlock_user_struct(target_ts, target_addr, 0);
4821 return 0;
4824 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4825 struct timespec *host_ts)
4827 struct target_timespec *target_ts;
4829 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4830 return -TARGET_EFAULT;
4831 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4832 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4833 unlock_user_struct(target_ts, target_addr, 1);
4834 return 0;
4837 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4838 static inline abi_long host_to_target_stat64(void *cpu_env,
4839 abi_ulong target_addr,
4840 struct stat *host_st)
4842 #ifdef TARGET_ARM
4843 if (((CPUARMState *)cpu_env)->eabi) {
4844 struct target_eabi_stat64 *target_st;
4846 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4847 return -TARGET_EFAULT;
4848 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4849 __put_user(host_st->st_dev, &target_st->st_dev);
4850 __put_user(host_st->st_ino, &target_st->st_ino);
4851 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4852 __put_user(host_st->st_ino, &target_st->__st_ino);
4853 #endif
4854 __put_user(host_st->st_mode, &target_st->st_mode);
4855 __put_user(host_st->st_nlink, &target_st->st_nlink);
4856 __put_user(host_st->st_uid, &target_st->st_uid);
4857 __put_user(host_st->st_gid, &target_st->st_gid);
4858 __put_user(host_st->st_rdev, &target_st->st_rdev);
4859 __put_user(host_st->st_size, &target_st->st_size);
4860 __put_user(host_st->st_blksize, &target_st->st_blksize);
4861 __put_user(host_st->st_blocks, &target_st->st_blocks);
4862 __put_user(host_st->st_atime, &target_st->target_st_atime);
4863 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4864 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4865 unlock_user_struct(target_st, target_addr, 1);
4866 } else
4867 #endif
4869 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4870 struct target_stat *target_st;
4871 #else
4872 struct target_stat64 *target_st;
4873 #endif
4875 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4876 return -TARGET_EFAULT;
4877 memset(target_st, 0, sizeof(*target_st));
4878 __put_user(host_st->st_dev, &target_st->st_dev);
4879 __put_user(host_st->st_ino, &target_st->st_ino);
4880 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4881 __put_user(host_st->st_ino, &target_st->__st_ino);
4882 #endif
4883 __put_user(host_st->st_mode, &target_st->st_mode);
4884 __put_user(host_st->st_nlink, &target_st->st_nlink);
4885 __put_user(host_st->st_uid, &target_st->st_uid);
4886 __put_user(host_st->st_gid, &target_st->st_gid);
4887 __put_user(host_st->st_rdev, &target_st->st_rdev);
4888 /* XXX: better use of kernel struct */
4889 __put_user(host_st->st_size, &target_st->st_size);
4890 __put_user(host_st->st_blksize, &target_st->st_blksize);
4891 __put_user(host_st->st_blocks, &target_st->st_blocks);
4892 __put_user(host_st->st_atime, &target_st->target_st_atime);
4893 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4894 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4895 unlock_user_struct(target_st, target_addr, 1);
4898 return 0;
4900 #endif
4902 #if defined(CONFIG_USE_NPTL)
4903 /* ??? Using host futex calls even when target atomic operations
4904 are not really atomic probably breaks things. However, implementing
4905 futexes locally would make futexes shared between multiple processes
4906 tricky; such shared futexes are probably useless anyway, because
4907 guest atomic operations won't work either. */
4908 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4909 target_ulong uaddr2, int val3)
4911 struct timespec ts, *pts;
4912 int base_op;
4914 /* ??? We assume FUTEX_* constants are the same on both host
4915 and target. */
4916 #ifdef FUTEX_CMD_MASK
4917 base_op = op & FUTEX_CMD_MASK;
4918 #else
4919 base_op = op;
4920 #endif
4921 switch (base_op) {
4922 case FUTEX_WAIT:
4923 if (timeout) {
4924 pts = &ts;
4925 target_to_host_timespec(pts, timeout);
4926 } else {
4927 pts = NULL;
4929 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4930 pts, NULL, 0));
4931 case FUTEX_WAKE:
4932 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4933 case FUTEX_FD:
4934 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4935 case FUTEX_REQUEUE:
4936 case FUTEX_CMP_REQUEUE:
4937 case FUTEX_WAKE_OP:
4938 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4939 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4940 But the prototype takes a `struct timespec *'; insert casts
4941 to satisfy the compiler. We do not need to tswap TIMEOUT
4942 since it's not compared to guest memory. */
4943 pts = (struct timespec *)(uintptr_t) timeout;
4944 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4945 g2h(uaddr2),
4946 (base_op == FUTEX_CMP_REQUEUE
4947 ? tswap32(val3)
4948 : val3)));
4949 default:
4950 return -TARGET_ENOSYS;
4953 #endif
4955 /* Map host to target signal numbers for the wait family of syscalls.
4956 Assume all other status bits are the same. */
4957 int host_to_target_waitstatus(int status)
4959 if (WIFSIGNALED(status)) {
4960 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4962 if (WIFSTOPPED(status)) {
4963 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4964 | (status & 0xff);
4966 return status;
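/* Parse the kernel release string "x.y.z" (from qemu_uname_release if set,
   otherwise from uname) into a packed integer (x << 16) | (y << 8) | z,
   caching the result. */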
4969 int get_osversion(void)
4971 static int osversion;
4972 struct new_utsname buf;
4973 const char *s;
4974 int i, n, tmp;
4975 if (osversion)
4976 return osversion;
4977 if (qemu_uname_release && *qemu_uname_release) {
4978 s = qemu_uname_release;
4979 } else {
4980 if (sys_uname(&buf))
4981 return 0;
4982 s = buf.release;
4984 tmp = 0;
4985 for (i = 0; i < 3; i++) {
4986 n = 0;
4987 while (*s >= '0' && *s <= '9') {
4988 n *= 10;
4989 n += *s - '0';
4990 s++;
4992 tmp = (tmp << 8) + n;
4993 if (*s == '.')
4994 s++;
4996 osversion = tmp;
4997 return osversion;
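/* Emulated /proc/self/maps: read the host's map, keep only ranges that fall
   inside guest memory (translated back with h2g), and on some targets append
   a synthetic [stack] line. */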
5001 static int open_self_maps(void *cpu_env, int fd)
5003 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5004 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5005 #endif
5006 FILE *fp;
5007 char *line = NULL;
5008 size_t len = 0;
5009 ssize_t read;
5011 fp = fopen("/proc/self/maps", "r");
5012 if (fp == NULL) {
5013 return -EACCES;
5016 while ((read = getline(&line, &len, fp)) != -1) {
5017 int fields, dev_maj, dev_min, inode;
5018 uint64_t min, max, offset;
5019 char flag_r, flag_w, flag_x, flag_p;
5020 char path[512] = "";
5021 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5022 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5023 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5025 if ((fields < 10) || (fields > 11)) {
5026 continue;
5028 if (!strncmp(path, "[stack]", 7)) {
5029 continue;
5031 if (h2g_valid(min) && h2g_valid(max)) {
5032 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5033 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
5034 h2g(min), h2g(max), flag_r, flag_w,
5035 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5036 path[0] ? " " : "", path);
5040 free(line);
5041 fclose(fp);
5043 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5044 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5045 (unsigned long long)ts->info->stack_limit,
5046 (unsigned long long)(ts->info->start_stack +
5047 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
5048 (unsigned long long)0);
5049 #endif
5051 return 0;
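/* Emulated /proc/self/stat: only the pid, command name and start-of-stack
   fields are filled in; every other field reads as 0. */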
5054 static int open_self_stat(void *cpu_env, int fd)
5056 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5057 abi_ulong start_stack = ts->info->start_stack;
5058 int i;
5060 for (i = 0; i < 44; i++) {
5061 char buf[128];
5062 int len;
5063 uint64_t val = 0;
5065 if (i == 0) {
5066 /* pid */
5067 val = getpid();
5068 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5069 } else if (i == 1) {
5070 /* app name */
5071 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5072 } else if (i == 27) {
5073 /* stack bottom */
5074 val = start_stack;
5075 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5076 } else {
5077 /* for the rest, there is MasterCard */
5078 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5081 len = strlen(buf);
5082 if (write(fd, buf, len) != len) {
5083 return -1;
5087 return 0;
5090 static int open_self_auxv(void *cpu_env, int fd)
5092 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5093 abi_ulong auxv = ts->info->saved_auxv;
5094 abi_ulong len = ts->info->auxv_len;
5095 char *ptr;
5098 * The auxiliary vector is stored on the target process's stack;
5099 * read the whole auxv vector and copy it to the file.
5101 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5102 if (ptr != NULL) {
5103 while (len > 0) {
5104 ssize_t r;
5105 r = write(fd, ptr, len);
5106 if (r <= 0) {
5107 break;
5109 len -= r;
5110 ptr += r;
5112 lseek(fd, 0, SEEK_SET);
5113 unlock_user(ptr, auxv, len);
5116 return 0;
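/* open(2) wrapper: a few /proc/self files are intercepted and replaced by
   emulated content written to an unlinked temporary file; everything else
   falls through to the host open(). */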
5119 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5121 struct fake_open {
5122 const char *filename;
5123 int (*fill)(void *cpu_env, int fd);
5125 const struct fake_open *fake_open;
5126 static const struct fake_open fakes[] = {
5127 { "/proc/self/maps", open_self_maps },
5128 { "/proc/self/stat", open_self_stat },
5129 { "/proc/self/auxv", open_self_auxv },
5130 { NULL, NULL }
5133 for (fake_open = fakes; fake_open->filename; fake_open++) {
5134 if (!strncmp(pathname, fake_open->filename,
5135 strlen(fake_open->filename))) {
5136 break;
5140 if (fake_open->filename) {
5141 const char *tmpdir;
5142 char filename[PATH_MAX];
5143 int fd, r;
5145 /* create a temporary file to hold the emulated contents */
5146 tmpdir = getenv("TMPDIR");
5147 if (!tmpdir)
5148 tmpdir = "/tmp";
5149 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5150 fd = mkstemp(filename);
5151 if (fd < 0) {
5152 return fd;
5154 unlink(filename);
5156 if ((r = fake_open->fill(cpu_env, fd))) {
5157 close(fd);
5158 return r;
5160 lseek(fd, 0, SEEK_SET);
5162 return fd;
5165 return get_errno(open(path(pathname), flags, mode));
5168 /* do_syscall() should always have a single exit point at the end so
5169 that actions, such as logging of syscall results, can be performed.
5170 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5171 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5172 abi_long arg2, abi_long arg3, abi_long arg4,
5173 abi_long arg5, abi_long arg6, abi_long arg7,
5174 abi_long arg8)
5176 abi_long ret;
5177 struct stat st;
5178 struct statfs stfs;
5179 void *p;
5181 #ifdef DEBUG
5182 gemu_log("syscall %d", num);
5183 #endif
5184 if(do_strace)
5185 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5187 switch(num) {
5188 case TARGET_NR_exit:
5189 #ifdef CONFIG_USE_NPTL
5190 /* In old applications this may be used to implement _exit(2).
5191 However, in threaded applications it is used for thread termination,
5192 and _exit_group is used for application termination.
5193 Do thread termination if we have more than one thread. */
5194 /* FIXME: This probably breaks if a signal arrives. We should probably
5195 be disabling signals. */
5196 if (first_cpu->next_cpu) {
5197 TaskState *ts;
5198 CPUArchState **lastp;
5199 CPUArchState *p;
5201 cpu_list_lock();
5202 lastp = &first_cpu;
5203 p = first_cpu;
5204 while (p && p != (CPUArchState *)cpu_env) {
5205 lastp = &p->next_cpu;
5206 p = p->next_cpu;
5208 /* If we didn't find the CPU for this thread then something is
5209 horribly wrong. */
5210 if (!p)
5211 abort();
5212 /* Remove the CPU from the list. */
5213 *lastp = p->next_cpu;
5214 cpu_list_unlock();
5215 ts = ((CPUArchState *)cpu_env)->opaque;
5216 if (ts->child_tidptr) {
5217 put_user_u32(0, ts->child_tidptr);
5218 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5219 NULL, NULL, 0);
5221 thread_env = NULL;
5222 object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
5223 g_free(ts);
5224 pthread_exit(NULL);
5226 #endif
5227 #ifdef TARGET_GPROF
5228 _mcleanup();
5229 #endif
5230 gdb_exit(cpu_env, arg1);
5231 _exit(arg1);
5232 ret = 0; /* avoid warning */
5233 break;
5234 case TARGET_NR_read:
5235 if (arg3 == 0)
5236 ret = 0;
5237 else {
5238 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5239 goto efault;
5240 ret = get_errno(read(arg1, p, arg3));
5241 unlock_user(p, arg2, ret);
5243 break;
5244 case TARGET_NR_write:
5245 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5246 goto efault;
5247 ret = get_errno(write(arg1, p, arg3));
5248 unlock_user(p, arg2, 0);
5249 break;
5250 case TARGET_NR_open:
5251 if (!(p = lock_user_string(arg1)))
5252 goto efault;
5253 ret = get_errno(do_open(cpu_env, p,
5254 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5255 arg3));
5256 unlock_user(p, arg1, 0);
5257 break;
5258 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5259 case TARGET_NR_openat:
5260 if (!(p = lock_user_string(arg2)))
5261 goto efault;
5262 ret = get_errno(sys_openat(arg1,
5263 path(p),
5264 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5265 arg4));
5266 unlock_user(p, arg2, 0);
5267 break;
5268 #endif
5269 case TARGET_NR_close:
5270 ret = get_errno(close(arg1));
5271 break;
5272 case TARGET_NR_brk:
5273 ret = do_brk(arg1);
5274 break;
5275 case TARGET_NR_fork:
5276 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5277 break;
5278 #ifdef TARGET_NR_waitpid
5279 case TARGET_NR_waitpid:
5281 int status;
5282 ret = get_errno(waitpid(arg1, &status, arg3));
5283 if (!is_error(ret) && arg2 && ret
5284 && put_user_s32(host_to_target_waitstatus(status), arg2))
5285 goto efault;
5287 break;
5288 #endif
5289 #ifdef TARGET_NR_waitid
5290 case TARGET_NR_waitid:
5292 siginfo_t info;
5293 info.si_pid = 0;
5294 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5295 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5296 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5297 goto efault;
5298 host_to_target_siginfo(p, &info);
5299 unlock_user(p, arg3, sizeof(target_siginfo_t));
5302 break;
5303 #endif
5304 #ifdef TARGET_NR_creat /* not on alpha */
5305 case TARGET_NR_creat:
5306 if (!(p = lock_user_string(arg1)))
5307 goto efault;
5308 ret = get_errno(creat(p, arg2));
5309 unlock_user(p, arg1, 0);
5310 break;
5311 #endif
5312 case TARGET_NR_link:
5314 void * p2;
5315 p = lock_user_string(arg1);
5316 p2 = lock_user_string(arg2);
5317 if (!p || !p2)
5318 ret = -TARGET_EFAULT;
5319 else
5320 ret = get_errno(link(p, p2));
5321 unlock_user(p2, arg2, 0);
5322 unlock_user(p, arg1, 0);
5324 break;
5325 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5326 case TARGET_NR_linkat:
5328 void * p2 = NULL;
5329 if (!arg2 || !arg4)
5330 goto efault;
5331 p = lock_user_string(arg2);
5332 p2 = lock_user_string(arg4);
5333 if (!p || !p2)
5334 ret = -TARGET_EFAULT;
5335 else
5336 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
5337 unlock_user(p, arg2, 0);
5338 unlock_user(p2, arg4, 0);
5340 break;
5341 #endif
5342 case TARGET_NR_unlink:
5343 if (!(p = lock_user_string(arg1)))
5344 goto efault;
5345 ret = get_errno(unlink(p));
5346 unlock_user(p, arg1, 0);
5347 break;
5348 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5349 case TARGET_NR_unlinkat:
5350 if (!(p = lock_user_string(arg2)))
5351 goto efault;
5352 ret = get_errno(sys_unlinkat(arg1, p, arg3));
5353 unlock_user(p, arg2, 0);
5354 break;
5355 #endif
5356 case TARGET_NR_execve:
5358 char **argp, **envp;
5359 int argc, envc;
5360 abi_ulong gp;
5361 abi_ulong guest_argp;
5362 abi_ulong guest_envp;
5363 abi_ulong addr;
5364 char **q;
5365 int total_size = 0;
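            /* Count the guest argv/envp entries, then lock each guest string
               into host memory so the host execve() can consume the arrays
               directly. */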
5367 argc = 0;
5368 guest_argp = arg2;
5369 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5370 if (get_user_ual(addr, gp))
5371 goto efault;
5372 if (!addr)
5373 break;
5374 argc++;
5376 envc = 0;
5377 guest_envp = arg3;
5378 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5379 if (get_user_ual(addr, gp))
5380 goto efault;
5381 if (!addr)
5382 break;
5383 envc++;
5386 argp = alloca((argc + 1) * sizeof(void *));
5387 envp = alloca((envc + 1) * sizeof(void *));
5389 for (gp = guest_argp, q = argp; gp;
5390 gp += sizeof(abi_ulong), q++) {
5391 if (get_user_ual(addr, gp))
5392 goto execve_efault;
5393 if (!addr)
5394 break;
5395 if (!(*q = lock_user_string(addr)))
5396 goto execve_efault;
5397 total_size += strlen(*q) + 1;
5399 *q = NULL;
5401 for (gp = guest_envp, q = envp; gp;
5402 gp += sizeof(abi_ulong), q++) {
5403 if (get_user_ual(addr, gp))
5404 goto execve_efault;
5405 if (!addr)
5406 break;
5407 if (!(*q = lock_user_string(addr)))
5408 goto execve_efault;
5409 total_size += strlen(*q) + 1;
5411 *q = NULL;
5413 /* This case will not be caught by the host's execve() if its
5414 page size is bigger than the target's. */
5415 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5416 ret = -TARGET_E2BIG;
5417 goto execve_end;
5419 if (!(p = lock_user_string(arg1)))
5420 goto execve_efault;
5421 ret = get_errno(execve(p, argp, envp));
5422 unlock_user(p, arg1, 0);
5424 goto execve_end;
5426 execve_efault:
5427 ret = -TARGET_EFAULT;
5429 execve_end:
5430 for (gp = guest_argp, q = argp; *q;
5431 gp += sizeof(abi_ulong), q++) {
5432 if (get_user_ual(addr, gp)
5433 || !addr)
5434 break;
5435 unlock_user(*q, addr, 0);
5437 for (gp = guest_envp, q = envp; *q;
5438 gp += sizeof(abi_ulong), q++) {
5439 if (get_user_ual(addr, gp)
5440 || !addr)
5441 break;
5442 unlock_user(*q, addr, 0);
5445 break;
5446 case TARGET_NR_chdir:
5447 if (!(p = lock_user_string(arg1)))
5448 goto efault;
5449 ret = get_errno(chdir(p));
5450 unlock_user(p, arg1, 0);
5451 break;
5452 #ifdef TARGET_NR_time
5453 case TARGET_NR_time:
5455 time_t host_time;
5456 ret = get_errno(time(&host_time));
5457 if (!is_error(ret)
5458 && arg1
5459 && put_user_sal(host_time, arg1))
5460 goto efault;
5462 break;
5463 #endif
5464 case TARGET_NR_mknod:
5465 if (!(p = lock_user_string(arg1)))
5466 goto efault;
5467 ret = get_errno(mknod(p, arg2, arg3));
5468 unlock_user(p, arg1, 0);
5469 break;
5470 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5471 case TARGET_NR_mknodat:
5472 if (!(p = lock_user_string(arg2)))
5473 goto efault;
5474 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5475 unlock_user(p, arg2, 0);
5476 break;
5477 #endif
5478 case TARGET_NR_chmod:
5479 if (!(p = lock_user_string(arg1)))
5480 goto efault;
5481 ret = get_errno(chmod(p, arg2));
5482 unlock_user(p, arg1, 0);
5483 break;
5484 #ifdef TARGET_NR_break
5485 case TARGET_NR_break:
5486 goto unimplemented;
5487 #endif
5488 #ifdef TARGET_NR_oldstat
5489 case TARGET_NR_oldstat:
5490 goto unimplemented;
5491 #endif
5492 case TARGET_NR_lseek:
5493 ret = get_errno(lseek(arg1, arg2, arg3));
5494 break;
5495 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5496 /* Alpha-specific */
5497 case TARGET_NR_getxpid:
5498 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5499 ret = get_errno(getpid());
5500 break;
5501 #endif
5502 #ifdef TARGET_NR_getpid
5503 case TARGET_NR_getpid:
5504 ret = get_errno(getpid());
5505 break;
5506 #endif
5507 case TARGET_NR_mount:
5509 /* need to look at the data field */
5510 void *p2, *p3;
5511 p = lock_user_string(arg1);
5512 p2 = lock_user_string(arg2);
5513 p3 = lock_user_string(arg3);
5514 if (!p || !p2 || !p3)
5515 ret = -TARGET_EFAULT;
5516 else {
5517 /* FIXME - arg5 should be locked, but it isn't clear how to
5518 * do that since it's not guaranteed to be a NULL-terminated
5519 * string.
5521 if ( ! arg5 )
5522 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5523 else
5524 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5526 unlock_user(p, arg1, 0);
5527 unlock_user(p2, arg2, 0);
5528 unlock_user(p3, arg3, 0);
5529 break;
5531 #ifdef TARGET_NR_umount
5532 case TARGET_NR_umount:
5533 if (!(p = lock_user_string(arg1)))
5534 goto efault;
5535 ret = get_errno(umount(p));
5536 unlock_user(p, arg1, 0);
5537 break;
5538 #endif
5539 #ifdef TARGET_NR_stime /* not on alpha */
5540 case TARGET_NR_stime:
5542 time_t host_time;
5543 if (get_user_sal(host_time, arg1))
5544 goto efault;
5545 ret = get_errno(stime(&host_time));
5547 break;
5548 #endif
5549 case TARGET_NR_ptrace:
5550 goto unimplemented;
5551 #ifdef TARGET_NR_alarm /* not on alpha */
5552 case TARGET_NR_alarm:
5553 ret = alarm(arg1);
5554 break;
5555 #endif
5556 #ifdef TARGET_NR_oldfstat
5557 case TARGET_NR_oldfstat:
5558 goto unimplemented;
5559 #endif
5560 #ifdef TARGET_NR_pause /* not on alpha */
5561 case TARGET_NR_pause:
5562 ret = get_errno(pause());
5563 break;
5564 #endif
5565 #ifdef TARGET_NR_utime
5566 case TARGET_NR_utime:
5568 struct utimbuf tbuf, *host_tbuf;
5569 struct target_utimbuf *target_tbuf;
5570 if (arg2) {
5571 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5572 goto efault;
5573 tbuf.actime = tswapal(target_tbuf->actime);
5574 tbuf.modtime = tswapal(target_tbuf->modtime);
5575 unlock_user_struct(target_tbuf, arg2, 0);
5576 host_tbuf = &tbuf;
5577 } else {
5578 host_tbuf = NULL;
5580 if (!(p = lock_user_string(arg1)))
5581 goto efault;
5582 ret = get_errno(utime(p, host_tbuf));
5583 unlock_user(p, arg1, 0);
5585 break;
5586 #endif
5587 case TARGET_NR_utimes:
5589 struct timeval *tvp, tv[2];
5590 if (arg2) {
5591 if (copy_from_user_timeval(&tv[0], arg2)
5592 || copy_from_user_timeval(&tv[1],
5593 arg2 + sizeof(struct target_timeval)))
5594 goto efault;
5595 tvp = tv;
5596 } else {
5597 tvp = NULL;
5599 if (!(p = lock_user_string(arg1)))
5600 goto efault;
5601 ret = get_errno(utimes(p, tvp));
5602 unlock_user(p, arg1, 0);
5604 break;
5605 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5606 case TARGET_NR_futimesat:
5608 struct timeval *tvp, tv[2];
5609 if (arg3) {
5610 if (copy_from_user_timeval(&tv[0], arg3)
5611 || copy_from_user_timeval(&tv[1],
5612 arg3 + sizeof(struct target_timeval)))
5613 goto efault;
5614 tvp = tv;
5615 } else {
5616 tvp = NULL;
5618 if (!(p = lock_user_string(arg2)))
5619 goto efault;
5620 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5621 unlock_user(p, arg2, 0);
5623 break;
5624 #endif
5625 #ifdef TARGET_NR_stty
5626 case TARGET_NR_stty:
5627 goto unimplemented;
5628 #endif
5629 #ifdef TARGET_NR_gtty
5630 case TARGET_NR_gtty:
5631 goto unimplemented;
5632 #endif
5633 case TARGET_NR_access:
5634 if (!(p = lock_user_string(arg1)))
5635 goto efault;
5636 ret = get_errno(access(path(p), arg2));
5637 unlock_user(p, arg1, 0);
5638 break;
5639 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5640 case TARGET_NR_faccessat:
5641 if (!(p = lock_user_string(arg2)))
5642 goto efault;
5643 ret = get_errno(sys_faccessat(arg1, p, arg3));
5644 unlock_user(p, arg2, 0);
5645 break;
5646 #endif
5647 #ifdef TARGET_NR_nice /* not on alpha */
5648 case TARGET_NR_nice:
5649 ret = get_errno(nice(arg1));
5650 break;
5651 #endif
5652 #ifdef TARGET_NR_ftime
5653 case TARGET_NR_ftime:
5654 goto unimplemented;
5655 #endif
5656 case TARGET_NR_sync:
5657 sync();
5658 ret = 0;
5659 break;
5660 case TARGET_NR_kill:
5661 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5662 break;
5663 case TARGET_NR_rename:
5665 void *p2;
5666 p = lock_user_string(arg1);
5667 p2 = lock_user_string(arg2);
5668 if (!p || !p2)
5669 ret = -TARGET_EFAULT;
5670 else
5671 ret = get_errno(rename(p, p2));
5672 unlock_user(p2, arg2, 0);
5673 unlock_user(p, arg1, 0);
5675 break;
5676 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5677 case TARGET_NR_renameat:
5679 void *p2;
5680 p = lock_user_string(arg2);
5681 p2 = lock_user_string(arg4);
5682 if (!p || !p2)
5683 ret = -TARGET_EFAULT;
5684 else
5685 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5686 unlock_user(p2, arg4, 0);
5687 unlock_user(p, arg2, 0);
5689 break;
5690 #endif
5691 case TARGET_NR_mkdir:
5692 if (!(p = lock_user_string(arg1)))
5693 goto efault;
5694 ret = get_errno(mkdir(p, arg2));
5695 unlock_user(p, arg1, 0);
5696 break;
5697 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5698 case TARGET_NR_mkdirat:
5699 if (!(p = lock_user_string(arg2)))
5700 goto efault;
5701 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5702 unlock_user(p, arg2, 0);
5703 break;
5704 #endif
5705 case TARGET_NR_rmdir:
5706 if (!(p = lock_user_string(arg1)))
5707 goto efault;
5708 ret = get_errno(rmdir(p));
5709 unlock_user(p, arg1, 0);
5710 break;
5711 case TARGET_NR_dup:
5712 ret = get_errno(dup(arg1));
5713 break;
5714 case TARGET_NR_pipe:
5715 ret = do_pipe(cpu_env, arg1, 0, 0);
5716 break;
5717 #ifdef TARGET_NR_pipe2
5718 case TARGET_NR_pipe2:
5719 ret = do_pipe(cpu_env, arg1,
5720 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5721 break;
5722 #endif
5723 case TARGET_NR_times:
5725 struct target_tms *tmsp;
5726 struct tms tms;
5727 ret = get_errno(times(&tms));
5728 if (arg1) {
5729 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5730 if (!tmsp)
5731 goto efault;
5732 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5733 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5734 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5735 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5737 if (!is_error(ret))
5738 ret = host_to_target_clock_t(ret);
5740 break;
5741 #ifdef TARGET_NR_prof
5742 case TARGET_NR_prof:
5743 goto unimplemented;
5744 #endif
5745 #ifdef TARGET_NR_signal
5746 case TARGET_NR_signal:
5747 goto unimplemented;
5748 #endif
5749 case TARGET_NR_acct:
5750 if (arg1 == 0) {
5751 ret = get_errno(acct(NULL));
5752 } else {
5753 if (!(p = lock_user_string(arg1)))
5754 goto efault;
5755 ret = get_errno(acct(path(p)));
5756 unlock_user(p, arg1, 0);
5758 break;
5759 #ifdef TARGET_NR_umount2 /* not on alpha */
5760 case TARGET_NR_umount2:
5761 if (!(p = lock_user_string(arg1)))
5762 goto efault;
5763 ret = get_errno(umount2(p, arg2));
5764 unlock_user(p, arg1, 0);
5765 break;
5766 #endif
5767 #ifdef TARGET_NR_lock
5768 case TARGET_NR_lock:
5769 goto unimplemented;
5770 #endif
5771 case TARGET_NR_ioctl:
5772 ret = do_ioctl(arg1, arg2, arg3);
5773 break;
5774 case TARGET_NR_fcntl:
5775 ret = do_fcntl(arg1, arg2, arg3);
5776 break;
5777 #ifdef TARGET_NR_mpx
5778 case TARGET_NR_mpx:
5779 goto unimplemented;
5780 #endif
5781 case TARGET_NR_setpgid:
5782 ret = get_errno(setpgid(arg1, arg2));
5783 break;
5784 #ifdef TARGET_NR_ulimit
5785 case TARGET_NR_ulimit:
5786 goto unimplemented;
5787 #endif
5788 #ifdef TARGET_NR_oldolduname
5789 case TARGET_NR_oldolduname:
5790 goto unimplemented;
5791 #endif
5792 case TARGET_NR_umask:
5793 ret = get_errno(umask(arg1));
5794 break;
5795 case TARGET_NR_chroot:
5796 if (!(p = lock_user_string(arg1)))
5797 goto efault;
5798 ret = get_errno(chroot(p));
5799 unlock_user(p, arg1, 0);
5800 break;
5801 case TARGET_NR_ustat:
5802 goto unimplemented;
5803 case TARGET_NR_dup2:
5804 ret = get_errno(dup2(arg1, arg2));
5805 break;
5806 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5807 case TARGET_NR_dup3:
5808 ret = get_errno(dup3(arg1, arg2, arg3));
5809 break;
5810 #endif
5811 #ifdef TARGET_NR_getppid /* not on alpha */
5812 case TARGET_NR_getppid:
5813 ret = get_errno(getppid());
5814 break;
5815 #endif
5816 case TARGET_NR_getpgrp:
5817 ret = get_errno(getpgrp());
5818 break;
5819 case TARGET_NR_setsid:
5820 ret = get_errno(setsid());
5821 break;
5822 #ifdef TARGET_NR_sigaction
5823 case TARGET_NR_sigaction:
5825 #if defined(TARGET_ALPHA)
5826 struct target_sigaction act, oact, *pact = 0;
5827 struct target_old_sigaction *old_act;
5828 if (arg2) {
5829 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5830 goto efault;
5831 act._sa_handler = old_act->_sa_handler;
5832 target_siginitset(&act.sa_mask, old_act->sa_mask);
5833 act.sa_flags = old_act->sa_flags;
5834 act.sa_restorer = 0;
5835 unlock_user_struct(old_act, arg2, 0);
5836 pact = &act;
5838 ret = get_errno(do_sigaction(arg1, pact, &oact));
5839 if (!is_error(ret) && arg3) {
5840 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5841 goto efault;
5842 old_act->_sa_handler = oact._sa_handler;
5843 old_act->sa_mask = oact.sa_mask.sig[0];
5844 old_act->sa_flags = oact.sa_flags;
5845 unlock_user_struct(old_act, arg3, 1);
5847 #elif defined(TARGET_MIPS)
5848 struct target_sigaction act, oact, *pact, *old_act;
5850 if (arg2) {
5851 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5852 goto efault;
5853 act._sa_handler = old_act->_sa_handler;
5854 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5855 act.sa_flags = old_act->sa_flags;
5856 unlock_user_struct(old_act, arg2, 0);
5857 pact = &act;
5858 } else {
5859 pact = NULL;
5862 ret = get_errno(do_sigaction(arg1, pact, &oact));
5864 if (!is_error(ret) && arg3) {
5865 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5866 goto efault;
5867 old_act->_sa_handler = oact._sa_handler;
5868 old_act->sa_flags = oact.sa_flags;
5869 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5870 old_act->sa_mask.sig[1] = 0;
5871 old_act->sa_mask.sig[2] = 0;
5872 old_act->sa_mask.sig[3] = 0;
5873 unlock_user_struct(old_act, arg3, 1);
5875 #else
5876 struct target_old_sigaction *old_act;
5877 struct target_sigaction act, oact, *pact;
5878 if (arg2) {
5879 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5880 goto efault;
5881 act._sa_handler = old_act->_sa_handler;
5882 target_siginitset(&act.sa_mask, old_act->sa_mask);
5883 act.sa_flags = old_act->sa_flags;
5884 act.sa_restorer = old_act->sa_restorer;
5885 unlock_user_struct(old_act, arg2, 0);
5886 pact = &act;
5887 } else {
5888 pact = NULL;
5890 ret = get_errno(do_sigaction(arg1, pact, &oact));
5891 if (!is_error(ret) && arg3) {
5892 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5893 goto efault;
5894 old_act->_sa_handler = oact._sa_handler;
5895 old_act->sa_mask = oact.sa_mask.sig[0];
5896 old_act->sa_flags = oact.sa_flags;
5897 old_act->sa_restorer = oact.sa_restorer;
5898 unlock_user_struct(old_act, arg3, 1);
5900 #endif
5902 break;
5903 #endif
5904 case TARGET_NR_rt_sigaction:
5906 #if defined(TARGET_ALPHA)
5907 struct target_sigaction act, oact, *pact = 0;
5908 struct target_rt_sigaction *rt_act;
5909 /* ??? arg4 == sizeof(sigset_t). */
5910 if (arg2) {
5911 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5912 goto efault;
5913 act._sa_handler = rt_act->_sa_handler;
5914 act.sa_mask = rt_act->sa_mask;
5915 act.sa_flags = rt_act->sa_flags;
5916 act.sa_restorer = arg5;
5917 unlock_user_struct(rt_act, arg2, 0);
5918 pact = &act;
5920 ret = get_errno(do_sigaction(arg1, pact, &oact));
5921 if (!is_error(ret) && arg3) {
5922 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5923 goto efault;
5924 rt_act->_sa_handler = oact._sa_handler;
5925 rt_act->sa_mask = oact.sa_mask;
5926 rt_act->sa_flags = oact.sa_flags;
5927 unlock_user_struct(rt_act, arg3, 1);
5929 #else
5930 struct target_sigaction *act;
5931 struct target_sigaction *oact;
5933 if (arg2) {
5934 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5935 goto efault;
5936 } else
5937 act = NULL;
5938 if (arg3) {
5939 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5940 ret = -TARGET_EFAULT;
5941 goto rt_sigaction_fail;
5943 } else
5944 oact = NULL;
5945 ret = get_errno(do_sigaction(arg1, act, oact));
5946 rt_sigaction_fail:
5947 if (act)
5948 unlock_user_struct(act, arg2, 0);
5949 if (oact)
5950 unlock_user_struct(oact, arg3, 1);
5951 #endif
5953 break;
5954 #ifdef TARGET_NR_sgetmask /* not on alpha */
5955 case TARGET_NR_sgetmask:
5957 sigset_t cur_set;
5958 abi_ulong target_set;
5959 sigprocmask(0, NULL, &cur_set);
5960 host_to_target_old_sigset(&target_set, &cur_set);
5961 ret = target_set;
5963 break;
5964 #endif
5965 #ifdef TARGET_NR_ssetmask /* not on alpha */
5966 case TARGET_NR_ssetmask:
5968 sigset_t set, oset, cur_set;
5969 abi_ulong target_set = arg1;
5970 sigprocmask(0, NULL, &cur_set);
5971 target_to_host_old_sigset(&set, &target_set);
5972 sigorset(&set, &set, &cur_set);
5973 sigprocmask(SIG_SETMASK, &set, &oset);
5974 host_to_target_old_sigset(&target_set, &oset);
5975 ret = target_set;
5977 break;
5978 #endif
5979 #ifdef TARGET_NR_sigprocmask
5980 case TARGET_NR_sigprocmask:
5982 #if defined(TARGET_ALPHA)
5983 sigset_t set, oldset;
5984 abi_ulong mask;
5985 int how;
5987 switch (arg1) {
5988 case TARGET_SIG_BLOCK:
5989 how = SIG_BLOCK;
5990 break;
5991 case TARGET_SIG_UNBLOCK:
5992 how = SIG_UNBLOCK;
5993 break;
5994 case TARGET_SIG_SETMASK:
5995 how = SIG_SETMASK;
5996 break;
5997 default:
5998 ret = -TARGET_EINVAL;
5999 goto fail;
6001 mask = arg2;
6002 target_to_host_old_sigset(&set, &mask);
6004 ret = get_errno(sigprocmask(how, &set, &oldset));
6005 if (!is_error(ret)) {
6006 host_to_target_old_sigset(&mask, &oldset);
6007 ret = mask;
6008 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6010 #else
6011 sigset_t set, oldset, *set_ptr;
6012 int how;
6014 if (arg2) {
6015 switch (arg1) {
6016 case TARGET_SIG_BLOCK:
6017 how = SIG_BLOCK;
6018 break;
6019 case TARGET_SIG_UNBLOCK:
6020 how = SIG_UNBLOCK;
6021 break;
6022 case TARGET_SIG_SETMASK:
6023 how = SIG_SETMASK;
6024 break;
6025 default:
6026 ret = -TARGET_EINVAL;
6027 goto fail;
6029 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6030 goto efault;
6031 target_to_host_old_sigset(&set, p);
6032 unlock_user(p, arg2, 0);
6033 set_ptr = &set;
6034 } else {
6035 how = 0;
6036 set_ptr = NULL;
6038 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6039 if (!is_error(ret) && arg3) {
6040 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6041 goto efault;
6042 host_to_target_old_sigset(p, &oldset);
6043 unlock_user(p, arg3, sizeof(target_sigset_t));
6045 #endif
6047 break;
6048 #endif
6049 case TARGET_NR_rt_sigprocmask:
6051 int how = arg1;
6052 sigset_t set, oldset, *set_ptr;
6054 if (arg2) {
6055 switch(how) {
6056 case TARGET_SIG_BLOCK:
6057 how = SIG_BLOCK;
6058 break;
6059 case TARGET_SIG_UNBLOCK:
6060 how = SIG_UNBLOCK;
6061 break;
6062 case TARGET_SIG_SETMASK:
6063 how = SIG_SETMASK;
6064 break;
6065 default:
6066 ret = -TARGET_EINVAL;
6067 goto fail;
6069 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6070 goto efault;
6071 target_to_host_sigset(&set, p);
6072 unlock_user(p, arg2, 0);
6073 set_ptr = &set;
6074 } else {
6075 how = 0;
6076 set_ptr = NULL;
6078 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6079 if (!is_error(ret) && arg3) {
6080 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6081 goto efault;
6082 host_to_target_sigset(p, &oldset);
6083 unlock_user(p, arg3, sizeof(target_sigset_t));
6086 break;
6087 #ifdef TARGET_NR_sigpending
6088 case TARGET_NR_sigpending:
6090 sigset_t set;
6091 ret = get_errno(sigpending(&set));
6092 if (!is_error(ret)) {
6093 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6094 goto efault;
6095 host_to_target_old_sigset(p, &set);
6096 unlock_user(p, arg1, sizeof(target_sigset_t));
6099 break;
6100 #endif
6101 case TARGET_NR_rt_sigpending:
6103 sigset_t set;
6104 ret = get_errno(sigpending(&set));
6105 if (!is_error(ret)) {
6106 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6107 goto efault;
6108 host_to_target_sigset(p, &set);
6109 unlock_user(p, arg1, sizeof(target_sigset_t));
6112 break;
6113 #ifdef TARGET_NR_sigsuspend
6114 case TARGET_NR_sigsuspend:
6116 sigset_t set;
6117 #if defined(TARGET_ALPHA)
6118 abi_ulong mask = arg1;
6119 target_to_host_old_sigset(&set, &mask);
6120 #else
6121 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6122 goto efault;
6123 target_to_host_old_sigset(&set, p);
6124 unlock_user(p, arg1, 0);
6125 #endif
6126 ret = get_errno(sigsuspend(&set));
6128 break;
6129 #endif
6130 case TARGET_NR_rt_sigsuspend:
6132 sigset_t set;
6133 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6134 goto efault;
6135 target_to_host_sigset(&set, p);
6136 unlock_user(p, arg1, 0);
6137 ret = get_errno(sigsuspend(&set));
6139 break;
6140 case TARGET_NR_rt_sigtimedwait:
6142 sigset_t set;
6143 struct timespec uts, *puts;
6144 siginfo_t uinfo;
6146 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6147 goto efault;
6148 target_to_host_sigset(&set, p);
6149 unlock_user(p, arg1, 0);
6150 if (arg3) {
6151 puts = &uts;
6152 target_to_host_timespec(puts, arg3);
6153 } else {
6154 puts = NULL;
6156 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6157 if (!is_error(ret) && arg2) {
6158 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6159 goto efault;
6160 host_to_target_siginfo(p, &uinfo);
6161 unlock_user(p, arg2, sizeof(target_siginfo_t));
6164 break;
6165 case TARGET_NR_rt_sigqueueinfo:
6167 siginfo_t uinfo;
6168 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6169 goto efault;
6170 target_to_host_siginfo(&uinfo, p);
6171 unlock_user(p, arg3, 0);
6172 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6174 break;
6175 #ifdef TARGET_NR_sigreturn
6176 case TARGET_NR_sigreturn:
6177 /* NOTE: ret is eax, so no transcoding must be done */
6178 ret = do_sigreturn(cpu_env);
6179 break;
6180 #endif
6181 case TARGET_NR_rt_sigreturn:
6182 /* NOTE: ret is eax, so no transcoding must be done */
6183 ret = do_rt_sigreturn(cpu_env);
6184 break;
6185 case TARGET_NR_sethostname:
6186 if (!(p = lock_user_string(arg1)))
6187 goto efault;
6188 ret = get_errno(sethostname(p, arg2));
6189 unlock_user(p, arg1, 0);
6190 break;
6191 case TARGET_NR_setrlimit:
6193 int resource = target_to_host_resource(arg1);
6194 struct target_rlimit *target_rlim;
6195 struct rlimit rlim;
6196 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6197 goto efault;
6198 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6199 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6200 unlock_user_struct(target_rlim, arg2, 0);
6201 ret = get_errno(setrlimit(resource, &rlim));
6203 break;
6204 case TARGET_NR_getrlimit:
6206 int resource = target_to_host_resource(arg1);
6207 struct target_rlimit *target_rlim;
6208 struct rlimit rlim;
6210 ret = get_errno(getrlimit(resource, &rlim));
6211 if (!is_error(ret)) {
6212 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6213 goto efault;
6214 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6215 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6216 unlock_user_struct(target_rlim, arg2, 1);
6219 break;
6220 case TARGET_NR_getrusage:
6222 struct rusage rusage;
6223 ret = get_errno(getrusage(arg1, &rusage));
6224 if (!is_error(ret)) {
6225 host_to_target_rusage(arg2, &rusage);
6228 break;
6229 case TARGET_NR_gettimeofday:
6231 struct timeval tv;
6232 ret = get_errno(gettimeofday(&tv, NULL));
6233 if (!is_error(ret)) {
6234 if (copy_to_user_timeval(arg1, &tv))
6235 goto efault;
6238 break;
6239 case TARGET_NR_settimeofday:
6241 struct timeval tv;
6242 if (copy_from_user_timeval(&tv, arg1))
6243 goto efault;
6244 ret = get_errno(settimeofday(&tv, NULL));
6246 break;
6247 #if defined(TARGET_NR_select)
6248 case TARGET_NR_select:
6249 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6250 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6251 #else
6253 struct target_sel_arg_struct *sel;
6254 abi_ulong inp, outp, exp, tvp;
6255 long nsel;
6257 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6258 goto efault;
6259 nsel = tswapal(sel->n);
6260 inp = tswapal(sel->inp);
6261 outp = tswapal(sel->outp);
6262 exp = tswapal(sel->exp);
6263 tvp = tswapal(sel->tvp);
6264 unlock_user_struct(sel, arg1, 0);
6265 ret = do_select(nsel, inp, outp, exp, tvp);
6267 #endif
6268 break;
6269 #endif
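/* For reference: on the targets that take the single-pointer form above, the
 * guest passes one struct holding all five select() arguments, which is why
 * each field is byte-swapped with tswapal() before use.  The guest-side layout
 * is roughly (illustrative sketch, field names as used above):
 *
 *     struct target_sel_arg_struct {
 *         abi_long n;                  // highest fd + 1
 *         abi_long inp, outp, exp;     // guest pointers to fd_sets
 *         abi_long tvp;                // guest pointer to a timeval
 *     };
 */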
6270 #ifdef TARGET_NR_pselect6
6271 case TARGET_NR_pselect6:
6273 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6274 fd_set rfds, wfds, efds;
6275 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6276 struct timespec ts, *ts_ptr;
6278 /*
6279 * The 6th arg is actually two args smashed together,
6280 * so we cannot use the C library.
6281 */
6282 sigset_t set;
6283 struct {
6284 sigset_t *set;
6285 size_t size;
6286 } sig, *sig_ptr;
6288 abi_ulong arg_sigset, arg_sigsize, *arg7;
6289 target_sigset_t *target_sigset;
6291 n = arg1;
6292 rfd_addr = arg2;
6293 wfd_addr = arg3;
6294 efd_addr = arg4;
6295 ts_addr = arg5;
6297 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6298 if (ret) {
6299 goto fail;
6301 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6302 if (ret) {
6303 goto fail;
6305 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6306 if (ret) {
6307 goto fail;
6310 /*
6311 * This takes a timespec, and not a timeval, so we cannot
6312 * use the do_select() helper ...
6313 */
6314 if (ts_addr) {
6315 if (target_to_host_timespec(&ts, ts_addr)) {
6316 goto efault;
6318 ts_ptr = &ts;
6319 } else {
6320 ts_ptr = NULL;
6323 /* Extract the two packed args for the sigset */
6324 if (arg6) {
6325 sig_ptr = &sig;
6326 sig.size = _NSIG / 8;
6328 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6329 if (!arg7) {
6330 goto efault;
6332 arg_sigset = tswapal(arg7[0]);
6333 arg_sigsize = tswapal(arg7[1]);
6334 unlock_user(arg7, arg6, 0);
6336 if (arg_sigset) {
6337 sig.set = &set;
6338 if (arg_sigsize != sizeof(*target_sigset)) {
6339 /* Like the kernel, we enforce correct size sigsets */
6340 ret = -TARGET_EINVAL;
6341 goto fail;
6343 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6344 sizeof(*target_sigset), 1);
6345 if (!target_sigset) {
6346 goto efault;
6348 target_to_host_sigset(&set, target_sigset);
6349 unlock_user(target_sigset, arg_sigset, 0);
6350 } else {
6351 sig.set = NULL;
6353 } else {
6354 sig_ptr = NULL;
6357 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6358 ts_ptr, sig_ptr));
6360 if (!is_error(ret)) {
6361 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6362 goto efault;
6363 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6364 goto efault;
6365 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6366 goto efault;
6368 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6369 goto efault;
6372 break;
6373 #endif
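/* For reference: the "two args smashed together" unpacked above are laid out
 * on the guest side roughly as (sketch only, not a declaration used by this
 * file):
 *
 *     struct {
 *         abi_ulong sigset_ptr;    // guest address of a target_sigset_t
 *         abi_ulong sigset_size;   // must equal sizeof(target_sigset_t)
 *     };
 *
 * which is why arg6 is read as two abi_ulongs and the size is checked against
 * sizeof(target_sigset_t) before the mask is converted.
 */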
6374 case TARGET_NR_symlink:
6376 void *p2;
6377 p = lock_user_string(arg1);
6378 p2 = lock_user_string(arg2);
6379 if (!p || !p2)
6380 ret = -TARGET_EFAULT;
6381 else
6382 ret = get_errno(symlink(p, p2));
6383 unlock_user(p2, arg2, 0);
6384 unlock_user(p, arg1, 0);
6386 break;
6387 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6388 case TARGET_NR_symlinkat:
6390 void *p2;
6391 p = lock_user_string(arg1);
6392 p2 = lock_user_string(arg3);
6393 if (!p || !p2)
6394 ret = -TARGET_EFAULT;
6395 else
6396 ret = get_errno(sys_symlinkat(p, arg2, p2));
6397 unlock_user(p2, arg3, 0);
6398 unlock_user(p, arg1, 0);
6400 break;
6401 #endif
6402 #ifdef TARGET_NR_oldlstat
6403 case TARGET_NR_oldlstat:
6404 goto unimplemented;
6405 #endif
6406 case TARGET_NR_readlink:
6408 void *p2, *temp;
6409 p = lock_user_string(arg1);
6410 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6411 if (!p || !p2)
6412 ret = -TARGET_EFAULT;
6413 else {
6414 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
6415 char real[PATH_MAX];
6416 temp = realpath(exec_path,real);
6417 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
6418 snprintf((char *)p2, arg3, "%s", real);
6420 else
6421 ret = get_errno(readlink(path(p), p2, arg3));
6423 unlock_user(p2, arg2, ret);
6424 unlock_user(p, arg1, 0);
6426 break;
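/* For reference: the special case above makes a guest call such as
 *
 *     char buf[PATH_MAX];
 *     ssize_t n = readlink("/proc/self/exe", buf, sizeof(buf));
 *
 * report the emulated binary (exec_path resolved via realpath()) rather than
 * the qemu executable that the host kernel would otherwise return.
 * (Illustrative guest-side snippet only.)
 */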
6427 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6428 case TARGET_NR_readlinkat:
6430 void *p2;
6431 p = lock_user_string(arg2);
6432 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6433 if (!p || !p2)
6434 ret = -TARGET_EFAULT;
6435 else
6436 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6437 unlock_user(p2, arg3, ret);
6438 unlock_user(p, arg2, 0);
6440 break;
6441 #endif
6442 #ifdef TARGET_NR_uselib
6443 case TARGET_NR_uselib:
6444 goto unimplemented;
6445 #endif
6446 #ifdef TARGET_NR_swapon
6447 case TARGET_NR_swapon:
6448 if (!(p = lock_user_string(arg1)))
6449 goto efault;
6450 ret = get_errno(swapon(p, arg2));
6451 unlock_user(p, arg1, 0);
6452 break;
6453 #endif
6454 case TARGET_NR_reboot:
6455 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6456 /* arg4 must be ignored in all other cases */
6457 p = lock_user_string(arg4);
6458 if (!p) {
6459 goto efault;
6461 ret = get_errno(reboot(arg1, arg2, arg3, p));
6462 unlock_user(p, arg4, 0);
6463 } else {
6464 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6466 break;
6467 #ifdef TARGET_NR_readdir
6468 case TARGET_NR_readdir:
6469 goto unimplemented;
6470 #endif
6471 #ifdef TARGET_NR_mmap
6472 case TARGET_NR_mmap:
6473 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6474 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6475 || defined(TARGET_S390X)
6477 abi_ulong *v;
6478 abi_ulong v1, v2, v3, v4, v5, v6;
6479 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6480 goto efault;
6481 v1 = tswapal(v[0]);
6482 v2 = tswapal(v[1]);
6483 v3 = tswapal(v[2]);
6484 v4 = tswapal(v[3]);
6485 v5 = tswapal(v[4]);
6486 v6 = tswapal(v[5]);
6487 unlock_user(v, arg1, 0);
6488 ret = get_errno(target_mmap(v1, v2, v3,
6489 target_to_host_bitmask(v4, mmap_flags_tbl),
6490 v5, v6));
6492 #else
6493 ret = get_errno(target_mmap(arg1, arg2, arg3,
6494 target_to_host_bitmask(arg4, mmap_flags_tbl),
6495 arg5,
6496 arg6));
6497 #endif
6498 break;
6499 #endif
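/* For reference: on the targets listed above, the old mmap syscall takes a
 * single guest pointer to six abi_ulong values rather than six register
 * arguments, roughly:
 *
 *     abi_ulong args[6];   // addr, len, prot, flags, fd, offset
 *
 * hence the lock_user()/tswapal() unpacking before target_mmap() is called
 * with the individual values.  (Sketch of the guest-side layout.)
 */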
6500 #ifdef TARGET_NR_mmap2
6501 case TARGET_NR_mmap2:
6502 #ifndef MMAP_SHIFT
6503 #define MMAP_SHIFT 12
6504 #endif
6505 ret = get_errno(target_mmap(arg1, arg2, arg3,
6506 target_to_host_bitmask(arg4, mmap_flags_tbl),
6507 arg5,
6508 arg6 << MMAP_SHIFT));
6509 break;
6510 #endif
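/* For reference: mmap2 passes its file offset in page-sized units, so the
 * byte offset handed to target_mmap() above is
 *
 *     off_t byte_off = (off_t)arg6 << MMAP_SHIFT;   // arg6 * 4096 by default
 *
 * which lets 32-bit guests map offsets beyond 4GB.  (Illustrative only;
 * MMAP_SHIFT defaults to 12 as defined above.)
 */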
6511 case TARGET_NR_munmap:
6512 ret = get_errno(target_munmap(arg1, arg2));
6513 break;
6514 case TARGET_NR_mprotect:
6516 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6517 /* Special hack to detect libc making the stack executable. */
6518 if ((arg3 & PROT_GROWSDOWN)
6519 && arg1 >= ts->info->stack_limit
6520 && arg1 <= ts->info->start_stack) {
6521 arg3 &= ~PROT_GROWSDOWN;
6522 arg2 = arg2 + arg1 - ts->info->stack_limit;
6523 arg1 = ts->info->stack_limit;
6526 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6527 break;
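/* For reference: the PROT_GROWSDOWN hack above rewrites a guest request such
 * as mprotect(sp_page, len, prot | PROT_GROWSDOWN) into one covering the whole
 * emulated stack region, roughly:
 *
 *     arg2 += arg1 - ts->info->stack_limit;   // grow the length downwards
 *     arg1  = ts->info->stack_limit;          // start at the stack limit
 *
 * since the guest stack mapping seen by the host kernel need not be a
 * grows-down VMA, the flag is dropped and the range is widened explicitly.
 */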
6528 #ifdef TARGET_NR_mremap
6529 case TARGET_NR_mremap:
6530 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6531 break;
6532 #endif
6533 /* ??? msync/mlock/munlock are broken for softmmu. */
6534 #ifdef TARGET_NR_msync
6535 case TARGET_NR_msync:
6536 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6537 break;
6538 #endif
6539 #ifdef TARGET_NR_mlock
6540 case TARGET_NR_mlock:
6541 ret = get_errno(mlock(g2h(arg1), arg2));
6542 break;
6543 #endif
6544 #ifdef TARGET_NR_munlock
6545 case TARGET_NR_munlock:
6546 ret = get_errno(munlock(g2h(arg1), arg2));
6547 break;
6548 #endif
6549 #ifdef TARGET_NR_mlockall
6550 case TARGET_NR_mlockall:
6551 ret = get_errno(mlockall(arg1));
6552 break;
6553 #endif
6554 #ifdef TARGET_NR_munlockall
6555 case TARGET_NR_munlockall:
6556 ret = get_errno(munlockall());
6557 break;
6558 #endif
6559 case TARGET_NR_truncate:
6560 if (!(p = lock_user_string(arg1)))
6561 goto efault;
6562 ret = get_errno(truncate(p, arg2));
6563 unlock_user(p, arg1, 0);
6564 break;
6565 case TARGET_NR_ftruncate:
6566 ret = get_errno(ftruncate(arg1, arg2));
6567 break;
6568 case TARGET_NR_fchmod:
6569 ret = get_errno(fchmod(arg1, arg2));
6570 break;
6571 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6572 case TARGET_NR_fchmodat:
6573 if (!(p = lock_user_string(arg2)))
6574 goto efault;
6575 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6576 unlock_user(p, arg2, 0);
6577 break;
6578 #endif
6579 case TARGET_NR_getpriority:
6580 /* Note that negative values are valid for getpriority, so we must
6581 differentiate based on errno settings. */
6582 errno = 0;
6583 ret = getpriority(arg1, arg2);
6584 if (ret == -1 && errno != 0) {
6585 ret = -host_to_target_errno(errno);
6586 break;
6588 #ifdef TARGET_ALPHA
6589 /* Return value is the unbiased priority. Signal no error. */
6590 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6591 #else
6592 /* Return value is a biased priority to avoid negative numbers. */
6593 ret = 20 - ret;
6594 #endif
6595 break;
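/* For reference: the raw getpriority syscall returns the value biased as
 * (20 - nice) so that it is never negative, e.g. nice -20 -> 40 and
 * nice 19 -> 1.  The libc getpriority() wrapper used here returns the real
 * nice value, so the bias is re-applied above for non-Alpha targets before
 * returning to the guest; Alpha instead returns the unbiased value and
 * signals "no error" through V0.
 */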
6596 case TARGET_NR_setpriority:
6597 ret = get_errno(setpriority(arg1, arg2, arg3));
6598 break;
6599 #ifdef TARGET_NR_profil
6600 case TARGET_NR_profil:
6601 goto unimplemented;
6602 #endif
6603 case TARGET_NR_statfs:
6604 if (!(p = lock_user_string(arg1)))
6605 goto efault;
6606 ret = get_errno(statfs(path(p), &stfs));
6607 unlock_user(p, arg1, 0);
6608 convert_statfs:
6609 if (!is_error(ret)) {
6610 struct target_statfs *target_stfs;
6612 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6613 goto efault;
6614 __put_user(stfs.f_type, &target_stfs->f_type);
6615 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6616 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6617 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6618 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6619 __put_user(stfs.f_files, &target_stfs->f_files);
6620 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6621 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6622 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6623 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6624 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6625 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6626 unlock_user_struct(target_stfs, arg2, 1);
6628 break;
6629 case TARGET_NR_fstatfs:
6630 ret = get_errno(fstatfs(arg1, &stfs));
6631 goto convert_statfs;
6632 #ifdef TARGET_NR_statfs64
6633 case TARGET_NR_statfs64:
6634 if (!(p = lock_user_string(arg1)))
6635 goto efault;
6636 ret = get_errno(statfs(path(p), &stfs));
6637 unlock_user(p, arg1, 0);
6638 convert_statfs64:
6639 if (!is_error(ret)) {
6640 struct target_statfs64 *target_stfs;
6642 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6643 goto efault;
6644 __put_user(stfs.f_type, &target_stfs->f_type);
6645 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6646 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6647 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6648 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6649 __put_user(stfs.f_files, &target_stfs->f_files);
6650 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6651 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6652 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6653 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6654 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6655 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6656 unlock_user_struct(target_stfs, arg3, 1);
6658 break;
6659 case TARGET_NR_fstatfs64:
6660 ret = get_errno(fstatfs(arg1, &stfs));
6661 goto convert_statfs64;
6662 #endif
6663 #ifdef TARGET_NR_ioperm
6664 case TARGET_NR_ioperm:
6665 goto unimplemented;
6666 #endif
6667 #ifdef TARGET_NR_socketcall
6668 case TARGET_NR_socketcall:
6669 ret = do_socketcall(arg1, arg2);
6670 break;
6671 #endif
6672 #ifdef TARGET_NR_accept
6673 case TARGET_NR_accept:
6674 ret = do_accept(arg1, arg2, arg3);
6675 break;
6676 #endif
6677 #ifdef TARGET_NR_bind
6678 case TARGET_NR_bind:
6679 ret = do_bind(arg1, arg2, arg3);
6680 break;
6681 #endif
6682 #ifdef TARGET_NR_connect
6683 case TARGET_NR_connect:
6684 ret = do_connect(arg1, arg2, arg3);
6685 break;
6686 #endif
6687 #ifdef TARGET_NR_getpeername
6688 case TARGET_NR_getpeername:
6689 ret = do_getpeername(arg1, arg2, arg3);
6690 break;
6691 #endif
6692 #ifdef TARGET_NR_getsockname
6693 case TARGET_NR_getsockname:
6694 ret = do_getsockname(arg1, arg2, arg3);
6695 break;
6696 #endif
6697 #ifdef TARGET_NR_getsockopt
6698 case TARGET_NR_getsockopt:
6699 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6700 break;
6701 #endif
6702 #ifdef TARGET_NR_listen
6703 case TARGET_NR_listen:
6704 ret = get_errno(listen(arg1, arg2));
6705 break;
6706 #endif
6707 #ifdef TARGET_NR_recv
6708 case TARGET_NR_recv:
6709 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6710 break;
6711 #endif
6712 #ifdef TARGET_NR_recvfrom
6713 case TARGET_NR_recvfrom:
6714 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6715 break;
6716 #endif
6717 #ifdef TARGET_NR_recvmsg
6718 case TARGET_NR_recvmsg:
6719 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6720 break;
6721 #endif
6722 #ifdef TARGET_NR_send
6723 case TARGET_NR_send:
6724 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6725 break;
6726 #endif
6727 #ifdef TARGET_NR_sendmsg
6728 case TARGET_NR_sendmsg:
6729 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6730 break;
6731 #endif
6732 #ifdef TARGET_NR_sendto
6733 case TARGET_NR_sendto:
6734 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6735 break;
6736 #endif
6737 #ifdef TARGET_NR_shutdown
6738 case TARGET_NR_shutdown:
6739 ret = get_errno(shutdown(arg1, arg2));
6740 break;
6741 #endif
6742 #ifdef TARGET_NR_socket
6743 case TARGET_NR_socket:
6744 ret = do_socket(arg1, arg2, arg3);
6745 break;
6746 #endif
6747 #ifdef TARGET_NR_socketpair
6748 case TARGET_NR_socketpair:
6749 ret = do_socketpair(arg1, arg2, arg3, arg4);
6750 break;
6751 #endif
6752 #ifdef TARGET_NR_setsockopt
6753 case TARGET_NR_setsockopt:
6754 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6755 break;
6756 #endif
6758 case TARGET_NR_syslog:
6759 if (!(p = lock_user_string(arg2)))
6760 goto efault;
6761 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6762 unlock_user(p, arg2, 0);
6763 break;
6765 case TARGET_NR_setitimer:
6767 struct itimerval value, ovalue, *pvalue;
6769 if (arg2) {
6770 pvalue = &value;
6771 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6772 || copy_from_user_timeval(&pvalue->it_value,
6773 arg2 + sizeof(struct target_timeval)))
6774 goto efault;
6775 } else {
6776 pvalue = NULL;
6778 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6779 if (!is_error(ret) && arg3) {
6780 if (copy_to_user_timeval(arg3,
6781 &ovalue.it_interval)
6782 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6783 &ovalue.it_value))
6784 goto efault;
6787 break;
6788 case TARGET_NR_getitimer:
6790 struct itimerval value;
6792 ret = get_errno(getitimer(arg1, &value));
6793 if (!is_error(ret) && arg2) {
6794 if (copy_to_user_timeval(arg2,
6795 &value.it_interval)
6796 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6797 &value.it_value))
6798 goto efault;
6801 break;
6802 case TARGET_NR_stat:
6803 if (!(p = lock_user_string(arg1)))
6804 goto efault;
6805 ret = get_errno(stat(path(p), &st));
6806 unlock_user(p, arg1, 0);
6807 goto do_stat;
6808 case TARGET_NR_lstat:
6809 if (!(p = lock_user_string(arg1)))
6810 goto efault;
6811 ret = get_errno(lstat(path(p), &st));
6812 unlock_user(p, arg1, 0);
6813 goto do_stat;
6814 case TARGET_NR_fstat:
6816 ret = get_errno(fstat(arg1, &st));
6817 do_stat:
6818 if (!is_error(ret)) {
6819 struct target_stat *target_st;
6821 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6822 goto efault;
6823 memset(target_st, 0, sizeof(*target_st));
6824 __put_user(st.st_dev, &target_st->st_dev);
6825 __put_user(st.st_ino, &target_st->st_ino);
6826 __put_user(st.st_mode, &target_st->st_mode);
6827 __put_user(st.st_uid, &target_st->st_uid);
6828 __put_user(st.st_gid, &target_st->st_gid);
6829 __put_user(st.st_nlink, &target_st->st_nlink);
6830 __put_user(st.st_rdev, &target_st->st_rdev);
6831 __put_user(st.st_size, &target_st->st_size);
6832 __put_user(st.st_blksize, &target_st->st_blksize);
6833 __put_user(st.st_blocks, &target_st->st_blocks);
6834 __put_user(st.st_atime, &target_st->target_st_atime);
6835 __put_user(st.st_mtime, &target_st->target_st_mtime);
6836 __put_user(st.st_ctime, &target_st->target_st_ctime);
6837 unlock_user_struct(target_st, arg2, 1);
6840 break;
6841 #ifdef TARGET_NR_olduname
6842 case TARGET_NR_olduname:
6843 goto unimplemented;
6844 #endif
6845 #ifdef TARGET_NR_iopl
6846 case TARGET_NR_iopl:
6847 goto unimplemented;
6848 #endif
6849 case TARGET_NR_vhangup:
6850 ret = get_errno(vhangup());
6851 break;
6852 #ifdef TARGET_NR_idle
6853 case TARGET_NR_idle:
6854 goto unimplemented;
6855 #endif
6856 #ifdef TARGET_NR_syscall
6857 case TARGET_NR_syscall:
6858 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6859 arg6, arg7, arg8, 0);
6860 break;
6861 #endif
6862 case TARGET_NR_wait4:
6864 int status;
6865 abi_long status_ptr = arg2;
6866 struct rusage rusage, *rusage_ptr;
6867 abi_ulong target_rusage = arg4;
6868 if (target_rusage)
6869 rusage_ptr = &rusage;
6870 else
6871 rusage_ptr = NULL;
6872 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6873 if (!is_error(ret)) {
6874 if (status_ptr && ret) {
6875 status = host_to_target_waitstatus(status);
6876 if (put_user_s32(status, status_ptr))
6877 goto efault;
6879 if (target_rusage)
6880 host_to_target_rusage(target_rusage, &rusage);
6883 break;
6884 #ifdef TARGET_NR_swapoff
6885 case TARGET_NR_swapoff:
6886 if (!(p = lock_user_string(arg1)))
6887 goto efault;
6888 ret = get_errno(swapoff(p));
6889 unlock_user(p, arg1, 0);
6890 break;
6891 #endif
6892 case TARGET_NR_sysinfo:
6894 struct target_sysinfo *target_value;
6895 struct sysinfo value;
6896 ret = get_errno(sysinfo(&value));
6897 if (!is_error(ret) && arg1)
6899 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6900 goto efault;
6901 __put_user(value.uptime, &target_value->uptime);
6902 __put_user(value.loads[0], &target_value->loads[0]);
6903 __put_user(value.loads[1], &target_value->loads[1]);
6904 __put_user(value.loads[2], &target_value->loads[2]);
6905 __put_user(value.totalram, &target_value->totalram);
6906 __put_user(value.freeram, &target_value->freeram);
6907 __put_user(value.sharedram, &target_value->sharedram);
6908 __put_user(value.bufferram, &target_value->bufferram);
6909 __put_user(value.totalswap, &target_value->totalswap);
6910 __put_user(value.freeswap, &target_value->freeswap);
6911 __put_user(value.procs, &target_value->procs);
6912 __put_user(value.totalhigh, &target_value->totalhigh);
6913 __put_user(value.freehigh, &target_value->freehigh);
6914 __put_user(value.mem_unit, &target_value->mem_unit);
6915 unlock_user_struct(target_value, arg1, 1);
6918 break;
6919 #ifdef TARGET_NR_ipc
6920 case TARGET_NR_ipc:
6921 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6922 break;
6923 #endif
6924 #ifdef TARGET_NR_semget
6925 case TARGET_NR_semget:
6926 ret = get_errno(semget(arg1, arg2, arg3));
6927 break;
6928 #endif
6929 #ifdef TARGET_NR_semop
6930 case TARGET_NR_semop:
6931 ret = get_errno(do_semop(arg1, arg2, arg3));
6932 break;
6933 #endif
6934 #ifdef TARGET_NR_semctl
6935 case TARGET_NR_semctl:
6936 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6937 break;
6938 #endif
6939 #ifdef TARGET_NR_msgctl
6940 case TARGET_NR_msgctl:
6941 ret = do_msgctl(arg1, arg2, arg3);
6942 break;
6943 #endif
6944 #ifdef TARGET_NR_msgget
6945 case TARGET_NR_msgget:
6946 ret = get_errno(msgget(arg1, arg2));
6947 break;
6948 #endif
6949 #ifdef TARGET_NR_msgrcv
6950 case TARGET_NR_msgrcv:
6951 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6952 break;
6953 #endif
6954 #ifdef TARGET_NR_msgsnd
6955 case TARGET_NR_msgsnd:
6956 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6957 break;
6958 #endif
6959 #ifdef TARGET_NR_shmget
6960 case TARGET_NR_shmget:
6961 ret = get_errno(shmget(arg1, arg2, arg3));
6962 break;
6963 #endif
6964 #ifdef TARGET_NR_shmctl
6965 case TARGET_NR_shmctl:
6966 ret = do_shmctl(arg1, arg2, arg3);
6967 break;
6968 #endif
6969 #ifdef TARGET_NR_shmat
6970 case TARGET_NR_shmat:
6971 ret = do_shmat(arg1, arg2, arg3);
6972 break;
6973 #endif
6974 #ifdef TARGET_NR_shmdt
6975 case TARGET_NR_shmdt:
6976 ret = do_shmdt(arg1);
6977 break;
6978 #endif
6979 case TARGET_NR_fsync:
6980 ret = get_errno(fsync(arg1));
6981 break;
6982 case TARGET_NR_clone:
6983 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6984 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6985 #elif defined(TARGET_CRIS)
6986 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6987 #elif defined(TARGET_MICROBLAZE)
6988 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6989 #elif defined(TARGET_S390X)
6990 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6991 #else
6992 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6993 #endif
6994 break;
6995 #ifdef __NR_exit_group
6996 /* new thread calls */
6997 case TARGET_NR_exit_group:
6998 #ifdef TARGET_GPROF
6999 _mcleanup();
7000 #endif
7001 gdb_exit(cpu_env, arg1);
7002 ret = get_errno(exit_group(arg1));
7003 break;
7004 #endif
7005 case TARGET_NR_setdomainname:
7006 if (!(p = lock_user_string(arg1)))
7007 goto efault;
7008 ret = get_errno(setdomainname(p, arg2));
7009 unlock_user(p, arg1, 0);
7010 break;
7011 case TARGET_NR_uname:
7012 /* no need to transcode because we use the linux syscall */
7014 struct new_utsname * buf;
7016 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7017 goto efault;
7018 ret = get_errno(sys_uname(buf));
7019 if (!is_error(ret)) {
7020 /* Overwrite the native machine name with whatever is being
7021 emulated. */
7022 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7023 /* Allow the user to override the reported release. */
7024 if (qemu_uname_release && *qemu_uname_release)
7025 strcpy (buf->release, qemu_uname_release);
7027 unlock_user_struct(buf, arg1, 1);
7029 break;
7030 #ifdef TARGET_I386
7031 case TARGET_NR_modify_ldt:
7032 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7033 break;
7034 #if !defined(TARGET_X86_64)
7035 case TARGET_NR_vm86old:
7036 goto unimplemented;
7037 case TARGET_NR_vm86:
7038 ret = do_vm86(cpu_env, arg1, arg2);
7039 break;
7040 #endif
7041 #endif
7042 case TARGET_NR_adjtimex:
7043 goto unimplemented;
7044 #ifdef TARGET_NR_create_module
7045 case TARGET_NR_create_module:
7046 #endif
7047 case TARGET_NR_init_module:
7048 case TARGET_NR_delete_module:
7049 #ifdef TARGET_NR_get_kernel_syms
7050 case TARGET_NR_get_kernel_syms:
7051 #endif
7052 goto unimplemented;
7053 case TARGET_NR_quotactl:
7054 goto unimplemented;
7055 case TARGET_NR_getpgid:
7056 ret = get_errno(getpgid(arg1));
7057 break;
7058 case TARGET_NR_fchdir:
7059 ret = get_errno(fchdir(arg1));
7060 break;
7061 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7062 case TARGET_NR_bdflush:
7063 goto unimplemented;
7064 #endif
7065 #ifdef TARGET_NR_sysfs
7066 case TARGET_NR_sysfs:
7067 goto unimplemented;
7068 #endif
7069 case TARGET_NR_personality:
7070 ret = get_errno(personality(arg1));
7071 break;
7072 #ifdef TARGET_NR_afs_syscall
7073 case TARGET_NR_afs_syscall:
7074 goto unimplemented;
7075 #endif
7076 #ifdef TARGET_NR__llseek /* Not on alpha */
7077 case TARGET_NR__llseek:
7079 int64_t res;
7080 #if !defined(__NR_llseek)
7081 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7082 if (res == -1) {
7083 ret = get_errno(res);
7084 } else {
7085 ret = 0;
7087 #else
7088 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7089 #endif
7090 if ((ret == 0) && put_user_s64(res, arg4)) {
7091 goto efault;
7094 break;
7095 #endif
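/* For reference: llseek splits the 64-bit offset across two registers; the
 * fallback path above reassembles it as
 *
 *     uint64_t offset = ((uint64_t)arg2 << 32) | arg3;   // high word, low word
 *
 * and the resulting position is written back to the guest through arg4 with
 * put_user_s64().  (Sketch of the reassembly done above.)
 */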
7096 case TARGET_NR_getdents:
7097 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7099 struct target_dirent *target_dirp;
7100 struct linux_dirent *dirp;
7101 abi_long count = arg3;
7103 dirp = malloc(count);
7104 if (!dirp) {
7105 ret = -TARGET_ENOMEM;
7106 goto fail;
7109 ret = get_errno(sys_getdents(arg1, dirp, count));
7110 if (!is_error(ret)) {
7111 struct linux_dirent *de;
7112 struct target_dirent *tde;
7113 int len = ret;
7114 int reclen, treclen;
7115 int count1, tnamelen;
7117 count1 = 0;
7118 de = dirp;
7119 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7120 goto efault;
7121 tde = target_dirp;
7122 while (len > 0) {
7123 reclen = de->d_reclen;
7124 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7125 assert(tnamelen >= 0);
7126 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7127 assert(count1 + treclen <= count);
7128 tde->d_reclen = tswap16(treclen);
7129 tde->d_ino = tswapal(de->d_ino);
7130 tde->d_off = tswapal(de->d_off);
7131 memcpy(tde->d_name, de->d_name, tnamelen);
7132 de = (struct linux_dirent *)((char *)de + reclen);
7133 len -= reclen;
7134 tde = (struct target_dirent *)((char *)tde + treclen);
7135 count1 += treclen;
7137 ret = count1;
7138 unlock_user(target_dirp, arg2, ret);
7140 free(dirp);
7142 #else
7144 struct linux_dirent *dirp;
7145 abi_long count = arg3;
7147 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7148 goto efault;
7149 ret = get_errno(sys_getdents(arg1, dirp, count));
7150 if (!is_error(ret)) {
7151 struct linux_dirent *de;
7152 int len = ret;
7153 int reclen;
7154 de = dirp;
7155 while (len > 0) {
7156 reclen = de->d_reclen;
7157 if (reclen > len)
7158 break;
7159 de->d_reclen = tswap16(reclen);
7160 tswapls(&de->d_ino);
7161 tswapls(&de->d_off);
7162 de = (struct linux_dirent *)((char *)de + reclen);
7163 len -= reclen;
7166 unlock_user(dirp, arg2, ret);
7168 #endif
7169 break;
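/* For reference: in the 32-bit-target-on-64-bit-host path above, each host
 * linux_dirent record is repacked into a target_dirent because the header
 * fields (d_ino, d_off) differ in size, so the record length is recomputed,
 * roughly:
 *
 *     tnamelen = reclen   - offsetof(struct linux_dirent,  d_name);
 *     treclen  = tnamelen + offsetof(struct target_dirent, d_name);
 *
 * In the same-width path only the byte order is fixed up in place.
 */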
7170 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7171 case TARGET_NR_getdents64:
7173 struct linux_dirent64 *dirp;
7174 abi_long count = arg3;
7175 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7176 goto efault;
7177 ret = get_errno(sys_getdents64(arg1, dirp, count));
7178 if (!is_error(ret)) {
7179 struct linux_dirent64 *de;
7180 int len = ret;
7181 int reclen;
7182 de = dirp;
7183 while (len > 0) {
7184 reclen = de->d_reclen;
7185 if (reclen > len)
7186 break;
7187 de->d_reclen = tswap16(reclen);
7188 tswap64s((uint64_t *)&de->d_ino);
7189 tswap64s((uint64_t *)&de->d_off);
7190 de = (struct linux_dirent64 *)((char *)de + reclen);
7191 len -= reclen;
7194 unlock_user(dirp, arg2, ret);
7196 break;
7197 #endif /* TARGET_NR_getdents64 */
7198 #if defined(TARGET_NR__newselect)
7199 case TARGET_NR__newselect:
7200 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7201 break;
7202 #endif
7203 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7204 # ifdef TARGET_NR_poll
7205 case TARGET_NR_poll:
7206 # endif
7207 # ifdef TARGET_NR_ppoll
7208 case TARGET_NR_ppoll:
7209 # endif
7211 struct target_pollfd *target_pfd;
7212 unsigned int nfds = arg2;
7213 int timeout = arg3;
7214 struct pollfd *pfd;
7215 unsigned int i;
7217 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7218 if (!target_pfd)
7219 goto efault;
7221 pfd = alloca(sizeof(struct pollfd) * nfds);
7222 for(i = 0; i < nfds; i++) {
7223 pfd[i].fd = tswap32(target_pfd[i].fd);
7224 pfd[i].events = tswap16(target_pfd[i].events);
7227 # ifdef TARGET_NR_ppoll
7228 if (num == TARGET_NR_ppoll) {
7229 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7230 target_sigset_t *target_set;
7231 sigset_t _set, *set = &_set;
7233 if (arg3) {
7234 if (target_to_host_timespec(timeout_ts, arg3)) {
7235 unlock_user(target_pfd, arg1, 0);
7236 goto efault;
7238 } else {
7239 timeout_ts = NULL;
7242 if (arg4) {
7243 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7244 if (!target_set) {
7245 unlock_user(target_pfd, arg1, 0);
7246 goto efault;
7248 target_to_host_sigset(set, target_set);
7249 } else {
7250 set = NULL;
7253 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7255 if (!is_error(ret) && arg3) {
7256 host_to_target_timespec(arg3, timeout_ts);
7258 if (arg4) {
7259 unlock_user(target_set, arg4, 0);
7261 } else
7262 # endif
7263 ret = get_errno(poll(pfd, nfds, timeout));
7265 if (!is_error(ret)) {
7266 for(i = 0; i < nfds; i++) {
7267 target_pfd[i].revents = tswap16(pfd[i].revents);
7270 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7272 break;
7273 #endif
7274 case TARGET_NR_flock:
7275 /* NOTE: the flock constant seems to be the same for every
7276 Linux platform */
7277 ret = get_errno(flock(arg1, arg2));
7278 break;
7279 case TARGET_NR_readv:
7281 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7282 if (vec != NULL) {
7283 ret = get_errno(readv(arg1, vec, arg3));
7284 unlock_iovec(vec, arg2, arg3, 1);
7285 } else {
7286 ret = -host_to_target_errno(errno);
7289 break;
7290 case TARGET_NR_writev:
7292 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7293 if (vec != NULL) {
7294 ret = get_errno(writev(arg1, vec, arg3));
7295 unlock_iovec(vec, arg2, arg3, 0);
7296 } else {
7297 ret = -host_to_target_errno(errno);
7300 break;
7301 case TARGET_NR_getsid:
7302 ret = get_errno(getsid(arg1));
7303 break;
7304 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7305 case TARGET_NR_fdatasync:
7306 ret = get_errno(fdatasync(arg1));
7307 break;
7308 #endif
7309 case TARGET_NR__sysctl:
7310 /* We don't implement this, but ENOTDIR is always a safe
7311 return value. */
7312 ret = -TARGET_ENOTDIR;
7313 break;
7314 case TARGET_NR_sched_getaffinity:
7316 unsigned int mask_size;
7317 unsigned long *mask;
7319 /*
7320 * sched_getaffinity needs multiples of ulong, so need to take
7321 * care of mismatches between target ulong and host ulong sizes.
7322 */
7323 if (arg2 & (sizeof(abi_ulong) - 1)) {
7324 ret = -TARGET_EINVAL;
7325 break;
7327 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7329 mask = alloca(mask_size);
7330 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7332 if (!is_error(ret)) {
7333 if (copy_to_user(arg3, mask, ret)) {
7334 goto efault;
7338 break;
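/* For reference: the kernel insists on a cpu mask that is a whole number of
 * host unsigned longs, so the guest-supplied size is rounded up above:
 *
 *     mask_size = (arg2 + sizeof(*mask) - 1) & ~(sizeof(*mask) - 1);
 *
 * while a guest size that is not a multiple of sizeof(abi_ulong) is rejected
 * with -TARGET_EINVAL, mirroring the kernel's own check.
 */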
7339 case TARGET_NR_sched_setaffinity:
7341 unsigned int mask_size;
7342 unsigned long *mask;
7344 /*
7345 * sched_setaffinity needs multiples of ulong, so need to take
7346 * care of mismatches between target ulong and host ulong sizes.
7347 */
7348 if (arg2 & (sizeof(abi_ulong) - 1)) {
7349 ret = -TARGET_EINVAL;
7350 break;
7352 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7354 mask = alloca(mask_size);
7355 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7356 goto efault;
7358 memcpy(mask, p, arg2);
7359 unlock_user_struct(p, arg3, 0);
7361 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7363 break;
7364 case TARGET_NR_sched_setparam:
7366 struct sched_param *target_schp;
7367 struct sched_param schp;
7369 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7370 goto efault;
7371 schp.sched_priority = tswap32(target_schp->sched_priority);
7372 unlock_user_struct(target_schp, arg2, 0);
7373 ret = get_errno(sched_setparam(arg1, &schp));
7375 break;
7376 case TARGET_NR_sched_getparam:
7378 struct sched_param *target_schp;
7379 struct sched_param schp;
7380 ret = get_errno(sched_getparam(arg1, &schp));
7381 if (!is_error(ret)) {
7382 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7383 goto efault;
7384 target_schp->sched_priority = tswap32(schp.sched_priority);
7385 unlock_user_struct(target_schp, arg2, 1);
7388 break;
7389 case TARGET_NR_sched_setscheduler:
7391 struct sched_param *target_schp;
7392 struct sched_param schp;
7393 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7394 goto efault;
7395 schp.sched_priority = tswap32(target_schp->sched_priority);
7396 unlock_user_struct(target_schp, arg3, 0);
7397 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7399 break;
7400 case TARGET_NR_sched_getscheduler:
7401 ret = get_errno(sched_getscheduler(arg1));
7402 break;
7403 case TARGET_NR_sched_yield:
7404 ret = get_errno(sched_yield());
7405 break;
7406 case TARGET_NR_sched_get_priority_max:
7407 ret = get_errno(sched_get_priority_max(arg1));
7408 break;
7409 case TARGET_NR_sched_get_priority_min:
7410 ret = get_errno(sched_get_priority_min(arg1));
7411 break;
7412 case TARGET_NR_sched_rr_get_interval:
7414 struct timespec ts;
7415 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7416 if (!is_error(ret)) {
7417 host_to_target_timespec(arg2, &ts);
7420 break;
7421 case TARGET_NR_nanosleep:
7423 struct timespec req, rem;
7424 target_to_host_timespec(&req, arg1);
7425 ret = get_errno(nanosleep(&req, &rem));
7426 if (is_error(ret) && arg2) {
7427 host_to_target_timespec(arg2, &rem);
7430 break;
7431 #ifdef TARGET_NR_query_module
7432 case TARGET_NR_query_module:
7433 goto unimplemented;
7434 #endif
7435 #ifdef TARGET_NR_nfsservctl
7436 case TARGET_NR_nfsservctl:
7437 goto unimplemented;
7438 #endif
7439 case TARGET_NR_prctl:
7440 switch (arg1) {
7441 case PR_GET_PDEATHSIG:
7443 int deathsig;
7444 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7445 if (!is_error(ret) && arg2
7446 && put_user_ual(deathsig, arg2)) {
7447 goto efault;
7449 break;
7451 #ifdef PR_GET_NAME
7452 case PR_GET_NAME:
7454 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7455 if (!name) {
7456 goto efault;
7458 ret = get_errno(prctl(arg1, (unsigned long)name,
7459 arg3, arg4, arg5));
7460 unlock_user(name, arg2, 16);
7461 break;
7463 case PR_SET_NAME:
7465 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7466 if (!name) {
7467 goto efault;
7469 ret = get_errno(prctl(arg1, (unsigned long)name,
7470 arg3, arg4, arg5));
7471 unlock_user(name, arg2, 0);
7472 break;
7474 #endif
7475 default:
7476 /* Most prctl options have no pointer arguments */
7477 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7478 break;
7480 break;
7481 #ifdef TARGET_NR_arch_prctl
7482 case TARGET_NR_arch_prctl:
7483 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7484 ret = do_arch_prctl(cpu_env, arg1, arg2);
7485 break;
7486 #else
7487 goto unimplemented;
7488 #endif
7489 #endif
7490 #ifdef TARGET_NR_pread64
7491 case TARGET_NR_pread64:
7492 if (regpairs_aligned(cpu_env)) {
7493 arg4 = arg5;
7494 arg5 = arg6;
7496 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7497 goto efault;
7498 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7499 unlock_user(p, arg2, ret);
7500 break;
7501 case TARGET_NR_pwrite64:
7502 if (regpairs_aligned(cpu_env)) {
7503 arg4 = arg5;
7504 arg5 = arg6;
7506 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7507 goto efault;
7508 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7509 unlock_user(p, arg2, 0);
7510 break;
7511 #endif
7512 case TARGET_NR_getcwd:
7513 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7514 goto efault;
7515 ret = get_errno(sys_getcwd1(p, arg2));
7516 unlock_user(p, arg1, ret);
7517 break;
7518 case TARGET_NR_capget:
7519 goto unimplemented;
7520 case TARGET_NR_capset:
7521 goto unimplemented;
7522 case TARGET_NR_sigaltstack:
7523 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7524 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7525 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7526 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7527 break;
7528 #else
7529 goto unimplemented;
7530 #endif
7531 case TARGET_NR_sendfile:
7532 goto unimplemented;
7533 #ifdef TARGET_NR_getpmsg
7534 case TARGET_NR_getpmsg:
7535 goto unimplemented;
7536 #endif
7537 #ifdef TARGET_NR_putpmsg
7538 case TARGET_NR_putpmsg:
7539 goto unimplemented;
7540 #endif
7541 #ifdef TARGET_NR_vfork
7542 case TARGET_NR_vfork:
7543 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7544 0, 0, 0, 0));
7545 break;
7546 #endif
7547 #ifdef TARGET_NR_ugetrlimit
7548 case TARGET_NR_ugetrlimit:
7550 struct rlimit rlim;
7551 int resource = target_to_host_resource(arg1);
7552 ret = get_errno(getrlimit(resource, &rlim));
7553 if (!is_error(ret)) {
7554 struct target_rlimit *target_rlim;
7555 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7556 goto efault;
7557 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7558 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7559 unlock_user_struct(target_rlim, arg2, 1);
7561 break;
7563 #endif
7564 #ifdef TARGET_NR_truncate64
7565 case TARGET_NR_truncate64:
7566 if (!(p = lock_user_string(arg1)))
7567 goto efault;
7568 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7569 unlock_user(p, arg1, 0);
7570 break;
7571 #endif
7572 #ifdef TARGET_NR_ftruncate64
7573 case TARGET_NR_ftruncate64:
7574 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7575 break;
7576 #endif
7577 #ifdef TARGET_NR_stat64
7578 case TARGET_NR_stat64:
7579 if (!(p = lock_user_string(arg1)))
7580 goto efault;
7581 ret = get_errno(stat(path(p), &st));
7582 unlock_user(p, arg1, 0);
7583 if (!is_error(ret))
7584 ret = host_to_target_stat64(cpu_env, arg2, &st);
7585 break;
7586 #endif
7587 #ifdef TARGET_NR_lstat64
7588 case TARGET_NR_lstat64:
7589 if (!(p = lock_user_string(arg1)))
7590 goto efault;
7591 ret = get_errno(lstat(path(p), &st));
7592 unlock_user(p, arg1, 0);
7593 if (!is_error(ret))
7594 ret = host_to_target_stat64(cpu_env, arg2, &st);
7595 break;
7596 #endif
7597 #ifdef TARGET_NR_fstat64
7598 case TARGET_NR_fstat64:
7599 ret = get_errno(fstat(arg1, &st));
7600 if (!is_error(ret))
7601 ret = host_to_target_stat64(cpu_env, arg2, &st);
7602 break;
7603 #endif
7604 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7605 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7606 #ifdef TARGET_NR_fstatat64
7607 case TARGET_NR_fstatat64:
7608 #endif
7609 #ifdef TARGET_NR_newfstatat
7610 case TARGET_NR_newfstatat:
7611 #endif
7612 if (!(p = lock_user_string(arg2)))
7613 goto efault;
7614 #ifdef __NR_fstatat64
7615 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7616 #else
7617 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7618 #endif
7619 if (!is_error(ret))
7620 ret = host_to_target_stat64(cpu_env, arg3, &st);
7621 break;
7622 #endif
7623 case TARGET_NR_lchown:
7624 if (!(p = lock_user_string(arg1)))
7625 goto efault;
7626 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7627 unlock_user(p, arg1, 0);
7628 break;
7629 #ifdef TARGET_NR_getuid
7630 case TARGET_NR_getuid:
7631 ret = get_errno(high2lowuid(getuid()));
7632 break;
7633 #endif
7634 #ifdef TARGET_NR_getgid
7635 case TARGET_NR_getgid:
7636 ret = get_errno(high2lowgid(getgid()));
7637 break;
7638 #endif
7639 #ifdef TARGET_NR_geteuid
7640 case TARGET_NR_geteuid:
7641 ret = get_errno(high2lowuid(geteuid()));
7642 break;
7643 #endif
7644 #ifdef TARGET_NR_getegid
7645 case TARGET_NR_getegid:
7646 ret = get_errno(high2lowgid(getegid()));
7647 break;
7648 #endif
7649 case TARGET_NR_setreuid:
7650 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7651 break;
7652 case TARGET_NR_setregid:
7653 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7654 break;
7655 case TARGET_NR_getgroups:
7657 int gidsetsize = arg1;
7658 target_id *target_grouplist;
7659 gid_t *grouplist;
7660 int i;
7662 grouplist = alloca(gidsetsize * sizeof(gid_t));
7663 ret = get_errno(getgroups(gidsetsize, grouplist));
7664 if (gidsetsize == 0)
7665 break;
7666 if (!is_error(ret)) {
7667 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7668 if (!target_grouplist)
7669 goto efault;
7670 for(i = 0;i < ret; i++)
7671 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7672 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7675 break;
7676 case TARGET_NR_setgroups:
7678 int gidsetsize = arg1;
7679 target_id *target_grouplist;
7680 gid_t *grouplist;
7681 int i;
7683 grouplist = alloca(gidsetsize * sizeof(gid_t));
7684 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7685 if (!target_grouplist) {
7686 ret = -TARGET_EFAULT;
7687 goto fail;
7689 for(i = 0;i < gidsetsize; i++)
7690 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7691 unlock_user(target_grouplist, arg2, 0);
7692 ret = get_errno(setgroups(gidsetsize, grouplist));
7694 break;
7695 case TARGET_NR_fchown:
7696 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7697 break;
7698 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7699 case TARGET_NR_fchownat:
7700 if (!(p = lock_user_string(arg2)))
7701 goto efault;
7702 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7703 unlock_user(p, arg2, 0);
7704 break;
7705 #endif
7706 #ifdef TARGET_NR_setresuid
7707 case TARGET_NR_setresuid:
7708 ret = get_errno(setresuid(low2highuid(arg1),
7709 low2highuid(arg2),
7710 low2highuid(arg3)));
7711 break;
7712 #endif
7713 #ifdef TARGET_NR_getresuid
7714 case TARGET_NR_getresuid:
7716 uid_t ruid, euid, suid;
7717 ret = get_errno(getresuid(&ruid, &euid, &suid));
7718 if (!is_error(ret)) {
7719 if (put_user_u16(high2lowuid(ruid), arg1)
7720 || put_user_u16(high2lowuid(euid), arg2)
7721 || put_user_u16(high2lowuid(suid), arg3))
7722 goto efault;
7725 break;
7726 #endif
7727 #ifdef TARGET_NR_getresgid
7728 case TARGET_NR_setresgid:
7729 ret = get_errno(setresgid(low2highgid(arg1),
7730 low2highgid(arg2),
7731 low2highgid(arg3)));
7732 break;
7733 #endif
7734 #ifdef TARGET_NR_getresgid
7735 case TARGET_NR_getresgid:
7737 gid_t rgid, egid, sgid;
7738 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7739 if (!is_error(ret)) {
7740 if (put_user_u16(high2lowgid(rgid), arg1)
7741 || put_user_u16(high2lowgid(egid), arg2)
7742 || put_user_u16(high2lowgid(sgid), arg3))
7743 goto efault;
7746 break;
7747 #endif
7748 case TARGET_NR_chown:
7749 if (!(p = lock_user_string(arg1)))
7750 goto efault;
7751 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7752 unlock_user(p, arg1, 0);
7753 break;
7754 case TARGET_NR_setuid:
7755 ret = get_errno(setuid(low2highuid(arg1)));
7756 break;
7757 case TARGET_NR_setgid:
7758 ret = get_errno(setgid(low2highgid(arg1)));
7759 break;
7760 case TARGET_NR_setfsuid:
7761 ret = get_errno(setfsuid(arg1));
7762 break;
7763 case TARGET_NR_setfsgid:
7764 ret = get_errno(setfsgid(arg1));
7765 break;
7767 #ifdef TARGET_NR_lchown32
7768 case TARGET_NR_lchown32:
7769 if (!(p = lock_user_string(arg1)))
7770 goto efault;
7771 ret = get_errno(lchown(p, arg2, arg3));
7772 unlock_user(p, arg1, 0);
7773 break;
7774 #endif
7775 #ifdef TARGET_NR_getuid32
7776 case TARGET_NR_getuid32:
7777 ret = get_errno(getuid());
7778 break;
7779 #endif
7781 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7782 /* Alpha specific */
7783 case TARGET_NR_getxuid:
7785 uid_t euid;
7786 euid=geteuid();
7787 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7789 ret = get_errno(getuid());
7790 break;
7791 #endif
7792 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7793 /* Alpha specific */
7794 case TARGET_NR_getxgid:
7796 uid_t egid;
7797 egid=getegid();
7798 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7800 ret = get_errno(getgid());
7801 break;
7802 #endif
7803 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7804 /* Alpha specific */
7805 case TARGET_NR_osf_getsysinfo:
7806 ret = -TARGET_EOPNOTSUPP;
7807 switch (arg1) {
7808 case TARGET_GSI_IEEE_FP_CONTROL:
7810 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7812 /* Copied from linux ieee_fpcr_to_swcr. */
7813 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7814 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7815 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7816 | SWCR_TRAP_ENABLE_DZE
7817 | SWCR_TRAP_ENABLE_OVF);
7818 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7819 | SWCR_TRAP_ENABLE_INE);
7820 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7821 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7823 if (put_user_u64 (swcr, arg2))
7824 goto efault;
7825 ret = 0;
7827 break;
7829 /* case GSI_IEEE_STATE_AT_SIGNAL:
7830 -- Not implemented in linux kernel.
7831 case GSI_UACPROC:
7832 -- Retrieves current unaligned access state; not much used.
7833 case GSI_PROC_TYPE:
7834 -- Retrieves implver information; surely not used.
7835 case GSI_GET_HWRPB:
7836 -- Grabs a copy of the HWRPB; surely not used.
7837 */
7839 break;
7840 #endif
7841 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7842 /* Alpha specific */
7843 case TARGET_NR_osf_setsysinfo:
7844 ret = -TARGET_EOPNOTSUPP;
7845 switch (arg1) {
7846 case TARGET_SSI_IEEE_FP_CONTROL:
7848 uint64_t swcr, fpcr, orig_fpcr;
7850 if (get_user_u64 (swcr, arg2)) {
7851 goto efault;
7853 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7854 fpcr = orig_fpcr & FPCR_DYN_MASK;
7856 /* Copied from linux ieee_swcr_to_fpcr. */
7857 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7858 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7859 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7860 | SWCR_TRAP_ENABLE_DZE
7861 | SWCR_TRAP_ENABLE_OVF)) << 48;
7862 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7863 | SWCR_TRAP_ENABLE_INE)) << 57;
7864 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7865 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7867 cpu_alpha_store_fpcr(cpu_env, fpcr);
7868 ret = 0;
7870 break;
7872 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7874 uint64_t exc, fpcr, orig_fpcr;
7875 int si_code;
7877 if (get_user_u64(exc, arg2)) {
7878 goto efault;
7881 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7883 /* We only add to the exception status here. */
7884 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7886 cpu_alpha_store_fpcr(cpu_env, fpcr);
7887 ret = 0;
7889 /* Old exceptions are not signaled. */
7890 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7892 /* If any exceptions set by this call,
7893 and are unmasked, send a signal. */
7894 si_code = 0;
7895 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7896 si_code = TARGET_FPE_FLTRES;
7898 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7899 si_code = TARGET_FPE_FLTUND;
7901 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7902 si_code = TARGET_FPE_FLTOVF;
7904 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7905 si_code = TARGET_FPE_FLTDIV;
7907 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7908 si_code = TARGET_FPE_FLTINV;
7910 if (si_code != 0) {
7911 target_siginfo_t info;
7912 info.si_signo = SIGFPE;
7913 info.si_errno = 0;
7914 info.si_code = si_code;
7915 info._sifields._sigfault._addr
7916 = ((CPUArchState *)cpu_env)->pc;
7917 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7920 break;
7922 /* case SSI_NVPAIRS:
7923 -- Used with SSIN_UACPROC to enable unaligned accesses.
7924 case SSI_IEEE_STATE_AT_SIGNAL:
7925 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7926 -- Not implemented in linux kernel
7927 */
7929 break;
7930 #endif
7931 #ifdef TARGET_NR_osf_sigprocmask
7932 /* Alpha specific. */
7933 case TARGET_NR_osf_sigprocmask:
7935 abi_ulong mask;
7936 int how;
7937 sigset_t set, oldset;
7939 switch(arg1) {
7940 case TARGET_SIG_BLOCK:
7941 how = SIG_BLOCK;
7942 break;
7943 case TARGET_SIG_UNBLOCK:
7944 how = SIG_UNBLOCK;
7945 break;
7946 case TARGET_SIG_SETMASK:
7947 how = SIG_SETMASK;
7948 break;
7949 default:
7950 ret = -TARGET_EINVAL;
7951 goto fail;
7953 mask = arg2;
7954 target_to_host_old_sigset(&set, &mask);
7955 sigprocmask(how, &set, &oldset);
7956 host_to_target_old_sigset(&mask, &oldset);
7957 ret = mask;
7959 break;
7960 #endif
7962 #ifdef TARGET_NR_getgid32
7963 case TARGET_NR_getgid32:
7964 ret = get_errno(getgid());
7965 break;
7966 #endif
7967 #ifdef TARGET_NR_geteuid32
7968 case TARGET_NR_geteuid32:
7969 ret = get_errno(geteuid());
7970 break;
7971 #endif
7972 #ifdef TARGET_NR_getegid32
7973 case TARGET_NR_getegid32:
7974 ret = get_errno(getegid());
7975 break;
7976 #endif
7977 #ifdef TARGET_NR_setreuid32
7978 case TARGET_NR_setreuid32:
7979 ret = get_errno(setreuid(arg1, arg2));
7980 break;
7981 #endif
7982 #ifdef TARGET_NR_setregid32
7983 case TARGET_NR_setregid32:
7984 ret = get_errno(setregid(arg1, arg2));
7985 break;
7986 #endif
7987 #ifdef TARGET_NR_getgroups32
7988 case TARGET_NR_getgroups32:
7990 int gidsetsize = arg1;
7991 uint32_t *target_grouplist;
7992 gid_t *grouplist;
7993 int i;
7995 grouplist = alloca(gidsetsize * sizeof(gid_t));
7996 ret = get_errno(getgroups(gidsetsize, grouplist));
7997 if (gidsetsize == 0)
7998 break;
7999 if (!is_error(ret)) {
8000 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8001 if (!target_grouplist) {
8002 ret = -TARGET_EFAULT;
8003 goto fail;
8005 for(i = 0;i < ret; i++)
8006 target_grouplist[i] = tswap32(grouplist[i]);
8007 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8010 break;
8011 #endif
8012 #ifdef TARGET_NR_setgroups32
8013 case TARGET_NR_setgroups32:
8015 int gidsetsize = arg1;
8016 uint32_t *target_grouplist;
8017 gid_t *grouplist;
8018 int i;
8020 grouplist = alloca(gidsetsize * sizeof(gid_t));
8021 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8022 if (!target_grouplist) {
8023 ret = -TARGET_EFAULT;
8024 goto fail;
8026 for(i = 0;i < gidsetsize; i++)
8027 grouplist[i] = tswap32(target_grouplist[i]);
8028 unlock_user(target_grouplist, arg2, 0);
8029 ret = get_errno(setgroups(gidsetsize, grouplist));
8031 break;
8032 #endif
8033 #ifdef TARGET_NR_fchown32
8034 case TARGET_NR_fchown32:
8035 ret = get_errno(fchown(arg1, arg2, arg3));
8036 break;
8037 #endif
8038 #ifdef TARGET_NR_setresuid32
8039 case TARGET_NR_setresuid32:
8040 ret = get_errno(setresuid(arg1, arg2, arg3));
8041 break;
8042 #endif
8043 #ifdef TARGET_NR_getresuid32
8044 case TARGET_NR_getresuid32:
8046 uid_t ruid, euid, suid;
8047 ret = get_errno(getresuid(&ruid, &euid, &suid));
8048 if (!is_error(ret)) {
8049 if (put_user_u32(ruid, arg1)
8050 || put_user_u32(euid, arg2)
8051 || put_user_u32(suid, arg3))
8052 goto efault;
8055 break;
8056 #endif
8057 #ifdef TARGET_NR_setresgid32
8058 case TARGET_NR_setresgid32:
8059 ret = get_errno(setresgid(arg1, arg2, arg3));
8060 break;
8061 #endif
8062 #ifdef TARGET_NR_getresgid32
8063 case TARGET_NR_getresgid32:
8065 gid_t rgid, egid, sgid;
8066 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8067 if (!is_error(ret)) {
8068 if (put_user_u32(rgid, arg1)
8069 || put_user_u32(egid, arg2)
8070 || put_user_u32(sgid, arg3))
8071 goto efault;
8074 break;
8075 #endif
8076 #ifdef TARGET_NR_chown32
8077 case TARGET_NR_chown32:
8078 if (!(p = lock_user_string(arg1)))
8079 goto efault;
8080 ret = get_errno(chown(p, arg2, arg3));
8081 unlock_user(p, arg1, 0);
8082 break;
8083 #endif
8084 #ifdef TARGET_NR_setuid32
8085 case TARGET_NR_setuid32:
8086 ret = get_errno(setuid(arg1));
8087 break;
8088 #endif
8089 #ifdef TARGET_NR_setgid32
8090 case TARGET_NR_setgid32:
8091 ret = get_errno(setgid(arg1));
8092 break;
8093 #endif
8094 #ifdef TARGET_NR_setfsuid32
8095 case TARGET_NR_setfsuid32:
8096 ret = get_errno(setfsuid(arg1));
8097 break;
8098 #endif
8099 #ifdef TARGET_NR_setfsgid32
8100 case TARGET_NR_setfsgid32:
8101 ret = get_errno(setfsgid(arg1));
8102 break;
8103 #endif
8105 case TARGET_NR_pivot_root:
8106 goto unimplemented;
8107 #ifdef TARGET_NR_mincore
8108 case TARGET_NR_mincore:
8110 void *a;
8111 ret = -TARGET_EFAULT;
8112 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8113 goto efault;
8114 if (!(p = lock_user_string(arg3)))
8115 goto mincore_fail;
8116 ret = get_errno(mincore(a, arg2, p));
8117 unlock_user(p, arg3, ret);
8118 mincore_fail:
8119 unlock_user(a, arg1, 0);
8121 break;
8122 #endif
8123 #ifdef TARGET_NR_arm_fadvise64_64
8124 case TARGET_NR_arm_fadvise64_64:
8127 * arm_fadvise64_64 looks like fadvise64_64 but
8128 * with a different argument order
8130 abi_long temp;
8131 temp = arg3;
8132 arg3 = arg4;
8133 arg4 = temp;
8135 #endif
8136 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8137 #ifdef TARGET_NR_fadvise64_64
8138 case TARGET_NR_fadvise64_64:
8139 #endif
8140 #ifdef TARGET_NR_fadvise64
8141 case TARGET_NR_fadvise64:
8142 #endif
8143 #ifdef TARGET_S390X
8144 switch (arg4) {
8145 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8146 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8147 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8148 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8149 default: break;
8151 #endif
8152 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8153 break;
8154 #endif
8155 #ifdef TARGET_NR_madvise
8156 case TARGET_NR_madvise:
8157 /* A straight passthrough may not be safe because qemu sometimes
8158 turns private file-backed mappings into anonymous mappings.
8159 This will break MADV_DONTNEED.
8160 This is a hint, so ignoring and returning success is ok. */
8161 ret = get_errno(0);
8162 break;
8163 #endif
8164 #if TARGET_ABI_BITS == 32
8165 case TARGET_NR_fcntl64:
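        /* 32-bit guests do 64-bit file locking through fcntl64: the
         * F_GETLK64/F_SETLK64 paths convert struct flock64 between the
         * target layout (including the ARM EABI variant) and the host
         * layout around the real fcntl() call. */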
8167 int cmd;
8168 struct flock64 fl;
8169 struct target_flock64 *target_fl;
8170 #ifdef TARGET_ARM
8171 struct target_eabi_flock64 *target_efl;
8172 #endif
8174 cmd = target_to_host_fcntl_cmd(arg2);
8175 if (cmd == -TARGET_EINVAL) {
8176 ret = cmd;
8177 break;
8180 switch(arg2) {
8181 case TARGET_F_GETLK64:
8182 #ifdef TARGET_ARM
8183 if (((CPUARMState *)cpu_env)->eabi) {
8184 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8185 goto efault;
8186 fl.l_type = tswap16(target_efl->l_type);
8187 fl.l_whence = tswap16(target_efl->l_whence);
8188 fl.l_start = tswap64(target_efl->l_start);
8189 fl.l_len = tswap64(target_efl->l_len);
8190 fl.l_pid = tswap32(target_efl->l_pid);
8191 unlock_user_struct(target_efl, arg3, 0);
8192 } else
8193 #endif
8195 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8196 goto efault;
8197 fl.l_type = tswap16(target_fl->l_type);
8198 fl.l_whence = tswap16(target_fl->l_whence);
8199 fl.l_start = tswap64(target_fl->l_start);
8200 fl.l_len = tswap64(target_fl->l_len);
8201 fl.l_pid = tswap32(target_fl->l_pid);
8202 unlock_user_struct(target_fl, arg3, 0);
8204 ret = get_errno(fcntl(arg1, cmd, &fl));
8205 if (ret == 0) {
8206 #ifdef TARGET_ARM
8207 if (((CPUARMState *)cpu_env)->eabi) {
8208 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8209 goto efault;
8210 target_efl->l_type = tswap16(fl.l_type);
8211 target_efl->l_whence = tswap16(fl.l_whence);
8212 target_efl->l_start = tswap64(fl.l_start);
8213 target_efl->l_len = tswap64(fl.l_len);
8214 target_efl->l_pid = tswap32(fl.l_pid);
8215 unlock_user_struct(target_efl, arg3, 1);
8216 } else
8217 #endif
8219 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8220 goto efault;
8221 target_fl->l_type = tswap16(fl.l_type);
8222 target_fl->l_whence = tswap16(fl.l_whence);
8223 target_fl->l_start = tswap64(fl.l_start);
8224 target_fl->l_len = tswap64(fl.l_len);
8225 target_fl->l_pid = tswap32(fl.l_pid);
8226 unlock_user_struct(target_fl, arg3, 1);
8229 break;
8231 case TARGET_F_SETLK64:
8232 case TARGET_F_SETLKW64:
8233 #ifdef TARGET_ARM
8234 if (((CPUARMState *)cpu_env)->eabi) {
8235 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8236 goto efault;
8237 fl.l_type = tswap16(target_efl->l_type);
8238 fl.l_whence = tswap16(target_efl->l_whence);
8239 fl.l_start = tswap64(target_efl->l_start);
8240 fl.l_len = tswap64(target_efl->l_len);
8241 fl.l_pid = tswap32(target_efl->l_pid);
8242 unlock_user_struct(target_efl, arg3, 0);
8243 } else
8244 #endif
8246 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8247 goto efault;
8248 fl.l_type = tswap16(target_fl->l_type);
8249 fl.l_whence = tswap16(target_fl->l_whence);
8250 fl.l_start = tswap64(target_fl->l_start);
8251 fl.l_len = tswap64(target_fl->l_len);
8252 fl.l_pid = tswap32(target_fl->l_pid);
8253 unlock_user_struct(target_fl, arg3, 0);
8255 ret = get_errno(fcntl(arg1, cmd, &fl));
8256 break;
8257 default:
8258 ret = do_fcntl(arg1, arg2, arg3);
8259 break;
8261 break;
8263 #endif
8264 #ifdef TARGET_NR_cacheflush
8265 case TARGET_NR_cacheflush:
8266 /* self-modifying code is handled automatically, so nothing needed */
8267 ret = 0;
8268 break;
8269 #endif
8270 #ifdef TARGET_NR_security
8271 case TARGET_NR_security:
8272 goto unimplemented;
8273 #endif
8274 #ifdef TARGET_NR_getpagesize
8275 case TARGET_NR_getpagesize:
8276 ret = TARGET_PAGE_SIZE;
8277 break;
8278 #endif
8279 case TARGET_NR_gettid:
8280 ret = get_errno(gettid());
8281 break;
8282 #ifdef TARGET_NR_readahead
8283 case TARGET_NR_readahead:
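        /* On 32-bit ABIs the 64-bit offset arrives as a register pair;
         * targets that align such pairs to even registers
         * (regpairs_aligned) insert a pad, so shift the arguments down
         * by one before reassembling the offset. */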
8284 #if TARGET_ABI_BITS == 32
8285 if (regpairs_aligned(cpu_env)) {
8286 arg2 = arg3;
8287 arg3 = arg4;
8288 arg4 = arg5;
8290 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8291 #else
8292 ret = get_errno(readahead(arg1, arg2, arg3));
8293 #endif
8294 break;
8295 #endif
8296 #ifdef CONFIG_ATTR
8297 #ifdef TARGET_NR_setxattr
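        /* The xattr family shares one lock/convert/unlock pattern across
         * the path-based, link (l*) and fd-based (f*) variants; a zero
         * guest buffer is passed through as NULL so the size-probing
         * form of the list/get calls still works. */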
8298 case TARGET_NR_listxattr:
8299 case TARGET_NR_llistxattr:
8301 void *p, *b = 0;
8302 if (arg2) {
8303 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8304 if (!b) {
8305 ret = -TARGET_EFAULT;
8306 break;
8309 p = lock_user_string(arg1);
8310 if (p) {
8311 if (num == TARGET_NR_listxattr) {
8312 ret = get_errno(listxattr(p, b, arg3));
8313 } else {
8314 ret = get_errno(llistxattr(p, b, arg3));
8316 } else {
8317 ret = -TARGET_EFAULT;
8319 unlock_user(p, arg1, 0);
8320 unlock_user(b, arg2, arg3);
8321 break;
8323 case TARGET_NR_flistxattr:
8325 void *b = 0;
8326 if (arg2) {
8327 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8328 if (!b) {
8329 ret = -TARGET_EFAULT;
8330 break;
8333 ret = get_errno(flistxattr(arg1, b, arg3));
8334 unlock_user(b, arg2, arg3);
8335 break;
8337 case TARGET_NR_setxattr:
8338 case TARGET_NR_lsetxattr:
8340 void *p, *n, *v = 0;
8341 if (arg3) {
8342 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8343 if (!v) {
8344 ret = -TARGET_EFAULT;
8345 break;
8348 p = lock_user_string(arg1);
8349 n = lock_user_string(arg2);
8350 if (p && n) {
8351 if (num == TARGET_NR_setxattr) {
8352 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8353 } else {
8354 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8356 } else {
8357 ret = -TARGET_EFAULT;
8359 unlock_user(p, arg1, 0);
8360 unlock_user(n, arg2, 0);
8361 unlock_user(v, arg3, 0);
8363 break;
8364 case TARGET_NR_fsetxattr:
8366 void *n, *v = 0;
8367 if (arg3) {
8368 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8369 if (!v) {
8370 ret = -TARGET_EFAULT;
8371 break;
8374 n = lock_user_string(arg2);
8375 if (n) {
8376 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8377 } else {
8378 ret = -TARGET_EFAULT;
8380 unlock_user(n, arg2, 0);
8381 unlock_user(v, arg3, 0);
8383 break;
8384 case TARGET_NR_getxattr:
8385 case TARGET_NR_lgetxattr:
8387 void *p, *n, *v = 0;
8388 if (arg3) {
8389 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8390 if (!v) {
8391 ret = -TARGET_EFAULT;
8392 break;
8395 p = lock_user_string(arg1);
8396 n = lock_user_string(arg2);
8397 if (p && n) {
8398 if (num == TARGET_NR_getxattr) {
8399 ret = get_errno(getxattr(p, n, v, arg4));
8400 } else {
8401 ret = get_errno(lgetxattr(p, n, v, arg4));
8403 } else {
8404 ret = -TARGET_EFAULT;
8406 unlock_user(p, arg1, 0);
8407 unlock_user(n, arg2, 0);
8408 unlock_user(v, arg3, arg4);
8410 break;
8411 case TARGET_NR_fgetxattr:
8413 void *n, *v = 0;
8414 if (arg3) {
8415 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8416 if (!v) {
8417 ret = -TARGET_EFAULT;
8418 break;
8421 n = lock_user_string(arg2);
8422 if (n) {
8423 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8424 } else {
8425 ret = -TARGET_EFAULT;
8427 unlock_user(n, arg2, 0);
8428 unlock_user(v, arg3, arg4);
8430 break;
8431 case TARGET_NR_removexattr:
8432 case TARGET_NR_lremovexattr:
8434 void *p, *n;
8435 p = lock_user_string(arg1);
8436 n = lock_user_string(arg2);
8437 if (p && n) {
8438 if (num == TARGET_NR_removexattr) {
8439 ret = get_errno(removexattr(p, n));
8440 } else {
8441 ret = get_errno(lremovexattr(p, n));
8443 } else {
8444 ret = -TARGET_EFAULT;
8446 unlock_user(p, arg1, 0);
8447 unlock_user(n, arg2, 0);
8449 break;
8450 case TARGET_NR_fremovexattr:
8452 void *n;
8453 n = lock_user_string(arg2);
8454 if (n) {
8455 ret = get_errno(fremovexattr(arg1, n));
8456 } else {
8457 ret = -TARGET_EFAULT;
8459 unlock_user(n, arg2, 0);
8461 break;
8462 #endif
8463 #endif /* CONFIG_ATTR */
8464 #ifdef TARGET_NR_set_thread_area
8465 case TARGET_NR_set_thread_area:
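        /* Each target keeps its TLS pointer in a different place: MIPS
         * and CRIS update the CPU state directly, while 32-bit x86
         * defers to do_set_thread_area(). */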
8466 #if defined(TARGET_MIPS)
8467 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8468 ret = 0;
8469 break;
8470 #elif defined(TARGET_CRIS)
8471 if (arg1 & 0xff)
8472 ret = -TARGET_EINVAL;
8473 else {
8474 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8475 ret = 0;
8477 break;
8478 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8479 ret = do_set_thread_area(cpu_env, arg1);
8480 break;
8481 #else
8482 goto unimplemented_nowarn;
8483 #endif
8484 #endif
8485 #ifdef TARGET_NR_get_thread_area
8486 case TARGET_NR_get_thread_area:
8487 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8488 ret = do_get_thread_area(cpu_env, arg1);
8489 #else
8490 goto unimplemented_nowarn;
8491 #endif
8492 #endif
8493 #ifdef TARGET_NR_getdomainname
8494 case TARGET_NR_getdomainname:
8495 goto unimplemented_nowarn;
8496 #endif
8498 #ifdef TARGET_NR_clock_gettime
8499 case TARGET_NR_clock_gettime:
8501 struct timespec ts;
8502 ret = get_errno(clock_gettime(arg1, &ts));
8503 if (!is_error(ret)) {
8504 host_to_target_timespec(arg2, &ts);
8506 break;
8508 #endif
8509 #ifdef TARGET_NR_clock_getres
8510 case TARGET_NR_clock_getres:
8512 struct timespec ts;
8513 ret = get_errno(clock_getres(arg1, &ts));
8514 if (!is_error(ret)) {
8515 host_to_target_timespec(arg2, &ts);
8517 break;
8519 #endif
8520 #ifdef TARGET_NR_clock_nanosleep
8521 case TARGET_NR_clock_nanosleep:
8523 struct timespec ts;
8524 target_to_host_timespec(&ts, arg3);
8525 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8526 if (arg4)
8527 host_to_target_timespec(arg4, &ts);
8528 break;
8530 #endif
8532 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8533 case TARGET_NR_set_tid_address:
8534 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8535 break;
8536 #endif
8538 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8539 case TARGET_NR_tkill:
8540 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8541 break;
8542 #endif
8544 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8545 case TARGET_NR_tgkill:
8546 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8547 target_to_host_signal(arg3)));
8548 break;
8549 #endif
8551 #ifdef TARGET_NR_set_robust_list
8552 case TARGET_NR_set_robust_list:
8553 goto unimplemented_nowarn;
8554 #endif
8556 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8557 case TARGET_NR_utimensat:
8559 struct timespec *tsp, ts[2];
8560 if (!arg3) {
8561 tsp = NULL;
8562 } else {
8563 target_to_host_timespec(ts, arg3);
8564 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8565 tsp = ts;
8567 if (!arg2)
8568 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8569 else {
8570 if (!(p = lock_user_string(arg2))) {
8571 ret = -TARGET_EFAULT;
8572 goto fail;
8574 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8575 unlock_user(p, arg2, 0);
8578 break;
8579 #endif
8580 #if defined(CONFIG_USE_NPTL)
8581 case TARGET_NR_futex:
8582 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8583 break;
8584 #endif
8585 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8586 case TARGET_NR_inotify_init:
8587 ret = get_errno(sys_inotify_init());
8588 break;
8589 #endif
8590 #ifdef CONFIG_INOTIFY1
8591 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8592 case TARGET_NR_inotify_init1:
8593 ret = get_errno(sys_inotify_init1(arg1));
8594 break;
8595 #endif
8596 #endif
8597 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8598 case TARGET_NR_inotify_add_watch:
8599 p = lock_user_string(arg2);
8600 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8601 unlock_user(p, arg2, 0);
8602 break;
8603 #endif
8604 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8605 case TARGET_NR_inotify_rm_watch:
8606 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8607 break;
8608 #endif
8610 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8611 case TARGET_NR_mq_open:
8613 struct mq_attr posix_mq_attr;
8615 p = lock_user_string(arg1 - 1);
8616 if (arg4 != 0)
8617 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8618 ret = get_errno(mq_open(p, arg2, arg3, arg4 ? &posix_mq_attr : NULL));
8619 unlock_user (p, arg1, 0);
8621 break;
8623 case TARGET_NR_mq_unlink:
8624 p = lock_user_string(arg1 - 1);
8625 ret = get_errno(mq_unlink(p));
8626 unlock_user (p, arg1, 0);
8627 break;
8629 case TARGET_NR_mq_timedsend:
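        /* With a timeout the target timespec is converted to host format
         * for mq_timedsend() and copied back afterwards; without one the
         * plain mq_send() path is used instead. */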
8631 struct timespec ts;
8633 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8634 if (arg5 != 0) {
8635 target_to_host_timespec(&ts, arg5);
8636 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8637 host_to_target_timespec(arg5, &ts);
8639 else
8640 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8641 unlock_user (p, arg2, arg3);
8643 break;
8645 case TARGET_NR_mq_timedreceive:
8647 struct timespec ts;
8648 unsigned int prio;
8650 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8651 if (arg5 != 0) {
8652 target_to_host_timespec(&ts, arg5);
8653 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8654 host_to_target_timespec(arg5, &ts);
8656 else
8657 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8658 unlock_user (p, arg2, arg3);
8659 if (arg4 != 0)
8660 put_user_u32(prio, arg4);
8662 break;
8664 /* Not implemented for now... */
8665 /* case TARGET_NR_mq_notify: */
8666 /* break; */
8668 case TARGET_NR_mq_getsetattr:
8670 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8671 ret = 0;
8672 if (arg3 != 0) {
8673 ret = mq_getattr(arg1, &posix_mq_attr_out);
8674 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8676 if (arg2 != 0) {
8677 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8678 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8682 break;
8683 #endif
8685 #ifdef CONFIG_SPLICE
8686 #ifdef TARGET_NR_tee
8687 case TARGET_NR_tee:
8689 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8691 break;
8692 #endif
8693 #ifdef TARGET_NR_splice
8694 case TARGET_NR_splice:
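        /* The input and output offsets are optional; when the guest
         * passes a pointer the 64-bit value is read from guest memory
         * and forwarded to the host splice() call. */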
8696 loff_t loff_in, loff_out;
8697 loff_t *ploff_in = NULL, *ploff_out = NULL;
8698 if(arg2) {
8699 get_user_u64(loff_in, arg2);
8700 ploff_in = &loff_in;
8702 if(arg4) {
8703 get_user_u64(loff_out, arg4);
8704 ploff_out = &loff_out;
8706 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8708 break;
8709 #endif
8710 #ifdef TARGET_NR_vmsplice
8711 case TARGET_NR_vmsplice:
8713 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8714 if (vec != NULL) {
8715 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8716 unlock_iovec(vec, arg2, arg3, 0);
8717 } else {
8718 ret = -host_to_target_errno(errno);
8721 break;
8722 #endif
8723 #endif /* CONFIG_SPLICE */
8724 #ifdef CONFIG_EVENTFD
8725 #if defined(TARGET_NR_eventfd)
8726 case TARGET_NR_eventfd:
8727 ret = get_errno(eventfd(arg1, 0));
8728 break;
8729 #endif
8730 #if defined(TARGET_NR_eventfd2)
8731 case TARGET_NR_eventfd2:
8732 ret = get_errno(eventfd(arg1, arg2));
8733 break;
8734 #endif
8735 #endif /* CONFIG_EVENTFD */
8736 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8737 case TARGET_NR_fallocate:
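        /* On 32-bit ABIs the 64-bit offset and length are each split
         * across a register pair and reassembled with target_offset64(). */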
8738 #if TARGET_ABI_BITS == 32
8739 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8740 target_offset64(arg5, arg6)));
8741 #else
8742 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8743 #endif
8744 break;
8745 #endif
8746 #if defined(CONFIG_SYNC_FILE_RANGE)
8747 #if defined(TARGET_NR_sync_file_range)
8748 case TARGET_NR_sync_file_range:
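        /* On 32-bit ABIs both 64-bit arguments arrive as register pairs;
         * MIPS aligns the pairs to even registers, which shifts every
         * argument up by one compared with the other targets. */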
8749 #if TARGET_ABI_BITS == 32
8750 #if defined(TARGET_MIPS)
8751 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8752 target_offset64(arg5, arg6), arg7));
8753 #else
8754 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8755 target_offset64(arg4, arg5), arg6));
8756 #endif /* !TARGET_MIPS */
8757 #else
8758 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8759 #endif
8760 break;
8761 #endif
8762 #if defined(TARGET_NR_sync_file_range2)
8763 case TARGET_NR_sync_file_range2:
8764 /* This is like sync_file_range but the arguments are reordered */
8765 #if TARGET_ABI_BITS == 32
8766 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8767 target_offset64(arg5, arg6), arg2));
8768 #else
8769 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8770 #endif
8771 break;
8772 #endif
8773 #endif
8774 #if defined(CONFIG_EPOLL)
8775 #if defined(TARGET_NR_epoll_create)
8776 case TARGET_NR_epoll_create:
8777 ret = get_errno(epoll_create(arg1));
8778 break;
8779 #endif
8780 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8781 case TARGET_NR_epoll_create1:
8782 ret = get_errno(epoll_create1(arg1));
8783 break;
8784 #endif
8785 #if defined(TARGET_NR_epoll_ctl)
8786 case TARGET_NR_epoll_ctl:
8788 struct epoll_event ep;
8789 struct epoll_event *epp = 0;
8790 if (arg4) {
8791 struct target_epoll_event *target_ep;
8792 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8793 goto efault;
8795 ep.events = tswap32(target_ep->events);
8796 /* The epoll_data_t union is just opaque data to the kernel,
8797 * so we transfer all 64 bits across and need not worry what
8798 * actual data type it is.
8800 ep.data.u64 = tswap64(target_ep->data.u64);
8801 unlock_user_struct(target_ep, arg4, 0);
8802 epp = &ep;
8804 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8805 break;
8807 #endif
8809 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8810 #define IMPLEMENT_EPOLL_PWAIT
8811 #endif
8812 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8813 #if defined(TARGET_NR_epoll_wait)
8814 case TARGET_NR_epoll_wait:
8815 #endif
8816 #if defined(IMPLEMENT_EPOLL_PWAIT)
8817 case TARGET_NR_epoll_pwait:
8818 #endif
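        /* epoll_wait and epoll_pwait share this body: lock the guest
         * event array for writing, let the host call fill a temporary
         * buffer, then byte-swap any returned events back to the guest. */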
8820 struct target_epoll_event *target_ep;
8821 struct epoll_event *ep;
8822 int epfd = arg1;
8823 int maxevents = arg3;
8824 int timeout = arg4;
8826 target_ep = lock_user(VERIFY_WRITE, arg2,
8827 maxevents * sizeof(struct target_epoll_event), 1);
8828 if (!target_ep) {
8829 goto efault;
8832 ep = alloca(maxevents * sizeof(struct epoll_event));
8834 switch (num) {
8835 #if defined(IMPLEMENT_EPOLL_PWAIT)
8836 case TARGET_NR_epoll_pwait:
8838 target_sigset_t *target_set;
8839 sigset_t _set, *set = &_set;
8841 if (arg5) {
8842 target_set = lock_user(VERIFY_READ, arg5,
8843 sizeof(target_sigset_t), 1);
8844 if (!target_set) {
8845 unlock_user(target_ep, arg2, 0);
8846 goto efault;
8848 target_to_host_sigset(set, target_set);
8849 unlock_user(target_set, arg5, 0);
8850 } else {
8851 set = NULL;
8854 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8855 break;
8857 #endif
8858 #if defined(TARGET_NR_epoll_wait)
8859 case TARGET_NR_epoll_wait:
8860 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8861 break;
8862 #endif
8863 default:
8864 ret = -TARGET_ENOSYS;
8866 if (!is_error(ret)) {
8867 int i;
8868 for (i = 0; i < ret; i++) {
8869 target_ep[i].events = tswap32(ep[i].events);
8870 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8873 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8874 break;
8876 #endif
8877 #endif
8878 #ifdef TARGET_NR_prlimit64
8879 case TARGET_NR_prlimit64:
8881 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8882 struct target_rlimit64 *target_rnew, *target_rold;
8883 struct host_rlimit64 rnew, rold, *rnewp = 0;
8884 if (arg3) {
8885 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8886 goto efault;
8888 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8889 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8890 unlock_user_struct(target_rnew, arg3, 0);
8891 rnewp = &rnew;
8894 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8895 if (!is_error(ret) && arg4) {
8896 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8897 goto efault;
8899 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8900 target_rold->rlim_max = tswap64(rold.rlim_max);
8901 unlock_user_struct(target_rold, arg4, 1);
8903 break;
8905 #endif
8906 #ifdef TARGET_NR_gethostname
8907 case TARGET_NR_gethostname:
8909 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8910 if (name) {
8911 ret = get_errno(gethostname(name, arg2));
8912 unlock_user(name, arg1, arg2);
8913 } else {
8914 ret = -TARGET_EFAULT;
8916 break;
8918 #endif
8919 default:
8920 unimplemented:
8921 gemu_log("qemu: Unsupported syscall: %d\n", num);
8922 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8923 unimplemented_nowarn:
8924 #endif
8925 ret = -TARGET_ENOSYS;
8926 break;
8928 fail:
8929 #ifdef DEBUG
8930 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8931 #endif
8932 if(do_strace)
8933 print_syscall_ret(num, ret);
8934 return ret;
8935 efault:
8936 ret = -TARGET_EFAULT;
8937 goto fail;