linux-user/syscall.c: Enforce pselect6 sigset size restrictions
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include <qemu-common.h>
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/utsname.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #include <linux/vt.h>
95 #include "linux_loop.h"
96 #include "cpu-uname.h"
98 #include "qemu.h"
99 #include "qemu-common.h"
101 #if defined(CONFIG_USE_NPTL)
102 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
103 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
104 #else
105 /* XXX: Hardcode the above values. */
106 #define CLONE_NPTL_FLAGS2 0
107 #endif
109 //#define DEBUG
111 //#include <linux/msdos_fs.h>
112 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
113 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
116 #undef _syscall0
117 #undef _syscall1
118 #undef _syscall2
119 #undef _syscall3
120 #undef _syscall4
121 #undef _syscall5
122 #undef _syscall6
124 #define _syscall0(type,name) \
125 static type name (void) \
127 return syscall(__NR_##name); \
130 #define _syscall1(type,name,type1,arg1) \
131 static type name (type1 arg1) \
133 return syscall(__NR_##name, arg1); \
136 #define _syscall2(type,name,type1,arg1,type2,arg2) \
137 static type name (type1 arg1,type2 arg2) \
139 return syscall(__NR_##name, arg1, arg2); \
142 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
143 static type name (type1 arg1,type2 arg2,type3 arg3) \
145 return syscall(__NR_##name, arg1, arg2, arg3); \
148 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
149 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
151 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
154 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
155 type5,arg5) \
156 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
158 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
162 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5,type6,arg6) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
165 type6 arg6) \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
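/*
 * Each _syscallN macro above generates a small static wrapper that simply
 * forwards its arguments to the host's syscall(2).  For example,
 * "_syscall2(int, sys_getpriority, int, which, int, who)" expands to
 * roughly:
 *
 *     static int sys_getpriority(int which, int who)
 *     {
 *         return syscall(__NR_sys_getpriority, which, who);
 *     }
 *
 * with the __NR_sys_* aliases defined just below mapping each wrapper onto
 * the corresponding host syscall number.  The wrappers return the raw host
 * result; callers convert errors with get_errno() further down.
 */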
171 #define __NR_sys_uname __NR_uname
172 #define __NR_sys_faccessat __NR_faccessat
173 #define __NR_sys_fchmodat __NR_fchmodat
174 #define __NR_sys_fchownat __NR_fchownat
175 #define __NR_sys_fstatat64 __NR_fstatat64
176 #define __NR_sys_futimesat __NR_futimesat
177 #define __NR_sys_getcwd1 __NR_getcwd
178 #define __NR_sys_getdents __NR_getdents
179 #define __NR_sys_getdents64 __NR_getdents64
180 #define __NR_sys_getpriority __NR_getpriority
181 #define __NR_sys_linkat __NR_linkat
182 #define __NR_sys_mkdirat __NR_mkdirat
183 #define __NR_sys_mknodat __NR_mknodat
184 #define __NR_sys_newfstatat __NR_newfstatat
185 #define __NR_sys_openat __NR_openat
186 #define __NR_sys_readlinkat __NR_readlinkat
187 #define __NR_sys_renameat __NR_renameat
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_symlinkat __NR_symlinkat
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_unlinkat __NR_unlinkat
194 #define __NR_sys_utimensat __NR_utimensat
195 #define __NR_sys_futex __NR_futex
196 #define __NR_sys_inotify_init __NR_inotify_init
197 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
198 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
200 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
201 defined(__s390x__)
202 #define __NR__llseek __NR_lseek
203 #endif
205 #ifdef __NR_gettid
206 _syscall0(int, gettid)
207 #else
208 /* This is a replacement for the host gettid() and must return a host
209 errno. */
210 static int gettid(void) {
211 return -ENOSYS;
213 #endif
214 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
215 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
216 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
217 #endif
218 _syscall2(int, sys_getpriority, int, which, int, who);
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
221 loff_t *, res, uint, wh);
222 #endif
223 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
224 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
225 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
226 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
227 #endif
228 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
229 _syscall2(int,sys_tkill,int,tid,int,sig)
230 #endif
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
233 #endif
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
236 #endif
237 #if defined(CONFIG_USE_NPTL)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
240 const struct timespec *,timeout,int *,uaddr2,int,val3)
241 #endif
242 #endif
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
250 static bitmask_transtbl fcntl_flags_tbl[] = {
251 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
252 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
253 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
254 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
255 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
256 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
257 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
258 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
259 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
260 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
261 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
262 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
263 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
264 #if defined(O_DIRECT)
265 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
266 #endif
267 { 0, 0, 0, 0 }
270 #define COPY_UTSNAME_FIELD(dest, src) \
271 do { \
272 /* __NEW_UTS_LEN doesn't include terminating null */ \
273 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
274 (dest)[__NEW_UTS_LEN] = '\0'; \
275 } while (0)
277 static int sys_uname(struct new_utsname *buf)
279 struct utsname uts_buf;
281 if (uname(&uts_buf) < 0)
282 return (-1);
285      * Just in case these have some differences, we
286      * translate utsname to new_utsname (which is the
287      * struct the Linux kernel uses).
290 memset(buf, 0, sizeof(*buf));
291 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
292 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
293 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
294 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
295 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
296 #ifdef _GNU_SOURCE
297 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
298 #endif
299 return (0);
301 #undef COPY_UTSNAME_FIELD
304 static int sys_getcwd1(char *buf, size_t size)
306 if (getcwd(buf, size) == NULL) {
307 /* getcwd() sets errno */
308 return (-1);
310 return strlen(buf)+1;
313 #ifdef CONFIG_ATFILE
315  * The host system seems to have the atfile syscall stubs available.
316  * We now enable them one by one as specified by the target's syscall_nr.h.
319 #ifdef TARGET_NR_faccessat
320 static int sys_faccessat(int dirfd, const char *pathname, int mode)
322 return (faccessat(dirfd, pathname, mode, 0));
324 #endif
325 #ifdef TARGET_NR_fchmodat
326 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
328 return (fchmodat(dirfd, pathname, mode, 0));
330 #endif
331 #if defined(TARGET_NR_fchownat)
332 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
333 gid_t group, int flags)
335 return (fchownat(dirfd, pathname, owner, group, flags));
337 #endif
338 #ifdef __NR_fstatat64
339 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
340 int flags)
342 return (fstatat(dirfd, pathname, buf, flags));
344 #endif
345 #ifdef __NR_newfstatat
346 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
347 int flags)
349 return (fstatat(dirfd, pathname, buf, flags));
351 #endif
352 #ifdef TARGET_NR_futimesat
353 static int sys_futimesat(int dirfd, const char *pathname,
354 const struct timeval times[2])
356 return (futimesat(dirfd, pathname, times));
358 #endif
359 #ifdef TARGET_NR_linkat
360 static int sys_linkat(int olddirfd, const char *oldpath,
361 int newdirfd, const char *newpath, int flags)
363 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
365 #endif
366 #ifdef TARGET_NR_mkdirat
367 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
369 return (mkdirat(dirfd, pathname, mode));
371 #endif
372 #ifdef TARGET_NR_mknodat
373 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
374 dev_t dev)
376 return (mknodat(dirfd, pathname, mode, dev));
378 #endif
379 #ifdef TARGET_NR_openat
380 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
383      * open(2) takes an extra 'mode' parameter when it is called
384      * with the O_CREAT flag.
386 if ((flags & O_CREAT) != 0) {
387 va_list ap;
388 mode_t mode;
391 * Get the 'mode' parameter and translate it to
392 * host bits.
394 va_start(ap, flags);
395 mode = va_arg(ap, mode_t);
396 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
397 va_end(ap);
399 return (openat(dirfd, pathname, flags, mode));
401 return (openat(dirfd, pathname, flags));
403 #endif
404 #ifdef TARGET_NR_readlinkat
405 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
407 return (readlinkat(dirfd, pathname, buf, bufsiz));
409 #endif
410 #ifdef TARGET_NR_renameat
411 static int sys_renameat(int olddirfd, const char *oldpath,
412 int newdirfd, const char *newpath)
414 return (renameat(olddirfd, oldpath, newdirfd, newpath));
416 #endif
417 #ifdef TARGET_NR_symlinkat
418 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
420 return (symlinkat(oldpath, newdirfd, newpath));
422 #endif
423 #ifdef TARGET_NR_unlinkat
424 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
426 return (unlinkat(dirfd, pathname, flags));
428 #endif
429 #else /* !CONFIG_ATFILE */
432 * Try direct syscalls instead
434 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
435 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
436 #endif
437 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
438 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
439 #endif
440 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
441 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
442 uid_t,owner,gid_t,group,int,flags)
443 #endif
444 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
445 defined(__NR_fstatat64)
446 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
447 struct stat *,buf,int,flags)
448 #endif
449 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
450 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
451 const struct timeval *,times)
452 #endif
453 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
454 defined(__NR_newfstatat)
455 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
456 struct stat *,buf,int,flags)
457 #endif
458 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
459 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
460 int,newdirfd,const char *,newpath,int,flags)
461 #endif
462 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
463 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
464 #endif
465 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
466 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
467 mode_t,mode,dev_t,dev)
468 #endif
469 #if defined(TARGET_NR_openat) && defined(__NR_openat)
470 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
471 #endif
472 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
473 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
474 char *,buf,size_t,bufsize)
475 #endif
476 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
477 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
478 int,newdirfd,const char *,newpath)
479 #endif
480 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
481 _syscall3(int,sys_symlinkat,const char *,oldpath,
482 int,newdirfd,const char *,newpath)
483 #endif
484 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
485 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
486 #endif
488 #endif /* CONFIG_ATFILE */
490 #ifdef CONFIG_UTIMENSAT
491 static int sys_utimensat(int dirfd, const char *pathname,
492 const struct timespec times[2], int flags)
494 if (pathname == NULL)
495 return futimens(dirfd, times);
496 else
497 return utimensat(dirfd, pathname, times, flags);
499 #else
500 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
501 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
502 const struct timespec *,tsp,int,flags)
503 #endif
504 #endif /* CONFIG_UTIMENSAT */
506 #ifdef CONFIG_INOTIFY
507 #include <sys/inotify.h>
509 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
510 static int sys_inotify_init(void)
512 return (inotify_init());
514 #endif
515 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
516 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
518 return (inotify_add_watch(fd, pathname, mask));
520 #endif
521 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
522 static int sys_inotify_rm_watch(int fd, int32_t wd)
524 return (inotify_rm_watch(fd, wd));
526 #endif
527 #ifdef CONFIG_INOTIFY1
528 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
529 static int sys_inotify_init1(int flags)
531 return (inotify_init1(flags));
533 #endif
534 #endif
535 #else
536 /* Userspace can usually survive runtime without inotify */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY */
543 #if defined(TARGET_NR_ppoll)
544 #ifndef __NR_ppoll
545 # define __NR_ppoll -1
546 #endif
547 #define __NR_sys_ppoll __NR_ppoll
548 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
549 struct timespec *, timeout, const __sigset_t *, sigmask,
550 size_t, sigsetsize)
551 #endif
553 #if defined(TARGET_NR_pselect6)
554 #ifndef __NR_pselect6
555 # define __NR_pselect6 -1
556 #endif
557 #define __NR_sys_pselect6 __NR_pselect6
558 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
559 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
560 #endif
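/*
 * Note on pselect6: the kernel's sixth argument ("sig" above) is not a bare
 * sigset_t pointer but a pointer to a { const sigset_t *ss; size_t ss_len; }
 * pair, and the kernel fails the call with EINVAL when ss_len does not match
 * the kernel's sigset size.  That is presumably the sigset size restriction
 * referred to in the commit subject, and it has to be respected when a
 * target pselect6 call is forwarded to the host.
 */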
562 #if defined(TARGET_NR_prlimit64)
563 #ifndef __NR_prlimit64
564 # define __NR_prlimit64 -1
565 #endif
566 #define __NR_sys_prlimit64 __NR_prlimit64
567 /* The glibc rlimit structure may not be that used by the underlying syscall */
568 struct host_rlimit64 {
569 uint64_t rlim_cur;
570 uint64_t rlim_max;
572 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
573 const struct host_rlimit64 *, new_limit,
574 struct host_rlimit64 *, old_limit)
575 #endif
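/*
 * The prlimit64 syscall always operates on 64-bit rlim_cur/rlim_max fields,
 * while glibc's struct rlimit uses rlim_t, whose width depends on the host
 * ABI; host_rlimit64 above mirrors the kernel's fixed layout so the raw
 * syscall can be invoked safely on any host.
 */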
577 extern int personality(int);
578 extern int flock(int, int);
579 extern int setfsuid(int);
580 extern int setfsgid(int);
581 extern int setgroups(int, gid_t *);
583 #define ERRNO_TABLE_SIZE 1200
585 /* target_to_host_errno_table[] is initialized from
586 * host_to_target_errno_table[] in syscall_init(). */
587 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
591 * This list is the union of errno values overridden in asm-<arch>/errno.h
592 * minus the errnos that are not actually generic to all archs.
594 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
595 [EIDRM] = TARGET_EIDRM,
596 [ECHRNG] = TARGET_ECHRNG,
597 [EL2NSYNC] = TARGET_EL2NSYNC,
598 [EL3HLT] = TARGET_EL3HLT,
599 [EL3RST] = TARGET_EL3RST,
600 [ELNRNG] = TARGET_ELNRNG,
601 [EUNATCH] = TARGET_EUNATCH,
602 [ENOCSI] = TARGET_ENOCSI,
603 [EL2HLT] = TARGET_EL2HLT,
604 [EDEADLK] = TARGET_EDEADLK,
605 [ENOLCK] = TARGET_ENOLCK,
606 [EBADE] = TARGET_EBADE,
607 [EBADR] = TARGET_EBADR,
608 [EXFULL] = TARGET_EXFULL,
609 [ENOANO] = TARGET_ENOANO,
610 [EBADRQC] = TARGET_EBADRQC,
611 [EBADSLT] = TARGET_EBADSLT,
612 [EBFONT] = TARGET_EBFONT,
613 [ENOSTR] = TARGET_ENOSTR,
614 [ENODATA] = TARGET_ENODATA,
615 [ETIME] = TARGET_ETIME,
616 [ENOSR] = TARGET_ENOSR,
617 [ENONET] = TARGET_ENONET,
618 [ENOPKG] = TARGET_ENOPKG,
619 [EREMOTE] = TARGET_EREMOTE,
620 [ENOLINK] = TARGET_ENOLINK,
621 [EADV] = TARGET_EADV,
622 [ESRMNT] = TARGET_ESRMNT,
623 [ECOMM] = TARGET_ECOMM,
624 [EPROTO] = TARGET_EPROTO,
625 [EDOTDOT] = TARGET_EDOTDOT,
626 [EMULTIHOP] = TARGET_EMULTIHOP,
627 [EBADMSG] = TARGET_EBADMSG,
628 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
629 [EOVERFLOW] = TARGET_EOVERFLOW,
630 [ENOTUNIQ] = TARGET_ENOTUNIQ,
631 [EBADFD] = TARGET_EBADFD,
632 [EREMCHG] = TARGET_EREMCHG,
633 [ELIBACC] = TARGET_ELIBACC,
634 [ELIBBAD] = TARGET_ELIBBAD,
635 [ELIBSCN] = TARGET_ELIBSCN,
636 [ELIBMAX] = TARGET_ELIBMAX,
637 [ELIBEXEC] = TARGET_ELIBEXEC,
638 [EILSEQ] = TARGET_EILSEQ,
639 [ENOSYS] = TARGET_ENOSYS,
640 [ELOOP] = TARGET_ELOOP,
641 [ERESTART] = TARGET_ERESTART,
642 [ESTRPIPE] = TARGET_ESTRPIPE,
643 [ENOTEMPTY] = TARGET_ENOTEMPTY,
644 [EUSERS] = TARGET_EUSERS,
645 [ENOTSOCK] = TARGET_ENOTSOCK,
646 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
647 [EMSGSIZE] = TARGET_EMSGSIZE,
648 [EPROTOTYPE] = TARGET_EPROTOTYPE,
649 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
650 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
651 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
652 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
653 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
654 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
655 [EADDRINUSE] = TARGET_EADDRINUSE,
656 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
657 [ENETDOWN] = TARGET_ENETDOWN,
658 [ENETUNREACH] = TARGET_ENETUNREACH,
659 [ENETRESET] = TARGET_ENETRESET,
660 [ECONNABORTED] = TARGET_ECONNABORTED,
661 [ECONNRESET] = TARGET_ECONNRESET,
662 [ENOBUFS] = TARGET_ENOBUFS,
663 [EISCONN] = TARGET_EISCONN,
664 [ENOTCONN] = TARGET_ENOTCONN,
665 [EUCLEAN] = TARGET_EUCLEAN,
666 [ENOTNAM] = TARGET_ENOTNAM,
667 [ENAVAIL] = TARGET_ENAVAIL,
668 [EISNAM] = TARGET_EISNAM,
669 [EREMOTEIO] = TARGET_EREMOTEIO,
670 [ESHUTDOWN] = TARGET_ESHUTDOWN,
671 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
672 [ETIMEDOUT] = TARGET_ETIMEDOUT,
673 [ECONNREFUSED] = TARGET_ECONNREFUSED,
674 [EHOSTDOWN] = TARGET_EHOSTDOWN,
675 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
676 [EALREADY] = TARGET_EALREADY,
677 [EINPROGRESS] = TARGET_EINPROGRESS,
678 [ESTALE] = TARGET_ESTALE,
679 [ECANCELED] = TARGET_ECANCELED,
680 [ENOMEDIUM] = TARGET_ENOMEDIUM,
681 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
682 #ifdef ENOKEY
683 [ENOKEY] = TARGET_ENOKEY,
684 #endif
685 #ifdef EKEYEXPIRED
686 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
687 #endif
688 #ifdef EKEYREVOKED
689 [EKEYREVOKED] = TARGET_EKEYREVOKED,
690 #endif
691 #ifdef EKEYREJECTED
692 [EKEYREJECTED] = TARGET_EKEYREJECTED,
693 #endif
694 #ifdef EOWNERDEAD
695 [EOWNERDEAD] = TARGET_EOWNERDEAD,
696 #endif
697 #ifdef ENOTRECOVERABLE
698 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
699 #endif
702 static inline int host_to_target_errno(int err)
704 if(host_to_target_errno_table[err])
705 return host_to_target_errno_table[err];
706 return err;
709 static inline int target_to_host_errno(int err)
711 if (target_to_host_errno_table[err])
712 return target_to_host_errno_table[err];
713 return err;
716 static inline abi_long get_errno(abi_long ret)
718 if (ret == -1)
719 return -host_to_target_errno(errno);
720 else
721 return ret;
724 static inline int is_error(abi_long ret)
726 return (abi_ulong)ret >= (abi_ulong)(-4096);
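/*
 * Error-return conventions: the host libc wrappers report failure by
 * returning -1 and setting errno, which get_errno() converts into a negated
 * *target* errno value.  is_error() then treats anything at or above
 * (abi_ulong)(-4096) as an error, matching the Linux convention that real
 * error codes live in a small negative range, so large legitimate values
 * such as mmap addresses are not mistaken for failures.
 */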
729 char *target_strerror(int err)
731 return strerror(target_to_host_errno(err));
734 static abi_ulong target_brk;
735 static abi_ulong target_original_brk;
736 static abi_ulong brk_page;
738 void target_set_brk(abi_ulong new_brk)
740 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
741 brk_page = HOST_PAGE_ALIGN(target_brk);
744 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
745 #define DEBUGF_BRK(message, args...)
747 /* do_brk() must return target values and target errnos. */
748 abi_long do_brk(abi_ulong new_brk)
750 abi_long mapped_addr;
751 int new_alloc_size;
753 DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);
755 if (!new_brk) {
756 DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
757 return target_brk;
759 if (new_brk < target_original_brk) {
760 DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
761 return target_brk;
764 /* If the new brk is less than the highest page reserved to the
765 * target heap allocation, set it and we're almost done... */
766 if (new_brk <= brk_page) {
767 /* Heap contents are initialized to zero, as for anonymous
768 * mapped pages. */
769 if (new_brk > target_brk) {
770 memset(g2h(target_brk), 0, new_brk - target_brk);
772 target_brk = new_brk;
773 DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
774 return target_brk;
777 /* We need to allocate more memory after the brk... Note that
778 * we don't use MAP_FIXED because that will map over the top of
779 * any existing mapping (like the one with the host libc or qemu
780 * itself); instead we treat "mapped but at wrong address" as
781 * a failure and unmap again.
783 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
784 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
785 PROT_READ|PROT_WRITE,
786 MAP_ANON|MAP_PRIVATE, 0, 0));
788 if (mapped_addr == brk_page) {
789 target_brk = new_brk;
790 brk_page = HOST_PAGE_ALIGN(target_brk);
791 DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
792 return target_brk;
793 } else if (mapped_addr != -1) {
794 /* Mapped but at wrong address, meaning there wasn't actually
795 * enough space for this brk.
797 target_munmap(mapped_addr, new_alloc_size);
798 mapped_addr = -1;
799 DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
801 else {
802 DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
805 #if defined(TARGET_ALPHA)
806 /* We (partially) emulate OSF/1 on Alpha, which requires we
807 return a proper errno, not an unchanged brk value. */
808 return -TARGET_ENOMEM;
809 #endif
810 /* For everything else, return the previous break. */
811 return target_brk;
814 static inline abi_long copy_from_user_fdset(fd_set *fds,
815 abi_ulong target_fds_addr,
816 int n)
818 int i, nw, j, k;
819 abi_ulong b, *target_fds;
821 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
822 if (!(target_fds = lock_user(VERIFY_READ,
823 target_fds_addr,
824 sizeof(abi_ulong) * nw,
825 1)))
826 return -TARGET_EFAULT;
828 FD_ZERO(fds);
829 k = 0;
830 for (i = 0; i < nw; i++) {
831 /* grab the abi_ulong */
832 __get_user(b, &target_fds[i]);
833 for (j = 0; j < TARGET_ABI_BITS; j++) {
834 /* check the bit inside the abi_ulong */
835 if ((b >> j) & 1)
836 FD_SET(k, fds);
837 k++;
841 unlock_user(target_fds, target_fds_addr, 0);
843 return 0;
846 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
847 abi_ulong target_fds_addr,
848 int n)
850 if (target_fds_addr) {
851 if (copy_from_user_fdset(fds, target_fds_addr, n))
852 return -TARGET_EFAULT;
853 *fds_ptr = fds;
854 } else {
855 *fds_ptr = NULL;
857 return 0;
860 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
861 const fd_set *fds,
862 int n)
864 int i, nw, j, k;
865 abi_long v;
866 abi_ulong *target_fds;
868 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
869 if (!(target_fds = lock_user(VERIFY_WRITE,
870 target_fds_addr,
871 sizeof(abi_ulong) * nw,
872 0)))
873 return -TARGET_EFAULT;
875 k = 0;
876 for (i = 0; i < nw; i++) {
877 v = 0;
878 for (j = 0; j < TARGET_ABI_BITS; j++) {
879 v |= ((FD_ISSET(k, fds) != 0) << j);
880 k++;
882 __put_user(v, &target_fds[i]);
885 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
887 return 0;
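/*
 * The two fdset helpers above repack descriptor bits between the host's
 * opaque fd_set representation and the target's bitmap of abi_ulong words:
 * n descriptors need nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS guest
 * words, and each bit is tested or set individually so that differences in
 * word size and byte order between host and target are irrelevant.
 */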
890 #if defined(__alpha__)
891 #define HOST_HZ 1024
892 #else
893 #define HOST_HZ 100
894 #endif
896 static inline abi_long host_to_target_clock_t(long ticks)
898 #if HOST_HZ == TARGET_HZ
899 return ticks;
900 #else
901 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
902 #endif
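/*
 * clock_t values are expressed in scheduler ticks, and the tick rate can
 * differ between host and target (an Alpha host uses HZ=1024, everything
 * else here assumes 100).  With a target tick rate of 100, for instance, a
 * host value of 2048 ticks on Alpha would be rescaled to
 * 2048 * 100 / 1024 = 200 target ticks by the helper above.
 */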
905 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
906 const struct rusage *rusage)
908 struct target_rusage *target_rusage;
910 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
911 return -TARGET_EFAULT;
912 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
913 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
914 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
915 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
916 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
917 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
918 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
919 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
920 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
921 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
922 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
923 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
924 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
925 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
926 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
927 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
928 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
929 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
930 unlock_user_struct(target_rusage, target_addr, 1);
932 return 0;
935 static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
937 if (target_rlim == TARGET_RLIM_INFINITY)
938 return RLIM_INFINITY;
939 else
940 return tswapl(target_rlim);
943 static inline target_ulong host_to_target_rlim(rlim_t rlim)
945 if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
946 return TARGET_RLIM_INFINITY;
947 else
948 return tswapl(rlim);
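/*
 * RLIM_INFINITY has to be translated explicitly because its bit pattern
 * differs between host and target ABIs.  In the host-to-target direction,
 * any limit that does not fit in a target_long is also reported as
 * TARGET_RLIM_INFINITY rather than being silently truncated.
 */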
951 static inline abi_long copy_from_user_timeval(struct timeval *tv,
952 abi_ulong target_tv_addr)
954 struct target_timeval *target_tv;
956 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
957 return -TARGET_EFAULT;
959 __get_user(tv->tv_sec, &target_tv->tv_sec);
960 __get_user(tv->tv_usec, &target_tv->tv_usec);
962 unlock_user_struct(target_tv, target_tv_addr, 0);
964 return 0;
967 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
968 const struct timeval *tv)
970 struct target_timeval *target_tv;
972 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
973 return -TARGET_EFAULT;
975 __put_user(tv->tv_sec, &target_tv->tv_sec);
976 __put_user(tv->tv_usec, &target_tv->tv_usec);
978 unlock_user_struct(target_tv, target_tv_addr, 1);
980 return 0;
983 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
984 #include <mqueue.h>
986 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
987 abi_ulong target_mq_attr_addr)
989 struct target_mq_attr *target_mq_attr;
991 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
992 target_mq_attr_addr, 1))
993 return -TARGET_EFAULT;
995 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
996 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
997 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
998 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1000 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1002 return 0;
1005 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1006 const struct mq_attr *attr)
1008 struct target_mq_attr *target_mq_attr;
1010 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1011 target_mq_attr_addr, 0))
1012 return -TARGET_EFAULT;
1014 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1015 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1016 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1017 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1019 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1021 return 0;
1023 #endif
1025 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1026 /* do_select() must return target values and target errnos. */
1027 static abi_long do_select(int n,
1028 abi_ulong rfd_addr, abi_ulong wfd_addr,
1029 abi_ulong efd_addr, abi_ulong target_tv_addr)
1031 fd_set rfds, wfds, efds;
1032 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1033 struct timeval tv, *tv_ptr;
1034 abi_long ret;
1036 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1037 if (ret) {
1038 return ret;
1040 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1041 if (ret) {
1042 return ret;
1044 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1045 if (ret) {
1046 return ret;
1049 if (target_tv_addr) {
1050 if (copy_from_user_timeval(&tv, target_tv_addr))
1051 return -TARGET_EFAULT;
1052 tv_ptr = &tv;
1053 } else {
1054 tv_ptr = NULL;
1057 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1059 if (!is_error(ret)) {
1060 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1061 return -TARGET_EFAULT;
1062 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1063 return -TARGET_EFAULT;
1064 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1065 return -TARGET_EFAULT;
1067 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1068 return -TARGET_EFAULT;
1071 return ret;
1073 #endif
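/*
 * do_select() above shows the usual pattern for emulated syscalls with
 * in/out buffers: copy the guest fd_sets and timeout in, issue the host
 * select(), and on success copy the (possibly modified) fd_sets and the
 * remaining timeout back out, returning -TARGET_EFAULT whenever a guest
 * address cannot be accessed.
 */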
1075 static abi_long do_pipe2(int host_pipe[], int flags)
1077 #ifdef CONFIG_PIPE2
1078 return pipe2(host_pipe, flags);
1079 #else
1080 return -ENOSYS;
1081 #endif
1084 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1085 int flags, int is_pipe2)
1087 int host_pipe[2];
1088 abi_long ret;
1089 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1091 if (is_error(ret))
1092 return get_errno(ret);
1094 /* Several targets have special calling conventions for the original
1095 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1096 if (!is_pipe2) {
1097 #if defined(TARGET_ALPHA)
1098 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1099 return host_pipe[0];
1100 #elif defined(TARGET_MIPS)
1101 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1102 return host_pipe[0];
1103 #elif defined(TARGET_SH4)
1104 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1105 return host_pipe[0];
1106 #endif
1109 if (put_user_s32(host_pipe[0], pipedes)
1110 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1111 return -TARGET_EFAULT;
1112 return get_errno(ret);
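/*
 * As the #if block above shows, Alpha, MIPS and SH4 return the read end of
 * the pipe as the syscall result and the write end in a second register, so
 * for the original pipe (but not pipe2) syscall do_pipe() returns early on
 * those targets instead of storing both descriptors into the pipedes array.
 */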
1115 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1116 abi_ulong target_addr,
1117 socklen_t len)
1119 struct target_ip_mreqn *target_smreqn;
1121 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1122 if (!target_smreqn)
1123 return -TARGET_EFAULT;
1124 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1125 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1126 if (len == sizeof(struct target_ip_mreqn))
1127 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
1128 unlock_user(target_smreqn, target_addr, 0);
1130 return 0;
1133 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1134 abi_ulong target_addr,
1135 socklen_t len)
1137 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1138 sa_family_t sa_family;
1139 struct target_sockaddr *target_saddr;
1141 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1142 if (!target_saddr)
1143 return -TARGET_EFAULT;
1145 sa_family = tswap16(target_saddr->sa_family);
1147     /* Oops. The caller might send an incomplete sun_path; sun_path
1148      * must be terminated by \0 (see the manual page), but
1149      * unfortunately it is quite common to specify sockaddr_un
1150      * length as "strlen(x->sun_path)" while it should be
1151      * "strlen(...) + 1". We'll fix that here if needed.
1152      * The Linux kernel has a similar feature.
1155 if (sa_family == AF_UNIX) {
1156 if (len < unix_maxlen && len > 0) {
1157 char *cp = (char*)target_saddr;
1159 if ( cp[len-1] && !cp[len] )
1160 len++;
1162 if (len > unix_maxlen)
1163 len = unix_maxlen;
1166 memcpy(addr, target_saddr, len);
1167 addr->sa_family = sa_family;
1168 unlock_user(target_saddr, target_addr, 0);
1170 return 0;
1173 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1174 struct sockaddr *addr,
1175 socklen_t len)
1177 struct target_sockaddr *target_saddr;
1179 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1180 if (!target_saddr)
1181 return -TARGET_EFAULT;
1182 memcpy(target_saddr, addr, len);
1183 target_saddr->sa_family = tswap16(addr->sa_family);
1184 unlock_user(target_saddr, target_addr, len);
1186 return 0;
1189 /* ??? Should this also swap msgh->name? */
1190 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1191 struct target_msghdr *target_msgh)
1193 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1194 abi_long msg_controllen;
1195 abi_ulong target_cmsg_addr;
1196 struct target_cmsghdr *target_cmsg;
1197 socklen_t space = 0;
1199 msg_controllen = tswapl(target_msgh->msg_controllen);
1200 if (msg_controllen < sizeof (struct target_cmsghdr))
1201 goto the_end;
1202 target_cmsg_addr = tswapl(target_msgh->msg_control);
1203 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1204 if (!target_cmsg)
1205 return -TARGET_EFAULT;
1207 while (cmsg && target_cmsg) {
1208 void *data = CMSG_DATA(cmsg);
1209 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1211 int len = tswapl(target_cmsg->cmsg_len)
1212 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1214 space += CMSG_SPACE(len);
1215 if (space > msgh->msg_controllen) {
1216 space -= CMSG_SPACE(len);
1217 gemu_log("Host cmsg overflow\n");
1218 break;
1221 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1222 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1223 cmsg->cmsg_len = CMSG_LEN(len);
1225 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1226 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1227 memcpy(data, target_data, len);
1228 } else {
1229 int *fd = (int *)data;
1230 int *target_fd = (int *)target_data;
1231 int i, numfds = len / sizeof(int);
1233 for (i = 0; i < numfds; i++)
1234 fd[i] = tswap32(target_fd[i]);
1237 cmsg = CMSG_NXTHDR(msgh, cmsg);
1238 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1240 unlock_user(target_cmsg, target_cmsg_addr, 0);
1241 the_end:
1242 msgh->msg_controllen = space;
1243 return 0;
1246 /* ??? Should this also swap msgh->name? */
1247 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1248 struct msghdr *msgh)
1250 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1251 abi_long msg_controllen;
1252 abi_ulong target_cmsg_addr;
1253 struct target_cmsghdr *target_cmsg;
1254 socklen_t space = 0;
1256 msg_controllen = tswapl(target_msgh->msg_controllen);
1257 if (msg_controllen < sizeof (struct target_cmsghdr))
1258 goto the_end;
1259 target_cmsg_addr = tswapl(target_msgh->msg_control);
1260 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1261 if (!target_cmsg)
1262 return -TARGET_EFAULT;
1264 while (cmsg && target_cmsg) {
1265 void *data = CMSG_DATA(cmsg);
1266 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1268 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1270 space += TARGET_CMSG_SPACE(len);
1271 if (space > msg_controllen) {
1272 space -= TARGET_CMSG_SPACE(len);
1273 gemu_log("Target cmsg overflow\n");
1274 break;
1277 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1278 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1279 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1281 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1282 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1283 memcpy(target_data, data, len);
1284 } else {
1285 int *fd = (int *)data;
1286 int *target_fd = (int *)target_data;
1287 int i, numfds = len / sizeof(int);
1289 for (i = 0; i < numfds; i++)
1290 target_fd[i] = tswap32(fd[i]);
1293 cmsg = CMSG_NXTHDR(msgh, cmsg);
1294 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1296 unlock_user(target_cmsg, target_cmsg_addr, space);
1297 the_end:
1298 target_msgh->msg_controllen = tswapl(space);
1299 return 0;
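/*
 * For SCM_RIGHTS the ancillary payload is an array of file descriptors, so
 * the two cmsg converters above byte-swap each int individually; any other
 * ancillary type is logged as unsupported and copied through unchanged,
 * which is only safe for payloads without endian-sensitive fields.
 */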
1302 /* do_setsockopt() Must return target values and target errnos. */
1303 static abi_long do_setsockopt(int sockfd, int level, int optname,
1304 abi_ulong optval_addr, socklen_t optlen)
1306 abi_long ret;
1307 int val;
1308 struct ip_mreqn *ip_mreq;
1309 struct ip_mreq_source *ip_mreq_source;
1311 switch(level) {
1312 case SOL_TCP:
1313 /* TCP options all take an 'int' value. */
1314 if (optlen < sizeof(uint32_t))
1315 return -TARGET_EINVAL;
1317 if (get_user_u32(val, optval_addr))
1318 return -TARGET_EFAULT;
1319 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1320 break;
1321 case SOL_IP:
1322 switch(optname) {
1323 case IP_TOS:
1324 case IP_TTL:
1325 case IP_HDRINCL:
1326 case IP_ROUTER_ALERT:
1327 case IP_RECVOPTS:
1328 case IP_RETOPTS:
1329 case IP_PKTINFO:
1330 case IP_MTU_DISCOVER:
1331 case IP_RECVERR:
1332 case IP_RECVTOS:
1333 #ifdef IP_FREEBIND
1334 case IP_FREEBIND:
1335 #endif
1336 case IP_MULTICAST_TTL:
1337 case IP_MULTICAST_LOOP:
1338 val = 0;
1339 if (optlen >= sizeof(uint32_t)) {
1340 if (get_user_u32(val, optval_addr))
1341 return -TARGET_EFAULT;
1342 } else if (optlen >= 1) {
1343 if (get_user_u8(val, optval_addr))
1344 return -TARGET_EFAULT;
1346 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1347 break;
1348 case IP_ADD_MEMBERSHIP:
1349 case IP_DROP_MEMBERSHIP:
1350 if (optlen < sizeof (struct target_ip_mreq) ||
1351 optlen > sizeof (struct target_ip_mreqn))
1352 return -TARGET_EINVAL;
1354 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1355 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1356 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1357 break;
1359 case IP_BLOCK_SOURCE:
1360 case IP_UNBLOCK_SOURCE:
1361 case IP_ADD_SOURCE_MEMBERSHIP:
1362 case IP_DROP_SOURCE_MEMBERSHIP:
1363 if (optlen != sizeof (struct target_ip_mreq_source))
1364 return -TARGET_EINVAL;
1366 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1367 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1368 unlock_user (ip_mreq_source, optval_addr, 0);
1369 break;
1371 default:
1372 goto unimplemented;
1374 break;
1375 case TARGET_SOL_SOCKET:
1376 switch (optname) {
1377 /* Options with 'int' argument. */
1378 case TARGET_SO_DEBUG:
1379 optname = SO_DEBUG;
1380 break;
1381 case TARGET_SO_REUSEADDR:
1382 optname = SO_REUSEADDR;
1383 break;
1384 case TARGET_SO_TYPE:
1385 optname = SO_TYPE;
1386 break;
1387 case TARGET_SO_ERROR:
1388 optname = SO_ERROR;
1389 break;
1390 case TARGET_SO_DONTROUTE:
1391 optname = SO_DONTROUTE;
1392 break;
1393 case TARGET_SO_BROADCAST:
1394 optname = SO_BROADCAST;
1395 break;
1396 case TARGET_SO_SNDBUF:
1397 optname = SO_SNDBUF;
1398 break;
1399 case TARGET_SO_RCVBUF:
1400 optname = SO_RCVBUF;
1401 break;
1402 case TARGET_SO_KEEPALIVE:
1403 optname = SO_KEEPALIVE;
1404 break;
1405 case TARGET_SO_OOBINLINE:
1406 optname = SO_OOBINLINE;
1407 break;
1408 case TARGET_SO_NO_CHECK:
1409 optname = SO_NO_CHECK;
1410 break;
1411 case TARGET_SO_PRIORITY:
1412 optname = SO_PRIORITY;
1413 break;
1414 #ifdef SO_BSDCOMPAT
1415 case TARGET_SO_BSDCOMPAT:
1416 optname = SO_BSDCOMPAT;
1417 break;
1418 #endif
1419 case TARGET_SO_PASSCRED:
1420 optname = SO_PASSCRED;
1421 break;
1422 case TARGET_SO_TIMESTAMP:
1423 optname = SO_TIMESTAMP;
1424 break;
1425 case TARGET_SO_RCVLOWAT:
1426 optname = SO_RCVLOWAT;
1427 break;
1428 case TARGET_SO_RCVTIMEO:
1429 optname = SO_RCVTIMEO;
1430 break;
1431 case TARGET_SO_SNDTIMEO:
1432 optname = SO_SNDTIMEO;
1433 break;
1434 break;
1435 default:
1436 goto unimplemented;
1438 if (optlen < sizeof(uint32_t))
1439 return -TARGET_EINVAL;
1441 if (get_user_u32(val, optval_addr))
1442 return -TARGET_EFAULT;
1443 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1444 break;
1445 default:
1446 unimplemented:
1447 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1448 ret = -TARGET_ENOPROTOOPT;
1450 return ret;
1453 /* do_getsockopt() Must return target values and target errnos. */
1454 static abi_long do_getsockopt(int sockfd, int level, int optname,
1455 abi_ulong optval_addr, abi_ulong optlen)
1457 abi_long ret;
1458 int len, val;
1459 socklen_t lv;
1461 switch(level) {
1462 case TARGET_SOL_SOCKET:
1463 level = SOL_SOCKET;
1464 switch (optname) {
1465 /* These don't just return a single integer */
1466 case TARGET_SO_LINGER:
1467 case TARGET_SO_RCVTIMEO:
1468 case TARGET_SO_SNDTIMEO:
1469 case TARGET_SO_PEERCRED:
1470 case TARGET_SO_PEERNAME:
1471 goto unimplemented;
1472 /* Options with 'int' argument. */
1473 case TARGET_SO_DEBUG:
1474 optname = SO_DEBUG;
1475 goto int_case;
1476 case TARGET_SO_REUSEADDR:
1477 optname = SO_REUSEADDR;
1478 goto int_case;
1479 case TARGET_SO_TYPE:
1480 optname = SO_TYPE;
1481 goto int_case;
1482 case TARGET_SO_ERROR:
1483 optname = SO_ERROR;
1484 goto int_case;
1485 case TARGET_SO_DONTROUTE:
1486 optname = SO_DONTROUTE;
1487 goto int_case;
1488 case TARGET_SO_BROADCAST:
1489 optname = SO_BROADCAST;
1490 goto int_case;
1491 case TARGET_SO_SNDBUF:
1492 optname = SO_SNDBUF;
1493 goto int_case;
1494 case TARGET_SO_RCVBUF:
1495 optname = SO_RCVBUF;
1496 goto int_case;
1497 case TARGET_SO_KEEPALIVE:
1498 optname = SO_KEEPALIVE;
1499 goto int_case;
1500 case TARGET_SO_OOBINLINE:
1501 optname = SO_OOBINLINE;
1502 goto int_case;
1503 case TARGET_SO_NO_CHECK:
1504 optname = SO_NO_CHECK;
1505 goto int_case;
1506 case TARGET_SO_PRIORITY:
1507 optname = SO_PRIORITY;
1508 goto int_case;
1509 #ifdef SO_BSDCOMPAT
1510 case TARGET_SO_BSDCOMPAT:
1511 optname = SO_BSDCOMPAT;
1512 goto int_case;
1513 #endif
1514 case TARGET_SO_PASSCRED:
1515 optname = SO_PASSCRED;
1516 goto int_case;
1517 case TARGET_SO_TIMESTAMP:
1518 optname = SO_TIMESTAMP;
1519 goto int_case;
1520 case TARGET_SO_RCVLOWAT:
1521 optname = SO_RCVLOWAT;
1522 goto int_case;
1523 default:
1524 goto int_case;
1526 break;
1527 case SOL_TCP:
1528 /* TCP options all take an 'int' value. */
1529 int_case:
1530 if (get_user_u32(len, optlen))
1531 return -TARGET_EFAULT;
1532 if (len < 0)
1533 return -TARGET_EINVAL;
1534 lv = sizeof(lv);
1535 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1536 if (ret < 0)
1537 return ret;
1538 if (len > lv)
1539 len = lv;
1540 if (len == 4) {
1541 if (put_user_u32(val, optval_addr))
1542 return -TARGET_EFAULT;
1543 } else {
1544 if (put_user_u8(val, optval_addr))
1545 return -TARGET_EFAULT;
1547 if (put_user_u32(len, optlen))
1548 return -TARGET_EFAULT;
1549 break;
1550 case SOL_IP:
1551 switch(optname) {
1552 case IP_TOS:
1553 case IP_TTL:
1554 case IP_HDRINCL:
1555 case IP_ROUTER_ALERT:
1556 case IP_RECVOPTS:
1557 case IP_RETOPTS:
1558 case IP_PKTINFO:
1559 case IP_MTU_DISCOVER:
1560 case IP_RECVERR:
1561 case IP_RECVTOS:
1562 #ifdef IP_FREEBIND
1563 case IP_FREEBIND:
1564 #endif
1565 case IP_MULTICAST_TTL:
1566 case IP_MULTICAST_LOOP:
1567 if (get_user_u32(len, optlen))
1568 return -TARGET_EFAULT;
1569 if (len < 0)
1570 return -TARGET_EINVAL;
1571 lv = sizeof(lv);
1572 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1573 if (ret < 0)
1574 return ret;
1575 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1576 len = 1;
1577 if (put_user_u32(len, optlen)
1578 || put_user_u8(val, optval_addr))
1579 return -TARGET_EFAULT;
1580 } else {
1581 if (len > sizeof(int))
1582 len = sizeof(int);
1583 if (put_user_u32(len, optlen)
1584 || put_user_u32(val, optval_addr))
1585 return -TARGET_EFAULT;
1587 break;
1588 default:
1589 ret = -TARGET_ENOPROTOOPT;
1590 break;
1592 break;
1593 default:
1594 unimplemented:
1595 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1596 level, optname);
1597 ret = -TARGET_EOPNOTSUPP;
1598 break;
1600 return ret;
1603 /* FIXME
1604 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1605 * other lock functions have a return code of 0 for failure.
1607 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1608 int count, int copy)
1610 struct target_iovec *target_vec;
1611 abi_ulong base;
1612 int i;
1614 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1615 if (!target_vec)
1616 return -TARGET_EFAULT;
1617 for(i = 0;i < count; i++) {
1618 base = tswapl(target_vec[i].iov_base);
1619 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1620 if (vec[i].iov_len != 0) {
1621 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1622             /* Don't check the lock_user return value. We must call writev even
1623                if an element has an invalid base address. */
1624 } else {
1625 /* zero length pointer is ignored */
1626 vec[i].iov_base = NULL;
1629 unlock_user (target_vec, target_addr, 0);
1630 return 0;
1633 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1634 int count, int copy)
1636 struct target_iovec *target_vec;
1637 abi_ulong base;
1638 int i;
1640 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1641 if (!target_vec)
1642 return -TARGET_EFAULT;
1643 for(i = 0;i < count; i++) {
1644 if (target_vec[i].iov_base) {
1645 base = tswapl(target_vec[i].iov_base);
1646 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1649 unlock_user (target_vec, target_addr, 0);
1651 return 0;
1654 /* do_socket() Must return target values and target errnos. */
1655 static abi_long do_socket(int domain, int type, int protocol)
1657 #if defined(TARGET_MIPS)
1658 switch(type) {
1659 case TARGET_SOCK_DGRAM:
1660 type = SOCK_DGRAM;
1661 break;
1662 case TARGET_SOCK_STREAM:
1663 type = SOCK_STREAM;
1664 break;
1665 case TARGET_SOCK_RAW:
1666 type = SOCK_RAW;
1667 break;
1668 case TARGET_SOCK_RDM:
1669 type = SOCK_RDM;
1670 break;
1671 case TARGET_SOCK_SEQPACKET:
1672 type = SOCK_SEQPACKET;
1673 break;
1674 case TARGET_SOCK_PACKET:
1675 type = SOCK_PACKET;
1676 break;
1678 #endif
1679 if (domain == PF_NETLINK)
1680         return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1681 return get_errno(socket(domain, type, protocol));
1684 /* do_bind() Must return target values and target errnos. */
1685 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1686 socklen_t addrlen)
1688 void *addr;
1689 abi_long ret;
1691 if ((int)addrlen < 0) {
1692 return -TARGET_EINVAL;
1695 addr = alloca(addrlen+1);
1697 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1698 if (ret)
1699 return ret;
1701 return get_errno(bind(sockfd, addr, addrlen));
1704 /* do_connect() Must return target values and target errnos. */
1705 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1706 socklen_t addrlen)
1708 void *addr;
1709 abi_long ret;
1711 if ((int)addrlen < 0) {
1712 return -TARGET_EINVAL;
1715 addr = alloca(addrlen);
1717 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1718 if (ret)
1719 return ret;
1721 return get_errno(connect(sockfd, addr, addrlen));
1724 /* do_sendrecvmsg() Must return target values and target errnos. */
1725 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1726 int flags, int send)
1728 abi_long ret, len;
1729 struct target_msghdr *msgp;
1730 struct msghdr msg;
1731 int count;
1732 struct iovec *vec;
1733 abi_ulong target_vec;
1735 /* FIXME */
1736 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1737 msgp,
1738 target_msg,
1739 send ? 1 : 0))
1740 return -TARGET_EFAULT;
1741 if (msgp->msg_name) {
1742 msg.msg_namelen = tswap32(msgp->msg_namelen);
1743 msg.msg_name = alloca(msg.msg_namelen);
1744 ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1745 msg.msg_namelen);
1746 if (ret) {
1747 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1748 return ret;
1750 } else {
1751 msg.msg_name = NULL;
1752 msg.msg_namelen = 0;
1754 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1755 msg.msg_control = alloca(msg.msg_controllen);
1756 msg.msg_flags = tswap32(msgp->msg_flags);
1758 count = tswapl(msgp->msg_iovlen);
1759 vec = alloca(count * sizeof(struct iovec));
1760 target_vec = tswapl(msgp->msg_iov);
1761 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1762 msg.msg_iovlen = count;
1763 msg.msg_iov = vec;
1765 if (send) {
1766 ret = target_to_host_cmsg(&msg, msgp);
1767 if (ret == 0)
1768 ret = get_errno(sendmsg(fd, &msg, flags));
1769 } else {
1770 ret = get_errno(recvmsg(fd, &msg, flags));
1771 if (!is_error(ret)) {
1772 len = ret;
1773 ret = host_to_target_cmsg(msgp, &msg);
1774 if (!is_error(ret))
1775 ret = len;
1778 unlock_iovec(vec, target_vec, count, !send);
1779 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1780 return ret;
1783 /* do_accept() Must return target values and target errnos. */
1784 static abi_long do_accept(int fd, abi_ulong target_addr,
1785 abi_ulong target_addrlen_addr)
1787 socklen_t addrlen;
1788 void *addr;
1789 abi_long ret;
1791 if (target_addr == 0)
1792 return get_errno(accept(fd, NULL, NULL));
1794 /* linux returns EINVAL if addrlen pointer is invalid */
1795 if (get_user_u32(addrlen, target_addrlen_addr))
1796 return -TARGET_EINVAL;
1798 if ((int)addrlen < 0) {
1799 return -TARGET_EINVAL;
1802 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1803 return -TARGET_EINVAL;
1805 addr = alloca(addrlen);
1807 ret = get_errno(accept(fd, addr, &addrlen));
1808 if (!is_error(ret)) {
1809 host_to_target_sockaddr(target_addr, addr, addrlen);
1810 if (put_user_u32(addrlen, target_addrlen_addr))
1811 ret = -TARGET_EFAULT;
1813 return ret;
1816 /* do_getpeername() Must return target values and target errnos. */
1817 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1818 abi_ulong target_addrlen_addr)
1820 socklen_t addrlen;
1821 void *addr;
1822 abi_long ret;
1824 if (get_user_u32(addrlen, target_addrlen_addr))
1825 return -TARGET_EFAULT;
1827 if ((int)addrlen < 0) {
1828 return -TARGET_EINVAL;
1831 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1832 return -TARGET_EFAULT;
1834 addr = alloca(addrlen);
1836 ret = get_errno(getpeername(fd, addr, &addrlen));
1837 if (!is_error(ret)) {
1838 host_to_target_sockaddr(target_addr, addr, addrlen);
1839 if (put_user_u32(addrlen, target_addrlen_addr))
1840 ret = -TARGET_EFAULT;
1842 return ret;
1845 /* do_getsockname() Must return target values and target errnos. */
1846 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1847 abi_ulong target_addrlen_addr)
1849 socklen_t addrlen;
1850 void *addr;
1851 abi_long ret;
1853 if (get_user_u32(addrlen, target_addrlen_addr))
1854 return -TARGET_EFAULT;
1856 if ((int)addrlen < 0) {
1857 return -TARGET_EINVAL;
1860 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1861 return -TARGET_EFAULT;
1863 addr = alloca(addrlen);
1865 ret = get_errno(getsockname(fd, addr, &addrlen));
1866 if (!is_error(ret)) {
1867 host_to_target_sockaddr(target_addr, addr, addrlen);
1868 if (put_user_u32(addrlen, target_addrlen_addr))
1869 ret = -TARGET_EFAULT;
1871 return ret;
1874 /* do_socketpair() Must return target values and target errnos. */
1875 static abi_long do_socketpair(int domain, int type, int protocol,
1876 abi_ulong target_tab_addr)
1878 int tab[2];
1879 abi_long ret;
1881 ret = get_errno(socketpair(domain, type, protocol, tab));
1882 if (!is_error(ret)) {
1883 if (put_user_s32(tab[0], target_tab_addr)
1884 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1885 ret = -TARGET_EFAULT;
1887 return ret;
1890 /* do_sendto() Must return target values and target errnos. */
1891 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1892 abi_ulong target_addr, socklen_t addrlen)
1894 void *addr;
1895 void *host_msg;
1896 abi_long ret;
1898 if ((int)addrlen < 0) {
1899 return -TARGET_EINVAL;
1902 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1903 if (!host_msg)
1904 return -TARGET_EFAULT;
1905 if (target_addr) {
1906 addr = alloca(addrlen);
1907 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1908 if (ret) {
1909 unlock_user(host_msg, msg, 0);
1910 return ret;
1912 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1913 } else {
1914 ret = get_errno(send(fd, host_msg, len, flags));
1916 unlock_user(host_msg, msg, 0);
1917 return ret;
1920 /* do_recvfrom() Must return target values and target errnos. */
1921 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1922 abi_ulong target_addr,
1923 abi_ulong target_addrlen)
1925 socklen_t addrlen;
1926 void *addr;
1927 void *host_msg;
1928 abi_long ret;
1930 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1931 if (!host_msg)
1932 return -TARGET_EFAULT;
1933 if (target_addr) {
1934 if (get_user_u32(addrlen, target_addrlen)) {
1935 ret = -TARGET_EFAULT;
1936 goto fail;
1938 if ((int)addrlen < 0) {
1939 ret = -TARGET_EINVAL;
1940 goto fail;
1942 addr = alloca(addrlen);
1943 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1944 } else {
1945 addr = NULL; /* To keep compiler quiet. */
1946 ret = get_errno(recv(fd, host_msg, len, flags));
1948 if (!is_error(ret)) {
1949 if (target_addr) {
1950 host_to_target_sockaddr(target_addr, addr, addrlen);
1951 if (put_user_u32(addrlen, target_addrlen)) {
1952 ret = -TARGET_EFAULT;
1953 goto fail;
1956 unlock_user(host_msg, msg, len);
1957 } else {
1958 fail:
1959 unlock_user(host_msg, msg, 0);
1961 return ret;
1964 #ifdef TARGET_NR_socketcall
1965 /* do_socketcall() Must return target values and target errnos. */
1966 static abi_long do_socketcall(int num, abi_ulong vptr)
1968 abi_long ret;
1969 const int n = sizeof(abi_ulong);
1971 switch(num) {
1972 case SOCKOP_socket:
1974 abi_ulong domain, type, protocol;
1976 if (get_user_ual(domain, vptr)
1977 || get_user_ual(type, vptr + n)
1978 || get_user_ual(protocol, vptr + 2 * n))
1979 return -TARGET_EFAULT;
1981 ret = do_socket(domain, type, protocol);
1983 break;
1984 case SOCKOP_bind:
1986 abi_ulong sockfd;
1987 abi_ulong target_addr;
1988 socklen_t addrlen;
1990 if (get_user_ual(sockfd, vptr)
1991 || get_user_ual(target_addr, vptr + n)
1992 || get_user_ual(addrlen, vptr + 2 * n))
1993 return -TARGET_EFAULT;
1995 ret = do_bind(sockfd, target_addr, addrlen);
1997 break;
1998 case SOCKOP_connect:
2000 abi_ulong sockfd;
2001 abi_ulong target_addr;
2002 socklen_t addrlen;
2004 if (get_user_ual(sockfd, vptr)
2005 || get_user_ual(target_addr, vptr + n)
2006 || get_user_ual(addrlen, vptr + 2 * n))
2007 return -TARGET_EFAULT;
2009 ret = do_connect(sockfd, target_addr, addrlen);
2011 break;
2012 case SOCKOP_listen:
2014 abi_ulong sockfd, backlog;
2016 if (get_user_ual(sockfd, vptr)
2017 || get_user_ual(backlog, vptr + n))
2018 return -TARGET_EFAULT;
2020 ret = get_errno(listen(sockfd, backlog));
2022 break;
2023 case SOCKOP_accept:
2025 abi_ulong sockfd;
2026 abi_ulong target_addr, target_addrlen;
2028 if (get_user_ual(sockfd, vptr)
2029 || get_user_ual(target_addr, vptr + n)
2030 || get_user_ual(target_addrlen, vptr + 2 * n))
2031 return -TARGET_EFAULT;
2033 ret = do_accept(sockfd, target_addr, target_addrlen);
2035 break;
2036 case SOCKOP_getsockname:
2038 abi_ulong sockfd;
2039 abi_ulong target_addr, target_addrlen;
2041 if (get_user_ual(sockfd, vptr)
2042 || get_user_ual(target_addr, vptr + n)
2043 || get_user_ual(target_addrlen, vptr + 2 * n))
2044 return -TARGET_EFAULT;
2046 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2048 break;
2049 case SOCKOP_getpeername:
2051 abi_ulong sockfd;
2052 abi_ulong target_addr, target_addrlen;
2054 if (get_user_ual(sockfd, vptr)
2055 || get_user_ual(target_addr, vptr + n)
2056 || get_user_ual(target_addrlen, vptr + 2 * n))
2057 return -TARGET_EFAULT;
2059 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2061 break;
2062 case SOCKOP_socketpair:
2064 abi_ulong domain, type, protocol;
2065 abi_ulong tab;
2067 if (get_user_ual(domain, vptr)
2068 || get_user_ual(type, vptr + n)
2069 || get_user_ual(protocol, vptr + 2 * n)
2070 || get_user_ual(tab, vptr + 3 * n))
2071 return -TARGET_EFAULT;
2073 ret = do_socketpair(domain, type, protocol, tab);
2075 break;
2076 case SOCKOP_send:
2078 abi_ulong sockfd;
2079 abi_ulong msg;
2080 size_t len;
2081 abi_ulong flags;
2083 if (get_user_ual(sockfd, vptr)
2084 || get_user_ual(msg, vptr + n)
2085 || get_user_ual(len, vptr + 2 * n)
2086 || get_user_ual(flags, vptr + 3 * n))
2087 return -TARGET_EFAULT;
2089 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2091 break;
2092 case SOCKOP_recv:
2094 abi_ulong sockfd;
2095 abi_ulong msg;
2096 size_t len;
2097 abi_ulong flags;
2099 if (get_user_ual(sockfd, vptr)
2100 || get_user_ual(msg, vptr + n)
2101 || get_user_ual(len, vptr + 2 * n)
2102 || get_user_ual(flags, vptr + 3 * n))
2103 return -TARGET_EFAULT;
2105 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2107 break;
2108 case SOCKOP_sendto:
2110 abi_ulong sockfd;
2111 abi_ulong msg;
2112 size_t len;
2113 abi_ulong flags;
2114 abi_ulong addr;
2115 socklen_t addrlen;
2117 if (get_user_ual(sockfd, vptr)
2118 || get_user_ual(msg, vptr + n)
2119 || get_user_ual(len, vptr + 2 * n)
2120 || get_user_ual(flags, vptr + 3 * n)
2121 || get_user_ual(addr, vptr + 4 * n)
2122 || get_user_ual(addrlen, vptr + 5 * n))
2123 return -TARGET_EFAULT;
2125 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2127 break;
2128 case SOCKOP_recvfrom:
2130 abi_ulong sockfd;
2131 abi_ulong msg;
2132 size_t len;
2133 abi_ulong flags;
2134 abi_ulong addr;
2135 socklen_t addrlen;
2137 if (get_user_ual(sockfd, vptr)
2138 || get_user_ual(msg, vptr + n)
2139 || get_user_ual(len, vptr + 2 * n)
2140 || get_user_ual(flags, vptr + 3 * n)
2141 || get_user_ual(addr, vptr + 4 * n)
2142 || get_user_ual(addrlen, vptr + 5 * n))
2143 return -TARGET_EFAULT;
2145 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2147 break;
2148 case SOCKOP_shutdown:
2150 abi_ulong sockfd, how;
2152 if (get_user_ual(sockfd, vptr)
2153 || get_user_ual(how, vptr + n))
2154 return -TARGET_EFAULT;
2156 ret = get_errno(shutdown(sockfd, how));
2158 break;
2159 case SOCKOP_sendmsg:
2160 case SOCKOP_recvmsg:
2162 abi_ulong fd;
2163 abi_ulong target_msg;
2164 abi_ulong flags;
2166 if (get_user_ual(fd, vptr)
2167 || get_user_ual(target_msg, vptr + n)
2168 || get_user_ual(flags, vptr + 2 * n))
2169 return -TARGET_EFAULT;
2171 ret = do_sendrecvmsg(fd, target_msg, flags,
2172 (num == SOCKOP_sendmsg));
2174 break;
2175 case SOCKOP_setsockopt:
2177 abi_ulong sockfd;
2178 abi_ulong level;
2179 abi_ulong optname;
2180 abi_ulong optval;
2181 socklen_t optlen;
2183 if (get_user_ual(sockfd, vptr)
2184 || get_user_ual(level, vptr + n)
2185 || get_user_ual(optname, vptr + 2 * n)
2186 || get_user_ual(optval, vptr + 3 * n)
2187 || get_user_ual(optlen, vptr + 4 * n))
2188 return -TARGET_EFAULT;
2190 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2192 break;
2193 case SOCKOP_getsockopt:
2195 abi_ulong sockfd;
2196 abi_ulong level;
2197 abi_ulong optname;
2198 abi_ulong optval;
2199 socklen_t optlen;
2201 if (get_user_ual(sockfd, vptr)
2202 || get_user_ual(level, vptr + n)
2203 || get_user_ual(optname, vptr + 2 * n)
2204 || get_user_ual(optval, vptr + 3 * n)
2205 || get_user_ual(optlen, vptr + 4 * n))
2206 return -TARGET_EFAULT;
2208 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2210 break;
2211 default:
2212 gemu_log("Unsupported socketcall: %d\n", num);
2213 ret = -TARGET_ENOSYS;
2214 break;
2216 return ret;
2218 #endif
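/* Record the guest address and size of each attached SysV shared memory
 * segment so that do_shmdt() can clear the page flags that do_shmat()
 * set up when the segment was mapped. */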
2220 #define N_SHM_REGIONS 32
2222 static struct shm_region {
2223 abi_ulong start;
2224 abi_ulong size;
2225 } shm_regions[N_SHM_REGIONS];
2227 struct target_ipc_perm
2229 abi_long __key;
2230 abi_ulong uid;
2231 abi_ulong gid;
2232 abi_ulong cuid;
2233 abi_ulong cgid;
2234 unsigned short int mode;
2235 unsigned short int __pad1;
2236 unsigned short int __seq;
2237 unsigned short int __pad2;
2238 abi_ulong __unused1;
2239 abi_ulong __unused2;
2242 struct target_semid_ds
2244 struct target_ipc_perm sem_perm;
2245 abi_ulong sem_otime;
2246 abi_ulong __unused1;
2247 abi_ulong sem_ctime;
2248 abi_ulong __unused2;
2249 abi_ulong sem_nsems;
2250 abi_ulong __unused3;
2251 abi_ulong __unused4;
2254 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2255 abi_ulong target_addr)
2257 struct target_ipc_perm *target_ip;
2258 struct target_semid_ds *target_sd;
2260 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2261 return -TARGET_EFAULT;
2262 target_ip = &(target_sd->sem_perm);
2263 host_ip->__key = tswapl(target_ip->__key);
2264 host_ip->uid = tswapl(target_ip->uid);
2265 host_ip->gid = tswapl(target_ip->gid);
2266 host_ip->cuid = tswapl(target_ip->cuid);
2267 host_ip->cgid = tswapl(target_ip->cgid);
2268 host_ip->mode = tswapl(target_ip->mode);
2269 unlock_user_struct(target_sd, target_addr, 0);
2270 return 0;
2273 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2274 struct ipc_perm *host_ip)
2276 struct target_ipc_perm *target_ip;
2277 struct target_semid_ds *target_sd;
2279 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2280 return -TARGET_EFAULT;
2281 target_ip = &(target_sd->sem_perm);
2282 target_ip->__key = tswapl(host_ip->__key);
2283 target_ip->uid = tswapl(host_ip->uid);
2284 target_ip->gid = tswapl(host_ip->gid);
2285 target_ip->cuid = tswapl(host_ip->cuid);
2286 target_ip->cgid = tswapl(host_ip->cgid);
2287 target_ip->mode = tswapl(host_ip->mode);
2288 unlock_user_struct(target_sd, target_addr, 1);
2289 return 0;
2292 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2293 abi_ulong target_addr)
2295 struct target_semid_ds *target_sd;
2297 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2298 return -TARGET_EFAULT;
2299 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2300 return -TARGET_EFAULT;
2301 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2302 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2303 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2304 unlock_user_struct(target_sd, target_addr, 0);
2305 return 0;
2308 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2309 struct semid_ds *host_sd)
2311 struct target_semid_ds *target_sd;
2313 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2314 return -TARGET_EFAULT;
2315 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2316 return -TARGET_EFAULT;
2317 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2318 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2319 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2320 unlock_user_struct(target_sd, target_addr, 1);
2321 return 0;
2324 struct target_seminfo {
2325 int semmap;
2326 int semmni;
2327 int semmns;
2328 int semmnu;
2329 int semmsl;
2330 int semopm;
2331 int semume;
2332 int semusz;
2333 int semvmx;
2334 int semaem;
2337 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2338 struct seminfo *host_seminfo)
2340 struct target_seminfo *target_seminfo;
2341 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2342 return -TARGET_EFAULT;
2343 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2344 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2345 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2346 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2347 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2348 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2349 __put_user(host_seminfo->semume, &target_seminfo->semume);
2350 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2351 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2352 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2353 unlock_user_struct(target_seminfo, target_addr, 1);
2354 return 0;
2357 union semun {
2358 int val;
2359 struct semid_ds *buf;
2360 unsigned short *array;
2361 struct seminfo *__buf;
2364 union target_semun {
2365 int val;
2366 abi_ulong buf;
2367 abi_ulong array;
2368 abi_ulong __buf;
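/* GETALL/SETALL work on an array of nsems unsigned shorts.  The two
 * helpers below first query the semaphore set with IPC_STAT to learn
 * nsems, then copy the values between target memory and a temporary
 * host array. */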
2371 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2372 abi_ulong target_addr)
2374 int nsems;
2375 unsigned short *array;
2376 union semun semun;
2377 struct semid_ds semid_ds;
2378 int i, ret;
2380 semun.buf = &semid_ds;
2382 ret = semctl(semid, 0, IPC_STAT, semun);
2383 if (ret == -1)
2384 return get_errno(ret);
2386 nsems = semid_ds.sem_nsems;
2388 *host_array = malloc(nsems*sizeof(unsigned short));
2389 array = lock_user(VERIFY_READ, target_addr,
2390 nsems*sizeof(unsigned short), 1);
2391 if (!array)
2392 return -TARGET_EFAULT;
2394 for(i=0; i<nsems; i++) {
2395 __get_user((*host_array)[i], &array[i]);
2397 unlock_user(array, target_addr, 0);
2399 return 0;
2402 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2403 unsigned short **host_array)
2405 int nsems;
2406 unsigned short *array;
2407 union semun semun;
2408 struct semid_ds semid_ds;
2409 int i, ret;
2411 semun.buf = &semid_ds;
2413 ret = semctl(semid, 0, IPC_STAT, semun);
2414 if (ret == -1)
2415 return get_errno(ret);
2417 nsems = semid_ds.sem_nsems;
2419 array = lock_user(VERIFY_WRITE, target_addr,
2420 nsems*sizeof(unsigned short), 0);
2421 if (!array)
2422 return -TARGET_EFAULT;
2424 for(i=0; i<nsems; i++) {
2425 __put_user((*host_array)[i], &array[i]);
2427 free(*host_array);
2428 unlock_user(array, target_addr, 1);
2430 return 0;
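/* do_semctl(): mask cmd to its low byte (dropping flag bits such as
 * IPC_64), convert the semun argument according to the command class
 * (plain value, value array, semid_ds buffer or seminfo buffer), run the
 * host semctl() and convert any output back to the target layout. */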
2433 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2434 union target_semun target_su)
2436 union semun arg;
2437 struct semid_ds dsarg;
2438 unsigned short *array = NULL;
2439 struct seminfo seminfo;
2440 abi_long ret = -TARGET_EINVAL;
2441 abi_long err;
2442 cmd &= 0xff;
2444 switch( cmd ) {
2445 case GETVAL:
2446 case SETVAL:
2447 arg.val = tswapl(target_su.val);
2448 ret = get_errno(semctl(semid, semnum, cmd, arg));
2449 target_su.val = tswapl(arg.val);
2450 break;
2451 case GETALL:
2452 case SETALL:
2453 err = target_to_host_semarray(semid, &array, target_su.array);
2454 if (err)
2455 return err;
2456 arg.array = array;
2457 ret = get_errno(semctl(semid, semnum, cmd, arg));
2458 err = host_to_target_semarray(semid, target_su.array, &array);
2459 if (err)
2460 return err;
2461 break;
2462 case IPC_STAT:
2463 case IPC_SET:
2464 case SEM_STAT:
2465 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2466 if (err)
2467 return err;
2468 arg.buf = &dsarg;
2469 ret = get_errno(semctl(semid, semnum, cmd, arg));
2470 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2471 if (err)
2472 return err;
2473 break;
2474 case IPC_INFO:
2475 case SEM_INFO:
2476 arg.__buf = &seminfo;
2477 ret = get_errno(semctl(semid, semnum, cmd, arg));
2478 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2479 if (err)
2480 return err;
2481 break;
2482 case IPC_RMID:
2483 case GETPID:
2484 case GETNCNT:
2485 case GETZCNT:
2486 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2487 break;
2490 return ret;
2493 struct target_sembuf {
2494 unsigned short sem_num;
2495 short sem_op;
2496 short sem_flg;
2499 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2500 abi_ulong target_addr,
2501 unsigned nsops)
2503 struct target_sembuf *target_sembuf;
2504 int i;
2506 target_sembuf = lock_user(VERIFY_READ, target_addr,
2507 nsops*sizeof(struct target_sembuf), 1);
2508 if (!target_sembuf)
2509 return -TARGET_EFAULT;
2511 for(i=0; i<nsops; i++) {
2512 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2513 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2514 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2517 unlock_user(target_sembuf, target_addr, 0);
2519 return 0;
2522 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2524 struct sembuf sops[nsops];
2526 if (target_to_host_sembuf(sops, ptr, nsops))
2527 return -TARGET_EFAULT;
2529 return semop(semid, sops, nsops);
2532 struct target_msqid_ds
2534 struct target_ipc_perm msg_perm;
2535 abi_ulong msg_stime;
2536 #if TARGET_ABI_BITS == 32
2537 abi_ulong __unused1;
2538 #endif
2539 abi_ulong msg_rtime;
2540 #if TARGET_ABI_BITS == 32
2541 abi_ulong __unused2;
2542 #endif
2543 abi_ulong msg_ctime;
2544 #if TARGET_ABI_BITS == 32
2545 abi_ulong __unused3;
2546 #endif
2547 abi_ulong __msg_cbytes;
2548 abi_ulong msg_qnum;
2549 abi_ulong msg_qbytes;
2550 abi_ulong msg_lspid;
2551 abi_ulong msg_lrpid;
2552 abi_ulong __unused4;
2553 abi_ulong __unused5;
2556 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2557 abi_ulong target_addr)
2559 struct target_msqid_ds *target_md;
2561 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2562 return -TARGET_EFAULT;
2563 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2564 return -TARGET_EFAULT;
2565 host_md->msg_stime = tswapl(target_md->msg_stime);
2566 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2567 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2568 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2569 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2570 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2571 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2572 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2573 unlock_user_struct(target_md, target_addr, 0);
2574 return 0;
2577 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2578 struct msqid_ds *host_md)
2580 struct target_msqid_ds *target_md;
2582 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2583 return -TARGET_EFAULT;
2584 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2585 return -TARGET_EFAULT;
2586 target_md->msg_stime = tswapl(host_md->msg_stime);
2587 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2588 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2589 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2590 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2591 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2592 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2593 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2594 unlock_user_struct(target_md, target_addr, 1);
2595 return 0;
2598 struct target_msginfo {
2599 int msgpool;
2600 int msgmap;
2601 int msgmax;
2602 int msgmnb;
2603 int msgmni;
2604 int msgssz;
2605 int msgtql;
2606 unsigned short int msgseg;
2609 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2610 struct msginfo *host_msginfo)
2612 struct target_msginfo *target_msginfo;
2613 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2614 return -TARGET_EFAULT;
2615 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2616 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2617 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2618 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2619 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2620 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2621 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2622 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2623 unlock_user_struct(target_msginfo, target_addr, 1);
2624 return 0;
2627 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2629 struct msqid_ds dsarg;
2630 struct msginfo msginfo;
2631 abi_long ret = -TARGET_EINVAL;
2633 cmd &= 0xff;
2635 switch (cmd) {
2636 case IPC_STAT:
2637 case IPC_SET:
2638 case MSG_STAT:
2639 if (target_to_host_msqid_ds(&dsarg,ptr))
2640 return -TARGET_EFAULT;
2641 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2642 if (host_to_target_msqid_ds(ptr,&dsarg))
2643 return -TARGET_EFAULT;
2644 break;
2645 case IPC_RMID:
2646 ret = get_errno(msgctl(msgid, cmd, NULL));
2647 break;
2648 case IPC_INFO:
2649 case MSG_INFO:
2650 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2651 if (host_to_target_msginfo(ptr, &msginfo))
2652 return -TARGET_EFAULT;
2653 break;
2656 return ret;
2659 struct target_msgbuf {
2660 abi_long mtype;
2661 char mtext[1];
2664 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2665 unsigned int msgsz, int msgflg)
2667 struct target_msgbuf *target_mb;
2668 struct msgbuf *host_mb;
2669 abi_long ret = 0;
2671 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2672 return -TARGET_EFAULT;
2673 host_mb = malloc(msgsz+sizeof(long));
2674 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2675 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2676 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2677 free(host_mb);
2678 unlock_user_struct(target_mb, msgp, 0);
2680 return ret;
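/* do_msgrcv(): receive into a temporary host buffer and, when msgrcv()
 * returns data, copy mtype plus the number of mtext bytes actually
 * received back into the target msgbuf. */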
2683 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2684 unsigned int msgsz, abi_long msgtyp,
2685 int msgflg)
2687 struct target_msgbuf *target_mb;
2688 char *target_mtext;
2689 struct msgbuf *host_mb;
2690 abi_long ret = 0;
2692 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2693 return -TARGET_EFAULT;
2695 host_mb = malloc(msgsz+sizeof(long));
2696 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2698 if (ret > 0) {
2699 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2700 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2701 if (!target_mtext) {
2702 ret = -TARGET_EFAULT;
2703 goto end;
2705 memcpy(target_mb->mtext, host_mb->mtext, ret);
2706 unlock_user(target_mtext, target_mtext_addr, ret);
2709 target_mb->mtype = tswapl(host_mb->mtype);
2710 free(host_mb);
2712 end:
2713 if (target_mb)
2714 unlock_user_struct(target_mb, msgp, 1);
2715 return ret;
2718 struct target_shmid_ds
2720 struct target_ipc_perm shm_perm;
2721 abi_ulong shm_segsz;
2722 abi_ulong shm_atime;
2723 #if TARGET_ABI_BITS == 32
2724 abi_ulong __unused1;
2725 #endif
2726 abi_ulong shm_dtime;
2727 #if TARGET_ABI_BITS == 32
2728 abi_ulong __unused2;
2729 #endif
2730 abi_ulong shm_ctime;
2731 #if TARGET_ABI_BITS == 32
2732 abi_ulong __unused3;
2733 #endif
2734 int shm_cpid;
2735 int shm_lpid;
2736 abi_ulong shm_nattch;
2737 unsigned long int __unused4;
2738 unsigned long int __unused5;
2741 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2742 abi_ulong target_addr)
2744 struct target_shmid_ds *target_sd;
2746 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2747 return -TARGET_EFAULT;
2748 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2749 return -TARGET_EFAULT;
2750 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2751 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2752 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2753 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2754 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2755 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2756 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2757 unlock_user_struct(target_sd, target_addr, 0);
2758 return 0;
2761 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2762 struct shmid_ds *host_sd)
2764 struct target_shmid_ds *target_sd;
2766 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2767 return -TARGET_EFAULT;
2768 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2769 return -TARGET_EFAULT;
2770 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2771 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2772 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2773 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2774 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2775 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2776 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2777 unlock_user_struct(target_sd, target_addr, 1);
2778 return 0;
2781 struct target_shminfo {
2782 abi_ulong shmmax;
2783 abi_ulong shmmin;
2784 abi_ulong shmmni;
2785 abi_ulong shmseg;
2786 abi_ulong shmall;
2789 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2790 struct shminfo *host_shminfo)
2792 struct target_shminfo *target_shminfo;
2793 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2794 return -TARGET_EFAULT;
2795 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2796 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2797 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2798 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2799 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2800 unlock_user_struct(target_shminfo, target_addr, 1);
2801 return 0;
2804 struct target_shm_info {
2805 int used_ids;
2806 abi_ulong shm_tot;
2807 abi_ulong shm_rss;
2808 abi_ulong shm_swp;
2809 abi_ulong swap_attempts;
2810 abi_ulong swap_successes;
2813 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2814 struct shm_info *host_shm_info)
2816 struct target_shm_info *target_shm_info;
2817 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2818 return -TARGET_EFAULT;
2819 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2820 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2821 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2822 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2823 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2824 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2825 unlock_user_struct(target_shm_info, target_addr, 1);
2826 return 0;
2829 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2831 struct shmid_ds dsarg;
2832 struct shminfo shminfo;
2833 struct shm_info shm_info;
2834 abi_long ret = -TARGET_EINVAL;
2836 cmd &= 0xff;
2838 switch(cmd) {
2839 case IPC_STAT:
2840 case IPC_SET:
2841 case SHM_STAT:
2842 if (target_to_host_shmid_ds(&dsarg, buf))
2843 return -TARGET_EFAULT;
2844 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2845 if (host_to_target_shmid_ds(buf, &dsarg))
2846 return -TARGET_EFAULT;
2847 break;
2848 case IPC_INFO:
2849 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2850 if (host_to_target_shminfo(buf, &shminfo))
2851 return -TARGET_EFAULT;
2852 break;
2853 case SHM_INFO:
2854 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2855 if (host_to_target_shm_info(buf, &shm_info))
2856 return -TARGET_EFAULT;
2857 break;
2858 case IPC_RMID:
2859 case SHM_LOCK:
2860 case SHM_UNLOCK:
2861 ret = get_errno(shmctl(shmid, cmd, NULL));
2862 break;
2865 return ret;
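/* do_shmat(): query the segment size with IPC_STAT; if the caller gave no
 * address, pick one with mmap_find_vma() and attach there with SHM_REMAP;
 * then update the guest page protection flags and record the mapping in
 * shm_regions[] so that do_shmdt() can undo it later. */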
2868 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2870 abi_long raddr;
2871 void *host_raddr;
2872 struct shmid_ds shm_info;
2873 int i,ret;
2875 /* find out the length of the shared memory segment */
2876 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2877 if (is_error(ret)) {
2878 /* can't get length, bail out */
2879 return ret;
2882 mmap_lock();
2884 if (shmaddr)
2885 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2886 else {
2887 abi_ulong mmap_start;
2889 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2891 if (mmap_start == -1) {
2892 errno = ENOMEM;
2893 host_raddr = (void *)-1;
2894 } else
2895 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2898 if (host_raddr == (void *)-1) {
2899 mmap_unlock();
2900 return get_errno((long)host_raddr);
2902 raddr=h2g((unsigned long)host_raddr);
2904 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2905 PAGE_VALID | PAGE_READ |
2906 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2908 for (i = 0; i < N_SHM_REGIONS; i++) {
2909 if (shm_regions[i].start == 0) {
2910 shm_regions[i].start = raddr;
2911 shm_regions[i].size = shm_info.shm_segsz;
2912 break;
2916 mmap_unlock();
2917 return raddr;
2921 static inline abi_long do_shmdt(abi_ulong shmaddr)
2923 int i;
2925 for (i = 0; i < N_SHM_REGIONS; ++i) {
2926 if (shm_regions[i].start == shmaddr) {
2927 shm_regions[i].start = 0;
2928 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
2929 break;
2933 return get_errno(shmdt(g2h(shmaddr)));
2936 #ifdef TARGET_NR_ipc
2937 /* ??? This only works with linear mappings. */
2938 /* do_ipc() must return target values and target errnos. */
2939 static abi_long do_ipc(unsigned int call, int first,
2940 int second, int third,
2941 abi_long ptr, abi_long fifth)
2943 int version;
2944 abi_long ret = 0;
2946 version = call >> 16;
2947 call &= 0xffff;
2949 switch (call) {
2950 case IPCOP_semop:
2951 ret = do_semop(first, ptr, second);
2952 break;
2954 case IPCOP_semget:
2955 ret = get_errno(semget(first, second, third));
2956 break;
2958 case IPCOP_semctl:
2959 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2960 break;
2962 case IPCOP_msgget:
2963 ret = get_errno(msgget(first, second));
2964 break;
2966 case IPCOP_msgsnd:
2967 ret = do_msgsnd(first, ptr, second, third);
2968 break;
2970 case IPCOP_msgctl:
2971 ret = do_msgctl(first, second, ptr);
2972 break;
2974 case IPCOP_msgrcv:
2975 switch (version) {
2976 case 0:
2978 struct target_ipc_kludge {
2979 abi_long msgp;
2980 abi_long msgtyp;
2981 } *tmp;
2983 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2984 ret = -TARGET_EFAULT;
2985 break;
2988 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2990 unlock_user_struct(tmp, ptr, 0);
2991 break;
2993 default:
2994 ret = do_msgrcv(first, ptr, second, fifth, third);
2996 break;
2998 case IPCOP_shmat:
2999 switch (version) {
3000 default:
3002 abi_ulong raddr;
3003 raddr = do_shmat(first, ptr, second);
3004 if (is_error(raddr))
3005 return get_errno(raddr);
3006 if (put_user_ual(raddr, third))
3007 return -TARGET_EFAULT;
3008 break;
3010 case 1:
3011 ret = -TARGET_EINVAL;
3012 break;
3014 break;
3015 case IPCOP_shmdt:
3016 ret = do_shmdt(ptr);
3017 break;
3019 case IPCOP_shmget:
3020 /* IPC_* flag values are the same on all linux platforms */
3021 ret = get_errno(shmget(first, second, third));
3022 break;
3024 /* IPC_* and SHM_* command values are the same on all linux platforms */
3025 case IPCOP_shmctl:
3026 ret = do_shmctl(first, second, third);
3027 break;
3028 default:
3029 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3030 ret = -TARGET_ENOSYS;
3031 break;
3033 return ret;
3035 #endif
3037 /* kernel structure types definitions */
3039 #define STRUCT(name, ...) STRUCT_ ## name,
3040 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3041 enum {
3042 #include "syscall_types.h"
3044 #undef STRUCT
3045 #undef STRUCT_SPECIAL
3047 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3048 #define STRUCT_SPECIAL(name)
3049 #include "syscall_types.h"
3050 #undef STRUCT
3051 #undef STRUCT_SPECIAL
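/* Each ioctl is described by an IOCTLEntry: target and host request
 * numbers, an access mode (IOC_R/IOC_W/IOC_RW), an optional special-case
 * handler, and an argtype description referring to the struct layouts
 * generated above, which drives the generic thunk-based argument
 * conversion in do_ioctl(). */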
3053 typedef struct IOCTLEntry IOCTLEntry;
3055 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3056 int fd, abi_long cmd, abi_long arg);
3058 struct IOCTLEntry {
3059 unsigned int target_cmd;
3060 unsigned int host_cmd;
3061 const char *name;
3062 int access;
3063 do_ioctl_fn *do_ioctl;
3064 const argtype arg_type[5];
3067 #define IOC_R 0x0001
3068 #define IOC_W 0x0002
3069 #define IOC_RW (IOC_R | IOC_W)
3071 #define MAX_STRUCT_SIZE 4096
3073 #ifdef CONFIG_FIEMAP
3074 /* So fiemap access checks don't overflow on 32 bit systems.
3075 * This is very slightly smaller than the limit imposed by
3076 * the underlying kernel.
3077 */
3078 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3079 / sizeof(struct fiemap_extent))
3081 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3082 int fd, abi_long cmd, abi_long arg)
3084 /* The parameter for this ioctl is a struct fiemap followed
3085 * by an array of struct fiemap_extent whose size is set
3086 * in fiemap->fm_extent_count. The array is filled in by the
3087 * ioctl.
3088 */
3089 int target_size_in, target_size_out;
3090 struct fiemap *fm;
3091 const argtype *arg_type = ie->arg_type;
3092 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3093 void *argptr, *p;
3094 abi_long ret;
3095 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3096 uint32_t outbufsz;
3097 int free_fm = 0;
3099 assert(arg_type[0] == TYPE_PTR);
3100 assert(ie->access == IOC_RW);
3101 arg_type++;
3102 target_size_in = thunk_type_size(arg_type, 0);
3103 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3104 if (!argptr) {
3105 return -TARGET_EFAULT;
3107 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3108 unlock_user(argptr, arg, 0);
3109 fm = (struct fiemap *)buf_temp;
3110 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3111 return -TARGET_EINVAL;
3114 outbufsz = sizeof (*fm) +
3115 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3117 if (outbufsz > MAX_STRUCT_SIZE) {
3118 /* We can't fit all the extents into the fixed size buffer.
3119 * Allocate one that is large enough and use it instead.
3120 */
3121 fm = malloc(outbufsz);
3122 if (!fm) {
3123 return -TARGET_ENOMEM;
3125 memcpy(fm, buf_temp, sizeof(struct fiemap));
3126 free_fm = 1;
3128 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3129 if (!is_error(ret)) {
3130 target_size_out = target_size_in;
3131 /* An extent_count of 0 means we were only counting the extents
3132 * so there are no structs to copy
3133 */
3134 if (fm->fm_extent_count != 0) {
3135 target_size_out += fm->fm_mapped_extents * extent_size;
3137 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3138 if (!argptr) {
3139 ret = -TARGET_EFAULT;
3140 } else {
3141 /* Convert the struct fiemap */
3142 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3143 if (fm->fm_extent_count != 0) {
3144 p = argptr + target_size_in;
3145 /* ...and then all the struct fiemap_extents */
3146 for (i = 0; i < fm->fm_mapped_extents; i++) {
3147 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3148 THUNK_TARGET);
3149 p += extent_size;
3152 unlock_user(argptr, arg, target_size_out);
3155 if (free_fm) {
3156 free(fm);
3158 return ret;
3160 #endif
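/* do_ioctl_ifconf(): the target's ifc_len counts target-sized ifreq
 * structures, so it is rescaled to a host ifreq count and the host buffer
 * is placed directly after the struct ifconf.  On success ifc_len is
 * converted back to target units, the original target ifc_buf pointer is
 * restored, and both the struct and the ifreq array are copied out. */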
3162 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3163 int fd, abi_long cmd, abi_long arg)
3165 const argtype *arg_type = ie->arg_type;
3166 int target_size;
3167 void *argptr;
3168 int ret;
3169 struct ifconf *host_ifconf;
3170 uint32_t outbufsz;
3171 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3172 int target_ifreq_size;
3173 int nb_ifreq;
3174 int free_buf = 0;
3175 int i;
3176 int target_ifc_len;
3177 abi_long target_ifc_buf;
3178 int host_ifc_len;
3179 char *host_ifc_buf;
3181 assert(arg_type[0] == TYPE_PTR);
3182 assert(ie->access == IOC_RW);
3184 arg_type++;
3185 target_size = thunk_type_size(arg_type, 0);
3187 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3188 if (!argptr)
3189 return -TARGET_EFAULT;
3190 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3191 unlock_user(argptr, arg, 0);
3193 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3194 target_ifc_len = host_ifconf->ifc_len;
3195 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3197 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3198 nb_ifreq = target_ifc_len / target_ifreq_size;
3199 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3201 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3202 if (outbufsz > MAX_STRUCT_SIZE) {
3203 /* We can't fit all the extents into the fixed size buffer.
3204 * Allocate one that is large enough and use it instead.
3205 */
3206 host_ifconf = malloc(outbufsz);
3207 if (!host_ifconf) {
3208 return -TARGET_ENOMEM;
3210 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3211 free_buf = 1;
3213 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3215 host_ifconf->ifc_len = host_ifc_len;
3216 host_ifconf->ifc_buf = host_ifc_buf;
3218 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3219 if (!is_error(ret)) {
3220 /* convert host ifc_len to target ifc_len */
3222 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3223 target_ifc_len = nb_ifreq * target_ifreq_size;
3224 host_ifconf->ifc_len = target_ifc_len;
3226 /* restore target ifc_buf */
3228 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3230 /* copy struct ifconf to target user */
3232 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3233 if (!argptr)
3234 return -TARGET_EFAULT;
3235 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3236 unlock_user(argptr, arg, target_size);
3238 /* copy ifreq[] to target user */
3240 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3241 for (i = 0; i < nb_ifreq ; i++) {
3242 thunk_convert(argptr + i * target_ifreq_size,
3243 host_ifc_buf + i * sizeof(struct ifreq),
3244 ifreq_arg_type, THUNK_TARGET);
3246 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3249 if (free_buf) {
3250 free(host_ifconf);
3253 return ret;
3256 static IOCTLEntry ioctl_entries[] = {
3257 #define IOCTL(cmd, access, ...) \
3258 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3259 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3260 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3261 #include "ioctls.h"
3262 { 0, 0, },
3265 /* ??? Implement proper locking for ioctls. */
3266 /* do_ioctl() Must return target values and target errnos. */
3267 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3269 const IOCTLEntry *ie;
3270 const argtype *arg_type;
3271 abi_long ret;
3272 uint8_t buf_temp[MAX_STRUCT_SIZE];
3273 int target_size;
3274 void *argptr;
3276 ie = ioctl_entries;
3277 for(;;) {
3278 if (ie->target_cmd == 0) {
3279 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3280 return -TARGET_ENOSYS;
3282 if (ie->target_cmd == cmd)
3283 break;
3284 ie++;
3286 arg_type = ie->arg_type;
3287 #if defined(DEBUG)
3288 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3289 #endif
3290 if (ie->do_ioctl) {
3291 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
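/* Generic case: the argument is marshalled through buf_temp using the
 * thunk type description.  IOC_W converts target->host before the host
 * ioctl, IOC_R converts host->target afterwards, and IOC_RW does both. */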
3294 switch(arg_type[0]) {
3295 case TYPE_NULL:
3296 /* no argument */
3297 ret = get_errno(ioctl(fd, ie->host_cmd));
3298 break;
3299 case TYPE_PTRVOID:
3300 case TYPE_INT:
3301 /* int argument */
3302 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3303 break;
3304 case TYPE_PTR:
3305 arg_type++;
3306 target_size = thunk_type_size(arg_type, 0);
3307 switch(ie->access) {
3308 case IOC_R:
3309 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3310 if (!is_error(ret)) {
3311 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3312 if (!argptr)
3313 return -TARGET_EFAULT;
3314 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3315 unlock_user(argptr, arg, target_size);
3317 break;
3318 case IOC_W:
3319 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3320 if (!argptr)
3321 return -TARGET_EFAULT;
3322 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3323 unlock_user(argptr, arg, 0);
3324 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3325 break;
3326 default:
3327 case IOC_RW:
3328 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3329 if (!argptr)
3330 return -TARGET_EFAULT;
3331 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3332 unlock_user(argptr, arg, 0);
3333 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3334 if (!is_error(ret)) {
3335 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3336 if (!argptr)
3337 return -TARGET_EFAULT;
3338 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3339 unlock_user(argptr, arg, target_size);
3341 break;
3343 break;
3344 default:
3345 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3346 (long)cmd, arg_type[0]);
3347 ret = -TARGET_ENOSYS;
3348 break;
3350 return ret;
3353 static const bitmask_transtbl iflag_tbl[] = {
3354 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3355 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3356 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3357 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3358 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3359 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3360 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3361 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3362 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3363 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3364 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3365 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3366 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3367 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3368 { 0, 0, 0, 0 }
3371 static const bitmask_transtbl oflag_tbl[] = {
3372 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3373 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3374 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3375 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3376 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3377 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3378 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3379 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3380 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3381 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3382 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3383 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3384 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3385 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3386 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3387 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3388 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3389 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3390 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3391 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3392 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3393 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3394 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3395 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3396 { 0, 0, 0, 0 }
3399 static const bitmask_transtbl cflag_tbl[] = {
3400 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3401 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3402 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3403 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3404 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3405 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3406 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3407 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3408 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3409 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3410 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3411 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3412 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3413 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3414 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3415 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3416 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3417 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3418 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3419 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3420 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3421 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3422 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3423 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3424 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3425 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3426 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3427 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3428 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3429 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3430 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3431 { 0, 0, 0, 0 }
3434 static const bitmask_transtbl lflag_tbl[] = {
3435 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3436 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3437 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3438 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3439 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3440 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3441 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3442 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3443 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3444 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3445 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3446 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3447 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3448 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3449 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3450 { 0, 0, 0, 0 }
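/* termios conversion: the bitmask_transtbl tables above translate each
 * target c_iflag/c_oflag/c_cflag/c_lflag bit or field (e.g. CBAUD, CSIZE)
 * to its host counterpart, while the control characters are copied slot
 * by slot because the VINTR/VQUIT/... indices may differ between target
 * and host. */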
3453 static void target_to_host_termios (void *dst, const void *src)
3455 struct host_termios *host = dst;
3456 const struct target_termios *target = src;
3458 host->c_iflag =
3459 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3460 host->c_oflag =
3461 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3462 host->c_cflag =
3463 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3464 host->c_lflag =
3465 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3466 host->c_line = target->c_line;
3468 memset(host->c_cc, 0, sizeof(host->c_cc));
3469 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3470 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3471 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3472 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3473 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3474 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3475 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3476 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3477 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3478 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3479 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3480 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3481 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3482 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3483 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3484 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3485 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3488 static void host_to_target_termios (void *dst, const void *src)
3490 struct target_termios *target = dst;
3491 const struct host_termios *host = src;
3493 target->c_iflag =
3494 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3495 target->c_oflag =
3496 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3497 target->c_cflag =
3498 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3499 target->c_lflag =
3500 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3501 target->c_line = host->c_line;
3503 memset(target->c_cc, 0, sizeof(target->c_cc));
3504 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3505 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3506 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3507 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3508 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3509 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3510 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3511 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3512 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3513 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3514 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3515 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3516 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3517 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3518 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3519 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3520 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3523 static const StructEntry struct_termios_def = {
3524 .convert = { host_to_target_termios, target_to_host_termios },
3525 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3526 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3529 static bitmask_transtbl mmap_flags_tbl[] = {
3530 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3531 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3532 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3533 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3534 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3535 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3536 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3537 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3538 { 0, 0, 0, 0 }
3541 #if defined(TARGET_I386)
3543 /* NOTE: there is really one LDT for all the threads */
3544 static uint8_t *ldt_table;
3546 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3548 int size;
3549 void *p;
3551 if (!ldt_table)
3552 return 0;
3553 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3554 if (size > bytecount)
3555 size = bytecount;
3556 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3557 if (!p)
3558 return -TARGET_EFAULT;
3559 /* ??? Should this be byteswapped? */
3560 memcpy(p, ldt_table, size);
3561 unlock_user(p, ptr, size);
3562 return size;
3565 /* XXX: add locking support */
3566 static abi_long write_ldt(CPUX86State *env,
3567 abi_ulong ptr, unsigned long bytecount, int oldmode)
3569 struct target_modify_ldt_ldt_s ldt_info;
3570 struct target_modify_ldt_ldt_s *target_ldt_info;
3571 int seg_32bit, contents, read_exec_only, limit_in_pages;
3572 int seg_not_present, useable, lm;
3573 uint32_t *lp, entry_1, entry_2;
3575 if (bytecount != sizeof(ldt_info))
3576 return -TARGET_EINVAL;
3577 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3578 return -TARGET_EFAULT;
3579 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3580 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3581 ldt_info.limit = tswap32(target_ldt_info->limit);
3582 ldt_info.flags = tswap32(target_ldt_info->flags);
3583 unlock_user_struct(target_ldt_info, ptr, 0);
3585 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3586 return -TARGET_EINVAL;
3587 seg_32bit = ldt_info.flags & 1;
3588 contents = (ldt_info.flags >> 1) & 3;
3589 read_exec_only = (ldt_info.flags >> 3) & 1;
3590 limit_in_pages = (ldt_info.flags >> 4) & 1;
3591 seg_not_present = (ldt_info.flags >> 5) & 1;
3592 useable = (ldt_info.flags >> 6) & 1;
3593 #ifdef TARGET_ABI32
3594 lm = 0;
3595 #else
3596 lm = (ldt_info.flags >> 7) & 1;
3597 #endif
3598 if (contents == 3) {
3599 if (oldmode)
3600 return -TARGET_EINVAL;
3601 if (seg_not_present == 0)
3602 return -TARGET_EINVAL;
3604 /* allocate the LDT */
3605 if (!ldt_table) {
3606 env->ldt.base = target_mmap(0,
3607 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3608 PROT_READ|PROT_WRITE,
3609 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3610 if (env->ldt.base == -1)
3611 return -TARGET_ENOMEM;
3612 memset(g2h(env->ldt.base), 0,
3613 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3614 env->ldt.limit = 0xffff;
3615 ldt_table = g2h(env->ldt.base);
3618 /* NOTE: same code as Linux kernel */
3619 /* Allow LDTs to be cleared by the user. */
3620 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3621 if (oldmode ||
3622 (contents == 0 &&
3623 read_exec_only == 1 &&
3624 seg_32bit == 0 &&
3625 limit_in_pages == 0 &&
3626 seg_not_present == 1 &&
3627 useable == 0 )) {
3628 entry_1 = 0;
3629 entry_2 = 0;
3630 goto install;
3634 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3635 (ldt_info.limit & 0x0ffff);
3636 entry_2 = (ldt_info.base_addr & 0xff000000) |
3637 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3638 (ldt_info.limit & 0xf0000) |
3639 ((read_exec_only ^ 1) << 9) |
3640 (contents << 10) |
3641 ((seg_not_present ^ 1) << 15) |
3642 (seg_32bit << 22) |
3643 (limit_in_pages << 23) |
3644 (lm << 21) |
3645 0x7000;
3646 if (!oldmode)
3647 entry_2 |= (useable << 20);
3649 /* Install the new entry ... */
3650 install:
3651 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3652 lp[0] = tswap32(entry_1);
3653 lp[1] = tswap32(entry_2);
3654 return 0;
3657 /* specific and weird i386 syscalls */
3658 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3659 unsigned long bytecount)
3661 abi_long ret;
3663 switch (func) {
3664 case 0:
3665 ret = read_ldt(ptr, bytecount);
3666 break;
3667 case 1:
3668 ret = write_ldt(env, ptr, bytecount, 1);
3669 break;
3670 case 0x11:
3671 ret = write_ldt(env, ptr, bytecount, 0);
3672 break;
3673 default:
3674 ret = -TARGET_ENOSYS;
3675 break;
3677 return ret;
3680 #if defined(TARGET_I386) && defined(TARGET_ABI32)
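/* set_thread_area: if the target passes entry_number == -1, scan the GDT
 * TLS slots for a free (zero) entry, claim it and write the chosen index
 * back into the target's entry_number field before building the
 * descriptor words. */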
3681 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3683 uint64_t *gdt_table = g2h(env->gdt.base);
3684 struct target_modify_ldt_ldt_s ldt_info;
3685 struct target_modify_ldt_ldt_s *target_ldt_info;
3686 int seg_32bit, contents, read_exec_only, limit_in_pages;
3687 int seg_not_present, useable, lm;
3688 uint32_t *lp, entry_1, entry_2;
3689 int i;
3691 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3692 if (!target_ldt_info)
3693 return -TARGET_EFAULT;
3694 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3695 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3696 ldt_info.limit = tswap32(target_ldt_info->limit);
3697 ldt_info.flags = tswap32(target_ldt_info->flags);
3698 if (ldt_info.entry_number == -1) {
3699 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3700 if (gdt_table[i] == 0) {
3701 ldt_info.entry_number = i;
3702 target_ldt_info->entry_number = tswap32(i);
3703 break;
3707 unlock_user_struct(target_ldt_info, ptr, 1);
3709 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3710 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3711 return -TARGET_EINVAL;
3712 seg_32bit = ldt_info.flags & 1;
3713 contents = (ldt_info.flags >> 1) & 3;
3714 read_exec_only = (ldt_info.flags >> 3) & 1;
3715 limit_in_pages = (ldt_info.flags >> 4) & 1;
3716 seg_not_present = (ldt_info.flags >> 5) & 1;
3717 useable = (ldt_info.flags >> 6) & 1;
3718 #ifdef TARGET_ABI32
3719 lm = 0;
3720 #else
3721 lm = (ldt_info.flags >> 7) & 1;
3722 #endif
3724 if (contents == 3) {
3725 if (seg_not_present == 0)
3726 return -TARGET_EINVAL;
3729 /* NOTE: same code as Linux kernel */
3730 /* Allow LDTs to be cleared by the user. */
3731 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3732 if ((contents == 0 &&
3733 read_exec_only == 1 &&
3734 seg_32bit == 0 &&
3735 limit_in_pages == 0 &&
3736 seg_not_present == 1 &&
3737 useable == 0 )) {
3738 entry_1 = 0;
3739 entry_2 = 0;
3740 goto install;
3744 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3745 (ldt_info.limit & 0x0ffff);
3746 entry_2 = (ldt_info.base_addr & 0xff000000) |
3747 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3748 (ldt_info.limit & 0xf0000) |
3749 ((read_exec_only ^ 1) << 9) |
3750 (contents << 10) |
3751 ((seg_not_present ^ 1) << 15) |
3752 (seg_32bit << 22) |
3753 (limit_in_pages << 23) |
3754 (useable << 20) |
3755 (lm << 21) |
3756 0x7000;
3758 /* Install the new entry ... */
3759 install:
3760 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3761 lp[0] = tswap32(entry_1);
3762 lp[1] = tswap32(entry_2);
3763 return 0;
3766 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3768 struct target_modify_ldt_ldt_s *target_ldt_info;
3769 uint64_t *gdt_table = g2h(env->gdt.base);
3770 uint32_t base_addr, limit, flags;
3771 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3772 int seg_not_present, useable, lm;
3773 uint32_t *lp, entry_1, entry_2;
3775 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3776 if (!target_ldt_info)
3777 return -TARGET_EFAULT;
3778 idx = tswap32(target_ldt_info->entry_number);
3779 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3780 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3781 unlock_user_struct(target_ldt_info, ptr, 1);
3782 return -TARGET_EINVAL;
3784 lp = (uint32_t *)(gdt_table + idx);
3785 entry_1 = tswap32(lp[0]);
3786 entry_2 = tswap32(lp[1]);
3788 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3789 contents = (entry_2 >> 10) & 3;
3790 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3791 seg_32bit = (entry_2 >> 22) & 1;
3792 limit_in_pages = (entry_2 >> 23) & 1;
3793 useable = (entry_2 >> 20) & 1;
3794 #ifdef TARGET_ABI32
3795 lm = 0;
3796 #else
3797 lm = (entry_2 >> 21) & 1;
3798 #endif
3799 flags = (seg_32bit << 0) | (contents << 1) |
3800 (read_exec_only << 3) | (limit_in_pages << 4) |
3801 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3802 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3803 base_addr = (entry_1 >> 16) |
3804 (entry_2 & 0xff000000) |
3805 ((entry_2 & 0xff) << 16);
3806 target_ldt_info->base_addr = tswapl(base_addr);
3807 target_ldt_info->limit = tswap32(limit);
3808 target_ldt_info->flags = tswap32(flags);
3809 unlock_user_struct(target_ldt_info, ptr, 1);
3810 return 0;
3812 #endif /* TARGET_I386 && TARGET_ABI32 */
3814 #ifndef TARGET_ABI32
3815 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3817 abi_long ret = 0;
3818 abi_ulong val;
3819 int idx;
3821 switch(code) {
3822 case TARGET_ARCH_SET_GS:
3823 case TARGET_ARCH_SET_FS:
3824 if (code == TARGET_ARCH_SET_GS)
3825 idx = R_GS;
3826 else
3827 idx = R_FS;
3828 cpu_x86_load_seg(env, idx, 0);
3829 env->segs[idx].base = addr;
3830 break;
3831 case TARGET_ARCH_GET_GS:
3832 case TARGET_ARCH_GET_FS:
3833 if (code == TARGET_ARCH_GET_GS)
3834 idx = R_GS;
3835 else
3836 idx = R_FS;
3837 val = env->segs[idx].base;
3838 if (put_user(val, addr, abi_ulong))
3839 ret = -TARGET_EFAULT;
3840 break;
3841 default:
3842 ret = -TARGET_EINVAL;
3843 break;
3845 return ret;
3847 #endif
3849 #endif /* defined(TARGET_I386) */
3851 #define NEW_STACK_SIZE 0x40000
3853 #if defined(CONFIG_USE_NPTL)
3855 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3856 typedef struct {
3857 CPUState *env;
3858 pthread_mutex_t mutex;
3859 pthread_cond_t cond;
3860 pthread_t thread;
3861 uint32_t tid;
3862 abi_ulong child_tidptr;
3863 abi_ulong parent_tidptr;
3864 sigset_t sigmask;
3865 } new_thread_info;
3867 static void *clone_func(void *arg)
3869 new_thread_info *info = arg;
3870 CPUState *env;
3871 TaskState *ts;
3873 env = info->env;
3874 thread_env = env;
3875 ts = (TaskState *)thread_env->opaque;
3876 info->tid = gettid();
3877 env->host_tid = info->tid;
3878 task_settid(ts);
3879 if (info->child_tidptr)
3880 put_user_u32(info->tid, info->child_tidptr);
3881 if (info->parent_tidptr)
3882 put_user_u32(info->tid, info->parent_tidptr);
3883 /* Enable signals. */
3884 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3885 /* Signal to the parent that we're ready. */
3886 pthread_mutex_lock(&info->mutex);
3887 pthread_cond_broadcast(&info->cond);
3888 pthread_mutex_unlock(&info->mutex);
3889 /* Wait until the parent has finished initializing the TLS state. */
3890 pthread_mutex_lock(&clone_lock);
3891 pthread_mutex_unlock(&clone_lock);
3892 cpu_loop(env);
3893 /* never exits */
3894 return NULL;
3896 #else
3898 static int clone_func(void *arg)
3900 CPUState *env = arg;
3901 cpu_loop(env);
3902 /* never exits */
3903 return 0;
3905 #endif
3907 /* do_fork() Must return host values and target errnos (unlike most
3908 do_*() functions). */
3909 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3910 abi_ulong parent_tidptr, target_ulong newtls,
3911 abi_ulong child_tidptr)
3913 int ret;
3914 TaskState *ts;
3915 CPUState *new_env;
3916 #if defined(CONFIG_USE_NPTL)
3917 unsigned int nptl_flags;
3918 sigset_t sigmask;
3919 #else
3920 uint8_t *new_stack;
3921 #endif
3923 /* Emulate vfork() with fork() */
3924 if (flags & CLONE_VFORK)
3925 flags &= ~(CLONE_VFORK | CLONE_VM);
3927 if (flags & CLONE_VM) {
3928 TaskState *parent_ts = (TaskState *)env->opaque;
3929 #if defined(CONFIG_USE_NPTL)
3930 new_thread_info info;
3931 pthread_attr_t attr;
3932 #endif
3933 ts = qemu_mallocz(sizeof(TaskState));
3934 init_task_state(ts);
3935 /* we create a new CPU instance. */
3936 new_env = cpu_copy(env);
3937 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3938 cpu_reset(new_env);
3939 #endif
3940 /* Init regs that differ from the parent. */
3941 cpu_clone_regs(new_env, newsp);
3942 new_env->opaque = ts;
3943 ts->bprm = parent_ts->bprm;
3944 ts->info = parent_ts->info;
3945 #if defined(CONFIG_USE_NPTL)
3946 nptl_flags = flags;
3947 flags &= ~CLONE_NPTL_FLAGS2;
3949 if (nptl_flags & CLONE_CHILD_CLEARTID) {
3950 ts->child_tidptr = child_tidptr;
3953 if (nptl_flags & CLONE_SETTLS)
3954 cpu_set_tls (new_env, newtls);
3956 /* Grab a mutex so that thread setup appears atomic. */
3957 pthread_mutex_lock(&clone_lock);
3959 memset(&info, 0, sizeof(info));
3960 pthread_mutex_init(&info.mutex, NULL);
3961 pthread_mutex_lock(&info.mutex);
3962 pthread_cond_init(&info.cond, NULL);
3963 info.env = new_env;
3964 if (nptl_flags & CLONE_CHILD_SETTID)
3965 info.child_tidptr = child_tidptr;
3966 if (nptl_flags & CLONE_PARENT_SETTID)
3967 info.parent_tidptr = parent_tidptr;
3969 ret = pthread_attr_init(&attr);
3970 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
3971 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
3972 /* It is not safe to deliver signals until the child has finished
3973 initializing, so temporarily block all signals. */
3974 sigfillset(&sigmask);
3975 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3977 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3978 /* TODO: Free new CPU state if thread creation failed. */
3980 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3981 pthread_attr_destroy(&attr);
3982 if (ret == 0) {
3983 /* Wait for the child to initialize. */
3984 pthread_cond_wait(&info.cond, &info.mutex);
3985 ret = info.tid;
3986 if (flags & CLONE_PARENT_SETTID)
3987 put_user_u32(ret, parent_tidptr);
3988 } else {
3989 ret = -1;
3991 pthread_mutex_unlock(&info.mutex);
3992 pthread_cond_destroy(&info.cond);
3993 pthread_mutex_destroy(&info.mutex);
3994 pthread_mutex_unlock(&clone_lock);
3995 #else
3996 if (flags & CLONE_NPTL_FLAGS2)
3997 return -EINVAL;
3998 /* This is probably going to die very quickly, but do it anyway. */
3999 new_stack = qemu_mallocz (NEW_STACK_SIZE);
4000 #ifdef __ia64__
4001 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4002 #else
4003 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4004 #endif
4005 #endif
4006 } else {
4007 /* if there is no CLONE_VM, we consider it a fork */
4008 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4009 return -EINVAL;
4010 fork_start();
4011 ret = fork();
4012 if (ret == 0) {
4013 /* Child Process. */
4014 cpu_clone_regs(env, newsp);
4015 fork_end(1);
4016 #if defined(CONFIG_USE_NPTL)
4017 /* There is a race condition here. The parent process could
4018 theoretically read the TID in the child process before the child
4019 tid is set. This would require using either ptrace
4020 (not implemented) or having *_tidptr to point at a shared memory
4021 mapping. We can't repeat the spinlock hack used above because
4022 the child process gets its own copy of the lock. */
4023 if (flags & CLONE_CHILD_SETTID)
4024 put_user_u32(gettid(), child_tidptr);
4025 if (flags & CLONE_PARENT_SETTID)
4026 put_user_u32(gettid(), parent_tidptr);
4027 ts = (TaskState *)env->opaque;
4028 if (flags & CLONE_SETTLS)
4029 cpu_set_tls (env, newtls);
4030 if (flags & CLONE_CHILD_CLEARTID)
4031 ts->child_tidptr = child_tidptr;
4032 #endif
4033 } else {
4034 fork_end(0);
4037 return ret;
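/* Illustrative, standalone sketch (not part of syscall.c): the flag set a
   glibc-style pthread_create() is assumed to pass to clone().  The CLONE_VM
   branch of do_fork() above maps such a request onto a host pthread_create();
   the TLS and *_TID flags collected in CLONE_NPTL_FLAGS2 are serviced by the
   code above itself rather than handed to the host. */
#if 0   /* example only */
#include <sched.h>

#define EXAMPLE_THREAD_CLONE_FLAGS \
    (CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD | \
     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID)
#endif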
4040 /* warning: doesn't handle Linux-specific flags... */
4041 static int target_to_host_fcntl_cmd(int cmd)
4043 switch(cmd) {
4044 case TARGET_F_DUPFD:
4045 case TARGET_F_GETFD:
4046 case TARGET_F_SETFD:
4047 case TARGET_F_GETFL:
4048 case TARGET_F_SETFL:
4049 return cmd;
4050 case TARGET_F_GETLK:
4051 return F_GETLK;
4052 case TARGET_F_SETLK:
4053 return F_SETLK;
4054 case TARGET_F_SETLKW:
4055 return F_SETLKW;
4056 case TARGET_F_GETOWN:
4057 return F_GETOWN;
4058 case TARGET_F_SETOWN:
4059 return F_SETOWN;
4060 case TARGET_F_GETSIG:
4061 return F_GETSIG;
4062 case TARGET_F_SETSIG:
4063 return F_SETSIG;
4064 #if TARGET_ABI_BITS == 32
4065 case TARGET_F_GETLK64:
4066 return F_GETLK64;
4067 case TARGET_F_SETLK64:
4068 return F_SETLK64;
4069 case TARGET_F_SETLKW64:
4070 return F_SETLKW64;
4071 #endif
4072 case TARGET_F_SETLEASE:
4073 return F_SETLEASE;
4074 case TARGET_F_GETLEASE:
4075 return F_GETLEASE;
4076 #ifdef F_DUPFD_CLOEXEC
4077 case TARGET_F_DUPFD_CLOEXEC:
4078 return F_DUPFD_CLOEXEC;
4079 #endif
4080 case TARGET_F_NOTIFY:
4081 return F_NOTIFY;
4082 default:
4083 return -TARGET_EINVAL;
4085 return -TARGET_EINVAL;
4088 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4090 struct flock fl;
4091 struct target_flock *target_fl;
4092 struct flock64 fl64;
4093 struct target_flock64 *target_fl64;
4094 abi_long ret;
4095 int host_cmd = target_to_host_fcntl_cmd(cmd);
4097 if (host_cmd == -TARGET_EINVAL)
4098 return host_cmd;
4100 switch(cmd) {
4101 case TARGET_F_GETLK:
4102 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4103 return -TARGET_EFAULT;
4104 fl.l_type = tswap16(target_fl->l_type);
4105 fl.l_whence = tswap16(target_fl->l_whence);
4106 fl.l_start = tswapl(target_fl->l_start);
4107 fl.l_len = tswapl(target_fl->l_len);
4108 fl.l_pid = tswap32(target_fl->l_pid);
4109 unlock_user_struct(target_fl, arg, 0);
4110 ret = get_errno(fcntl(fd, host_cmd, &fl));
4111 if (ret == 0) {
4112 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4113 return -TARGET_EFAULT;
4114 target_fl->l_type = tswap16(fl.l_type);
4115 target_fl->l_whence = tswap16(fl.l_whence);
4116 target_fl->l_start = tswapl(fl.l_start);
4117 target_fl->l_len = tswapl(fl.l_len);
4118 target_fl->l_pid = tswap32(fl.l_pid);
4119 unlock_user_struct(target_fl, arg, 1);
4121 break;
4123 case TARGET_F_SETLK:
4124 case TARGET_F_SETLKW:
4125 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4126 return -TARGET_EFAULT;
4127 fl.l_type = tswap16(target_fl->l_type);
4128 fl.l_whence = tswap16(target_fl->l_whence);
4129 fl.l_start = tswapl(target_fl->l_start);
4130 fl.l_len = tswapl(target_fl->l_len);
4131 fl.l_pid = tswap32(target_fl->l_pid);
4132 unlock_user_struct(target_fl, arg, 0);
4133 ret = get_errno(fcntl(fd, host_cmd, &fl));
4134 break;
4136 case TARGET_F_GETLK64:
4137 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4138 return -TARGET_EFAULT;
4139 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4140 fl64.l_whence = tswap16(target_fl64->l_whence);
4141 fl64.l_start = tswapl(target_fl64->l_start);
4142 fl64.l_len = tswapl(target_fl64->l_len);
4143 fl64.l_pid = tswap32(target_fl64->l_pid);
4144 unlock_user_struct(target_fl64, arg, 0);
4145 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4146 if (ret == 0) {
4147 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4148 return -TARGET_EFAULT;
4149 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4150 target_fl64->l_whence = tswap16(fl64.l_whence);
4151 target_fl64->l_start = tswapl(fl64.l_start);
4152 target_fl64->l_len = tswapl(fl64.l_len);
4153 target_fl64->l_pid = tswap32(fl64.l_pid);
4154 unlock_user_struct(target_fl64, arg, 1);
4156 break;
4157 case TARGET_F_SETLK64:
4158 case TARGET_F_SETLKW64:
4159 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4160 return -TARGET_EFAULT;
4161 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4162 fl64.l_whence = tswap16(target_fl64->l_whence);
4163 fl64.l_start = tswapl(target_fl64->l_start);
4164 fl64.l_len = tswapl(target_fl64->l_len);
4165 fl64.l_pid = tswap32(target_fl64->l_pid);
4166 unlock_user_struct(target_fl64, arg, 0);
4167 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4168 break;
4170 case TARGET_F_GETFL:
4171 ret = get_errno(fcntl(fd, host_cmd, arg));
4172 if (ret >= 0) {
4173 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4175 break;
4177 case TARGET_F_SETFL:
4178 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4179 break;
4181 case TARGET_F_SETOWN:
4182 case TARGET_F_GETOWN:
4183 case TARGET_F_SETSIG:
4184 case TARGET_F_GETSIG:
4185 case TARGET_F_SETLEASE:
4186 case TARGET_F_GETLEASE:
4187 ret = get_errno(fcntl(fd, host_cmd, arg));
4188 break;
4190 default:
4191 ret = get_errno(fcntl(fd, cmd, arg));
4192 break;
4194 return ret;
4197 #ifdef USE_UID16
4199 static inline int high2lowuid(int uid)
4201 if (uid > 65535)
4202 return 65534;
4203 else
4204 return uid;
4207 static inline int high2lowgid(int gid)
4209 if (gid > 65535)
4210 return 65534;
4211 else
4212 return gid;
4215 static inline int low2highuid(int uid)
4217 if ((int16_t)uid == -1)
4218 return -1;
4219 else
4220 return uid;
4223 static inline int low2highgid(int gid)
4225 if ((int16_t)gid == -1)
4226 return -1;
4227 else
4228 return gid;
4230 static inline int tswapid(int id)
4232 return tswap16(id);
4234 #else /* !USE_UID16 */
4235 static inline int high2lowuid(int uid)
4237 return uid;
4239 static inline int high2lowgid(int gid)
4241 return gid;
4243 static inline int low2highuid(int uid)
4245 return uid;
4247 static inline int low2highgid(int gid)
4249 return gid;
4251 static inline int tswapid(int id)
4253 return tswap32(id);
4255 #endif /* USE_UID16 */
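/* Illustrative, standalone sketch (not part of syscall.c): what the 16-bit
   uid helpers above do on USE_UID16 hosts.  A uid that does not fit in 16
   bits is reported as the overflow uid 65534, while the 16-bit -1 (0xffff)
   must map back to -1 because it means "leave unchanged" to setresuid()
   and friends. */
#if 0   /* example only */
#include <assert.h>
#include <stdint.h>

static void uid16_clamp_example(void)
{
    assert(high2lowuid(100000) == 65534);      /* clamped to the overflow uid */
    assert(low2highuid((uint16_t)-1) == -1);   /* 0xffff still means "no change" */
}
#endif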
4257 void syscall_init(void)
4259 IOCTLEntry *ie;
4260 const argtype *arg_type;
4261 int size;
4262 int i;
4264 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4265 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4266 #include "syscall_types.h"
4267 #undef STRUCT
4268 #undef STRUCT_SPECIAL
4270 /* we patch the ioctl size if necessary. We rely on the fact that
4271 no ioctl has all bits set in its size field */
4272 ie = ioctl_entries;
4273 while (ie->target_cmd != 0) {
4274 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4275 TARGET_IOC_SIZEMASK) {
4276 arg_type = ie->arg_type;
4277 if (arg_type[0] != TYPE_PTR) {
4278 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4279 ie->target_cmd);
4280 exit(1);
4282 arg_type++;
4283 size = thunk_type_size(arg_type, 0);
4284 ie->target_cmd = (ie->target_cmd &
4285 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4286 (size << TARGET_IOC_SIZESHIFT);
4289 /* Build target_to_host_errno_table[] table from
4290 * host_to_target_errno_table[]. */
4291 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4292 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4294 /* automatic consistency check if same arch */
4295 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4296 (defined(__x86_64__) && defined(TARGET_X86_64))
4297 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4298 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4299 ie->name, ie->target_cmd, ie->host_cmd);
4301 #endif
4302 ie++;
4306 #if TARGET_ABI_BITS == 32
4307 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4309 #ifdef TARGET_WORDS_BIGENDIAN
4310 return ((uint64_t)word0 << 32) | word1;
4311 #else
4312 return ((uint64_t)word1 << 32) | word0;
4313 #endif
4315 #else /* TARGET_ABI_BITS == 32 */
4316 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4318 return word0;
4320 #endif /* TARGET_ABI_BITS != 32 */
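/* Illustrative, standalone sketch (not part of syscall.c): how a 32-bit ABI
   guest hands a 64-bit file offset to truncate64/ftruncate64 as two register
   words, which target_offset64() above reassembles (high word first on
   big-endian targets, low word first otherwise).  Values are hypothetical. */
#if 0   /* example only */
#include <assert.h>
#include <stdint.h>

static void offset64_example(void)
{
    uint32_t word0 = 0x00000001;   /* high half on a big-endian target */
    uint32_t word1 = 0x80000000;   /* low half */
    uint64_t off = ((uint64_t)word0 << 32) | word1;
    assert(off == UINT64_C(0x180000000));   /* 6 GiB, beyond 32-bit reach */
}
#endif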
4322 #ifdef TARGET_NR_truncate64
4323 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4324 abi_long arg2,
4325 abi_long arg3,
4326 abi_long arg4)
4328 #ifdef TARGET_ARM
4329 if (((CPUARMState *)cpu_env)->eabi)
4331 arg2 = arg3;
4332 arg3 = arg4;
4334 #endif
4335 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4337 #endif
4339 #ifdef TARGET_NR_ftruncate64
4340 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4341 abi_long arg2,
4342 abi_long arg3,
4343 abi_long arg4)
4345 #ifdef TARGET_ARM
4346 if (((CPUARMState *)cpu_env)->eabi)
4348 arg2 = arg3;
4349 arg3 = arg4;
4351 #endif
4352 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4354 #endif
4356 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4357 abi_ulong target_addr)
4359 struct target_timespec *target_ts;
4361 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4362 return -TARGET_EFAULT;
4363 host_ts->tv_sec = tswapl(target_ts->tv_sec);
4364 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
4365 unlock_user_struct(target_ts, target_addr, 0);
4366 return 0;
4369 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4370 struct timespec *host_ts)
4372 struct target_timespec *target_ts;
4374 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4375 return -TARGET_EFAULT;
4376 target_ts->tv_sec = tswapl(host_ts->tv_sec);
4377 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
4378 unlock_user_struct(target_ts, target_addr, 1);
4379 return 0;
4382 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4383 static inline abi_long host_to_target_stat64(void *cpu_env,
4384 abi_ulong target_addr,
4385 struct stat *host_st)
4387 #ifdef TARGET_ARM
4388 if (((CPUARMState *)cpu_env)->eabi) {
4389 struct target_eabi_stat64 *target_st;
4391 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4392 return -TARGET_EFAULT;
4393 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4394 __put_user(host_st->st_dev, &target_st->st_dev);
4395 __put_user(host_st->st_ino, &target_st->st_ino);
4396 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4397 __put_user(host_st->st_ino, &target_st->__st_ino);
4398 #endif
4399 __put_user(host_st->st_mode, &target_st->st_mode);
4400 __put_user(host_st->st_nlink, &target_st->st_nlink);
4401 __put_user(host_st->st_uid, &target_st->st_uid);
4402 __put_user(host_st->st_gid, &target_st->st_gid);
4403 __put_user(host_st->st_rdev, &target_st->st_rdev);
4404 __put_user(host_st->st_size, &target_st->st_size);
4405 __put_user(host_st->st_blksize, &target_st->st_blksize);
4406 __put_user(host_st->st_blocks, &target_st->st_blocks);
4407 __put_user(host_st->st_atime, &target_st->target_st_atime);
4408 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4409 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4410 unlock_user_struct(target_st, target_addr, 1);
4411 } else
4412 #endif
4414 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4415 struct target_stat *target_st;
4416 #else
4417 struct target_stat64 *target_st;
4418 #endif
4420 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4421 return -TARGET_EFAULT;
4422 memset(target_st, 0, sizeof(*target_st));
4423 __put_user(host_st->st_dev, &target_st->st_dev);
4424 __put_user(host_st->st_ino, &target_st->st_ino);
4425 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4426 __put_user(host_st->st_ino, &target_st->__st_ino);
4427 #endif
4428 __put_user(host_st->st_mode, &target_st->st_mode);
4429 __put_user(host_st->st_nlink, &target_st->st_nlink);
4430 __put_user(host_st->st_uid, &target_st->st_uid);
4431 __put_user(host_st->st_gid, &target_st->st_gid);
4432 __put_user(host_st->st_rdev, &target_st->st_rdev);
4433 /* XXX: better use of kernel struct */
4434 __put_user(host_st->st_size, &target_st->st_size);
4435 __put_user(host_st->st_blksize, &target_st->st_blksize);
4436 __put_user(host_st->st_blocks, &target_st->st_blocks);
4437 __put_user(host_st->st_atime, &target_st->target_st_atime);
4438 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4439 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4440 unlock_user_struct(target_st, target_addr, 1);
4443 return 0;
4445 #endif
4447 #if defined(CONFIG_USE_NPTL)
4448 /* ??? Using host futex calls even when target atomic operations
4449 are not really atomic probably breaks things. However implementing
4450 futexes locally would make futexes shared between multiple processes
4451 tricky. Then again, they're probably useless because guest atomic
4452 operations won't work either. */
4453 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4454 target_ulong uaddr2, int val3)
4456 struct timespec ts, *pts;
4457 int base_op;
4459 /* ??? We assume FUTEX_* constants are the same on both host
4460 and target. */
4461 #ifdef FUTEX_CMD_MASK
4462 base_op = op & FUTEX_CMD_MASK;
4463 #else
4464 base_op = op;
4465 #endif
4466 switch (base_op) {
4467 case FUTEX_WAIT:
4468 if (timeout) {
4469 pts = &ts;
4470 target_to_host_timespec(pts, timeout);
4471 } else {
4472 pts = NULL;
4474 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4475 pts, NULL, 0));
4476 case FUTEX_WAKE:
4477 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4478 case FUTEX_FD:
4479 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4480 case FUTEX_REQUEUE:
4481 case FUTEX_CMP_REQUEUE:
4482 case FUTEX_WAKE_OP:
4483 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4484 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4485 But the prototype takes a `struct timespec *'; insert casts
4486 to satisfy the compiler. We do not need to tswap TIMEOUT
4487 since it's not compared to guest memory. */
4488 pts = (struct timespec *)(uintptr_t) timeout;
4489 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4490 g2h(uaddr2),
4491 (base_op == FUTEX_CMP_REQUEUE
4492 ? tswap32(val3)
4493 : val3)));
4494 default:
4495 return -TARGET_ENOSYS;
4498 #endif
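/* Illustrative, standalone sketch (not part of syscall.c): why FUTEX_WAIT's
   val is tswap32()ed above.  The kernel only sleeps if the word at uaddr
   still equals val, and that word lives in guest memory in target byte
   order, so on a cross-endian configuration the comparison value must be
   byte-swapped before it reaches the host kernel.  example_bswap32() is a
   hypothetical stand-in for that swap. */
#if 0   /* example only */
#include <stdint.h>

static uint32_t example_bswap32(uint32_t v)
{
    return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
           ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}
#endif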
4500 /* Map host to target signal numbers for the wait family of syscalls.
4501 Assume all other status bits are the same. */
4502 static int host_to_target_waitstatus(int status)
4504 if (WIFSIGNALED(status)) {
4505 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4507 if (WIFSTOPPED(status)) {
4508 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4509 | (status & 0xff);
4511 return status;
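/* Illustrative, standalone sketch (not part of syscall.c): the two status
   layouts host_to_target_waitstatus() above rewrites.  With the usual Linux
   encoding a termination signal sits in the low 7 bits and a stop signal in
   bits 8..15 with 0x7f in the low byte, which is why only the signal number
   needs translating.  Signal numbers below assume x86 Linux numbering. */
#if 0   /* example only */
#include <assert.h>
#include <sys/wait.h>

static void waitstatus_layout_example(void)
{
    int killed  = 15;                 /* child killed by SIGTERM */
    int stopped = (19 << 8) | 0x7f;   /* child stopped by SIGSTOP */
    assert(WIFSIGNALED(killed)  && WTERMSIG(killed)  == 15);
    assert(WIFSTOPPED(stopped)  && WSTOPSIG(stopped) == 19);
}
#endif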
4514 int get_osversion(void)
4516 static int osversion;
4517 struct new_utsname buf;
4518 const char *s;
4519 int i, n, tmp;
4520 if (osversion)
4521 return osversion;
4522 if (qemu_uname_release && *qemu_uname_release) {
4523 s = qemu_uname_release;
4524 } else {
4525 if (sys_uname(&buf))
4526 return 0;
4527 s = buf.release;
4529 tmp = 0;
4530 for (i = 0; i < 3; i++) {
4531 n = 0;
4532 while (*s >= '0' && *s <= '9') {
4533 n *= 10;
4534 n += *s - '0';
4535 s++;
4537 tmp = (tmp << 8) + n;
4538 if (*s == '.')
4539 s++;
4541 osversion = tmp;
4542 return osversion;
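/* Illustrative, standalone sketch (not part of syscall.c): the packing
   produced by get_osversion() above.  A release string such as "2.6.32"
   becomes (2 << 16) | (6 << 8) | 32, i.e. 0x020620, the same shape as the
   kernel's KERNEL_VERSION() macro, so versions compare correctly as plain
   integers. */
#if 0   /* example only */
#include <assert.h>

static void osversion_example(void)
{
    assert(((2 << 16) | (6 << 8) | 32) == 0x020620);   /* "2.6.32" */
}
#endif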
4545 /* do_syscall() should always have a single exit point at the end so
4546 that actions, such as logging of syscall results, can be performed.
4547 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4548 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4549 abi_long arg2, abi_long arg3, abi_long arg4,
4550 abi_long arg5, abi_long arg6, abi_long arg7,
4551 abi_long arg8)
4553 abi_long ret;
4554 struct stat st;
4555 struct statfs stfs;
4556 void *p;
4558 #ifdef DEBUG
4559 gemu_log("syscall %d", num);
4560 #endif
4561 if(do_strace)
4562 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4564 switch(num) {
4565 case TARGET_NR_exit:
4566 #ifdef CONFIG_USE_NPTL
4567 /* In old applications this may be used to implement _exit(2).
4568 However in threaded applications it is used for thread termination,
4569 and _exit_group is used for application termination.
4570 Do thread termination if we have more than one thread. */
4571 /* FIXME: This probably breaks if a signal arrives. We should probably
4572 be disabling signals. */
4573 if (first_cpu->next_cpu) {
4574 TaskState *ts;
4575 CPUState **lastp;
4576 CPUState *p;
4578 cpu_list_lock();
4579 lastp = &first_cpu;
4580 p = first_cpu;
4581 while (p && p != (CPUState *)cpu_env) {
4582 lastp = &p->next_cpu;
4583 p = p->next_cpu;
4585 /* If we didn't find the CPU for this thread then something is
4586 horribly wrong. */
4587 if (!p)
4588 abort();
4589 /* Remove the CPU from the list. */
4590 *lastp = p->next_cpu;
4591 cpu_list_unlock();
4592 ts = ((CPUState *)cpu_env)->opaque;
4593 if (ts->child_tidptr) {
4594 put_user_u32(0, ts->child_tidptr);
4595 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4596 NULL, NULL, 0);
4598 thread_env = NULL;
4599 qemu_free(cpu_env);
4600 qemu_free(ts);
4601 pthread_exit(NULL);
4603 #endif
4604 #ifdef TARGET_GPROF
4605 _mcleanup();
4606 #endif
4607 gdb_exit(cpu_env, arg1);
4608 _exit(arg1);
4609 ret = 0; /* avoid warning */
4610 break;
4611 case TARGET_NR_read:
4612 if (arg3 == 0)
4613 ret = 0;
4614 else {
4615 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4616 goto efault;
4617 ret = get_errno(read(arg1, p, arg3));
4618 unlock_user(p, arg2, ret);
4620 break;
4621 case TARGET_NR_write:
4622 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4623 goto efault;
4624 ret = get_errno(write(arg1, p, arg3));
4625 unlock_user(p, arg2, 0);
4626 break;
4627 case TARGET_NR_open:
4628 if (!(p = lock_user_string(arg1)))
4629 goto efault;
4630 ret = get_errno(open(path(p),
4631 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4632 arg3));
4633 unlock_user(p, arg1, 0);
4634 break;
4635 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4636 case TARGET_NR_openat:
4637 if (!(p = lock_user_string(arg2)))
4638 goto efault;
4639 ret = get_errno(sys_openat(arg1,
4640 path(p),
4641 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4642 arg4));
4643 unlock_user(p, arg2, 0);
4644 break;
4645 #endif
4646 case TARGET_NR_close:
4647 ret = get_errno(close(arg1));
4648 break;
4649 case TARGET_NR_brk:
4650 ret = do_brk(arg1);
4651 break;
4652 case TARGET_NR_fork:
4653 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4654 break;
4655 #ifdef TARGET_NR_waitpid
4656 case TARGET_NR_waitpid:
4658 int status;
4659 ret = get_errno(waitpid(arg1, &status, arg3));
4660 if (!is_error(ret) && arg2
4661 && put_user_s32(host_to_target_waitstatus(status), arg2))
4662 goto efault;
4664 break;
4665 #endif
4666 #ifdef TARGET_NR_waitid
4667 case TARGET_NR_waitid:
4669 siginfo_t info;
4670 info.si_pid = 0;
4671 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4672 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4673 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4674 goto efault;
4675 host_to_target_siginfo(p, &info);
4676 unlock_user(p, arg3, sizeof(target_siginfo_t));
4679 break;
4680 #endif
4681 #ifdef TARGET_NR_creat /* not on alpha */
4682 case TARGET_NR_creat:
4683 if (!(p = lock_user_string(arg1)))
4684 goto efault;
4685 ret = get_errno(creat(p, arg2));
4686 unlock_user(p, arg1, 0);
4687 break;
4688 #endif
4689 case TARGET_NR_link:
4691 void * p2;
4692 p = lock_user_string(arg1);
4693 p2 = lock_user_string(arg2);
4694 if (!p || !p2)
4695 ret = -TARGET_EFAULT;
4696 else
4697 ret = get_errno(link(p, p2));
4698 unlock_user(p2, arg2, 0);
4699 unlock_user(p, arg1, 0);
4701 break;
4702 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4703 case TARGET_NR_linkat:
4705 void * p2 = NULL;
4706 if (!arg2 || !arg4)
4707 goto efault;
4708 p = lock_user_string(arg2);
4709 p2 = lock_user_string(arg4);
4710 if (!p || !p2)
4711 ret = -TARGET_EFAULT;
4712 else
4713 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4714 unlock_user(p, arg2, 0);
4715 unlock_user(p2, arg4, 0);
4717 break;
4718 #endif
4719 case TARGET_NR_unlink:
4720 if (!(p = lock_user_string(arg1)))
4721 goto efault;
4722 ret = get_errno(unlink(p));
4723 unlock_user(p, arg1, 0);
4724 break;
4725 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4726 case TARGET_NR_unlinkat:
4727 if (!(p = lock_user_string(arg2)))
4728 goto efault;
4729 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4730 unlock_user(p, arg2, 0);
4731 break;
4732 #endif
4733 case TARGET_NR_execve:
4735 char **argp, **envp;
4736 int argc, envc;
4737 abi_ulong gp;
4738 abi_ulong guest_argp;
4739 abi_ulong guest_envp;
4740 abi_ulong addr;
4741 char **q;
4743 argc = 0;
4744 guest_argp = arg2;
4745 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4746 if (get_user_ual(addr, gp))
4747 goto efault;
4748 if (!addr)
4749 break;
4750 argc++;
4752 envc = 0;
4753 guest_envp = arg3;
4754 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4755 if (get_user_ual(addr, gp))
4756 goto efault;
4757 if (!addr)
4758 break;
4759 envc++;
4762 argp = alloca((argc + 1) * sizeof(void *));
4763 envp = alloca((envc + 1) * sizeof(void *));
4765 for (gp = guest_argp, q = argp; gp;
4766 gp += sizeof(abi_ulong), q++) {
4767 if (get_user_ual(addr, gp))
4768 goto execve_efault;
4769 if (!addr)
4770 break;
4771 if (!(*q = lock_user_string(addr)))
4772 goto execve_efault;
4774 *q = NULL;
4776 for (gp = guest_envp, q = envp; gp;
4777 gp += sizeof(abi_ulong), q++) {
4778 if (get_user_ual(addr, gp))
4779 goto execve_efault;
4780 if (!addr)
4781 break;
4782 if (!(*q = lock_user_string(addr)))
4783 goto execve_efault;
4785 *q = NULL;
4787 if (!(p = lock_user_string(arg1)))
4788 goto execve_efault;
4789 ret = get_errno(execve(p, argp, envp));
4790 unlock_user(p, arg1, 0);
4792 goto execve_end;
4794 execve_efault:
4795 ret = -TARGET_EFAULT;
4797 execve_end:
4798 for (gp = guest_argp, q = argp; *q;
4799 gp += sizeof(abi_ulong), q++) {
4800 if (get_user_ual(addr, gp)
4801 || !addr)
4802 break;
4803 unlock_user(*q, addr, 0);
4805 for (gp = guest_envp, q = envp; *q;
4806 gp += sizeof(abi_ulong), q++) {
4807 if (get_user_ual(addr, gp)
4808 || !addr)
4809 break;
4810 unlock_user(*q, addr, 0);
4813 break;
4814 case TARGET_NR_chdir:
4815 if (!(p = lock_user_string(arg1)))
4816 goto efault;
4817 ret = get_errno(chdir(p));
4818 unlock_user(p, arg1, 0);
4819 break;
4820 #ifdef TARGET_NR_time
4821 case TARGET_NR_time:
4823 time_t host_time;
4824 ret = get_errno(time(&host_time));
4825 if (!is_error(ret)
4826 && arg1
4827 && put_user_sal(host_time, arg1))
4828 goto efault;
4830 break;
4831 #endif
4832 case TARGET_NR_mknod:
4833 if (!(p = lock_user_string(arg1)))
4834 goto efault;
4835 ret = get_errno(mknod(p, arg2, arg3));
4836 unlock_user(p, arg1, 0);
4837 break;
4838 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4839 case TARGET_NR_mknodat:
4840 if (!(p = lock_user_string(arg2)))
4841 goto efault;
4842 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4843 unlock_user(p, arg2, 0);
4844 break;
4845 #endif
4846 case TARGET_NR_chmod:
4847 if (!(p = lock_user_string(arg1)))
4848 goto efault;
4849 ret = get_errno(chmod(p, arg2));
4850 unlock_user(p, arg1, 0);
4851 break;
4852 #ifdef TARGET_NR_break
4853 case TARGET_NR_break:
4854 goto unimplemented;
4855 #endif
4856 #ifdef TARGET_NR_oldstat
4857 case TARGET_NR_oldstat:
4858 goto unimplemented;
4859 #endif
4860 case TARGET_NR_lseek:
4861 ret = get_errno(lseek(arg1, arg2, arg3));
4862 break;
4863 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4864 /* Alpha specific */
4865 case TARGET_NR_getxpid:
4866 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
4867 ret = get_errno(getpid());
4868 break;
4869 #endif
4870 #ifdef TARGET_NR_getpid
4871 case TARGET_NR_getpid:
4872 ret = get_errno(getpid());
4873 break;
4874 #endif
4875 case TARGET_NR_mount:
4877 /* need to look at the data field */
4878 void *p2, *p3;
4879 p = lock_user_string(arg1);
4880 p2 = lock_user_string(arg2);
4881 p3 = lock_user_string(arg3);
4882 if (!p || !p2 || !p3)
4883 ret = -TARGET_EFAULT;
4884 else {
4885 /* FIXME - arg5 should be locked, but it isn't clear how to
4886 * do that since it's not guaranteed to be a NULL-terminated
4887 * string.
4889 if ( ! arg5 )
4890 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
4891 else
4892 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4894 unlock_user(p, arg1, 0);
4895 unlock_user(p2, arg2, 0);
4896 unlock_user(p3, arg3, 0);
4897 break;
4899 #ifdef TARGET_NR_umount
4900 case TARGET_NR_umount:
4901 if (!(p = lock_user_string(arg1)))
4902 goto efault;
4903 ret = get_errno(umount(p));
4904 unlock_user(p, arg1, 0);
4905 break;
4906 #endif
4907 #ifdef TARGET_NR_stime /* not on alpha */
4908 case TARGET_NR_stime:
4910 time_t host_time;
4911 if (get_user_sal(host_time, arg1))
4912 goto efault;
4913 ret = get_errno(stime(&host_time));
4915 break;
4916 #endif
4917 case TARGET_NR_ptrace:
4918 goto unimplemented;
4919 #ifdef TARGET_NR_alarm /* not on alpha */
4920 case TARGET_NR_alarm:
4921 ret = alarm(arg1);
4922 break;
4923 #endif
4924 #ifdef TARGET_NR_oldfstat
4925 case TARGET_NR_oldfstat:
4926 goto unimplemented;
4927 #endif
4928 #ifdef TARGET_NR_pause /* not on alpha */
4929 case TARGET_NR_pause:
4930 ret = get_errno(pause());
4931 break;
4932 #endif
4933 #ifdef TARGET_NR_utime
4934 case TARGET_NR_utime:
4936 struct utimbuf tbuf, *host_tbuf;
4937 struct target_utimbuf *target_tbuf;
4938 if (arg2) {
4939 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4940 goto efault;
4941 tbuf.actime = tswapl(target_tbuf->actime);
4942 tbuf.modtime = tswapl(target_tbuf->modtime);
4943 unlock_user_struct(target_tbuf, arg2, 0);
4944 host_tbuf = &tbuf;
4945 } else {
4946 host_tbuf = NULL;
4948 if (!(p = lock_user_string(arg1)))
4949 goto efault;
4950 ret = get_errno(utime(p, host_tbuf));
4951 unlock_user(p, arg1, 0);
4953 break;
4954 #endif
4955 case TARGET_NR_utimes:
4957 struct timeval *tvp, tv[2];
4958 if (arg2) {
4959 if (copy_from_user_timeval(&tv[0], arg2)
4960 || copy_from_user_timeval(&tv[1],
4961 arg2 + sizeof(struct target_timeval)))
4962 goto efault;
4963 tvp = tv;
4964 } else {
4965 tvp = NULL;
4967 if (!(p = lock_user_string(arg1)))
4968 goto efault;
4969 ret = get_errno(utimes(p, tvp));
4970 unlock_user(p, arg1, 0);
4972 break;
4973 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4974 case TARGET_NR_futimesat:
4976 struct timeval *tvp, tv[2];
4977 if (arg3) {
4978 if (copy_from_user_timeval(&tv[0], arg3)
4979 || copy_from_user_timeval(&tv[1],
4980 arg3 + sizeof(struct target_timeval)))
4981 goto efault;
4982 tvp = tv;
4983 } else {
4984 tvp = NULL;
4986 if (!(p = lock_user_string(arg2)))
4987 goto efault;
4988 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4989 unlock_user(p, arg2, 0);
4991 break;
4992 #endif
4993 #ifdef TARGET_NR_stty
4994 case TARGET_NR_stty:
4995 goto unimplemented;
4996 #endif
4997 #ifdef TARGET_NR_gtty
4998 case TARGET_NR_gtty:
4999 goto unimplemented;
5000 #endif
5001 case TARGET_NR_access:
5002 if (!(p = lock_user_string(arg1)))
5003 goto efault;
5004 ret = get_errno(access(path(p), arg2));
5005 unlock_user(p, arg1, 0);
5006 break;
5007 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5008 case TARGET_NR_faccessat:
5009 if (!(p = lock_user_string(arg2)))
5010 goto efault;
5011 ret = get_errno(sys_faccessat(arg1, p, arg3));
5012 unlock_user(p, arg2, 0);
5013 break;
5014 #endif
5015 #ifdef TARGET_NR_nice /* not on alpha */
5016 case TARGET_NR_nice:
5017 ret = get_errno(nice(arg1));
5018 break;
5019 #endif
5020 #ifdef TARGET_NR_ftime
5021 case TARGET_NR_ftime:
5022 goto unimplemented;
5023 #endif
5024 case TARGET_NR_sync:
5025 sync();
5026 ret = 0;
5027 break;
5028 case TARGET_NR_kill:
5029 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5030 break;
5031 case TARGET_NR_rename:
5033 void *p2;
5034 p = lock_user_string(arg1);
5035 p2 = lock_user_string(arg2);
5036 if (!p || !p2)
5037 ret = -TARGET_EFAULT;
5038 else
5039 ret = get_errno(rename(p, p2));
5040 unlock_user(p2, arg2, 0);
5041 unlock_user(p, arg1, 0);
5043 break;
5044 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5045 case TARGET_NR_renameat:
5047 void *p2;
5048 p = lock_user_string(arg2);
5049 p2 = lock_user_string(arg4);
5050 if (!p || !p2)
5051 ret = -TARGET_EFAULT;
5052 else
5053 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5054 unlock_user(p2, arg4, 0);
5055 unlock_user(p, arg2, 0);
5057 break;
5058 #endif
5059 case TARGET_NR_mkdir:
5060 if (!(p = lock_user_string(arg1)))
5061 goto efault;
5062 ret = get_errno(mkdir(p, arg2));
5063 unlock_user(p, arg1, 0);
5064 break;
5065 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5066 case TARGET_NR_mkdirat:
5067 if (!(p = lock_user_string(arg2)))
5068 goto efault;
5069 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5070 unlock_user(p, arg2, 0);
5071 break;
5072 #endif
5073 case TARGET_NR_rmdir:
5074 if (!(p = lock_user_string(arg1)))
5075 goto efault;
5076 ret = get_errno(rmdir(p));
5077 unlock_user(p, arg1, 0);
5078 break;
5079 case TARGET_NR_dup:
5080 ret = get_errno(dup(arg1));
5081 break;
5082 case TARGET_NR_pipe:
5083 ret = do_pipe(cpu_env, arg1, 0, 0);
5084 break;
5085 #ifdef TARGET_NR_pipe2
5086 case TARGET_NR_pipe2:
5087 ret = do_pipe(cpu_env, arg1, arg2, 1);
5088 break;
5089 #endif
5090 case TARGET_NR_times:
5092 struct target_tms *tmsp;
5093 struct tms tms;
5094 ret = get_errno(times(&tms));
5095 if (arg1) {
5096 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5097 if (!tmsp)
5098 goto efault;
5099 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
5100 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
5101 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
5102 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
5104 if (!is_error(ret))
5105 ret = host_to_target_clock_t(ret);
5107 break;
5108 #ifdef TARGET_NR_prof
5109 case TARGET_NR_prof:
5110 goto unimplemented;
5111 #endif
5112 #ifdef TARGET_NR_signal
5113 case TARGET_NR_signal:
5114 goto unimplemented;
5115 #endif
5116 case TARGET_NR_acct:
5117 if (arg1 == 0) {
5118 ret = get_errno(acct(NULL));
5119 } else {
5120 if (!(p = lock_user_string(arg1)))
5121 goto efault;
5122 ret = get_errno(acct(path(p)));
5123 unlock_user(p, arg1, 0);
5125 break;
5126 #ifdef TARGET_NR_umount2 /* not on alpha */
5127 case TARGET_NR_umount2:
5128 if (!(p = lock_user_string(arg1)))
5129 goto efault;
5130 ret = get_errno(umount2(p, arg2));
5131 unlock_user(p, arg1, 0);
5132 break;
5133 #endif
5134 #ifdef TARGET_NR_lock
5135 case TARGET_NR_lock:
5136 goto unimplemented;
5137 #endif
5138 case TARGET_NR_ioctl:
5139 ret = do_ioctl(arg1, arg2, arg3);
5140 break;
5141 case TARGET_NR_fcntl:
5142 ret = do_fcntl(arg1, arg2, arg3);
5143 break;
5144 #ifdef TARGET_NR_mpx
5145 case TARGET_NR_mpx:
5146 goto unimplemented;
5147 #endif
5148 case TARGET_NR_setpgid:
5149 ret = get_errno(setpgid(arg1, arg2));
5150 break;
5151 #ifdef TARGET_NR_ulimit
5152 case TARGET_NR_ulimit:
5153 goto unimplemented;
5154 #endif
5155 #ifdef TARGET_NR_oldolduname
5156 case TARGET_NR_oldolduname:
5157 goto unimplemented;
5158 #endif
5159 case TARGET_NR_umask:
5160 ret = get_errno(umask(arg1));
5161 break;
5162 case TARGET_NR_chroot:
5163 if (!(p = lock_user_string(arg1)))
5164 goto efault;
5165 ret = get_errno(chroot(p));
5166 unlock_user(p, arg1, 0);
5167 break;
5168 case TARGET_NR_ustat:
5169 goto unimplemented;
5170 case TARGET_NR_dup2:
5171 ret = get_errno(dup2(arg1, arg2));
5172 break;
5173 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5174 case TARGET_NR_dup3:
5175 ret = get_errno(dup3(arg1, arg2, arg3));
5176 break;
5177 #endif
5178 #ifdef TARGET_NR_getppid /* not on alpha */
5179 case TARGET_NR_getppid:
5180 ret = get_errno(getppid());
5181 break;
5182 #endif
5183 case TARGET_NR_getpgrp:
5184 ret = get_errno(getpgrp());
5185 break;
5186 case TARGET_NR_setsid:
5187 ret = get_errno(setsid());
5188 break;
5189 #ifdef TARGET_NR_sigaction
5190 case TARGET_NR_sigaction:
5192 #if defined(TARGET_ALPHA)
5193 struct target_sigaction act, oact, *pact = 0;
5194 struct target_old_sigaction *old_act;
5195 if (arg2) {
5196 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5197 goto efault;
5198 act._sa_handler = old_act->_sa_handler;
5199 target_siginitset(&act.sa_mask, old_act->sa_mask);
5200 act.sa_flags = old_act->sa_flags;
5201 act.sa_restorer = 0;
5202 unlock_user_struct(old_act, arg2, 0);
5203 pact = &act;
5205 ret = get_errno(do_sigaction(arg1, pact, &oact));
5206 if (!is_error(ret) && arg3) {
5207 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5208 goto efault;
5209 old_act->_sa_handler = oact._sa_handler;
5210 old_act->sa_mask = oact.sa_mask.sig[0];
5211 old_act->sa_flags = oact.sa_flags;
5212 unlock_user_struct(old_act, arg3, 1);
5214 #elif defined(TARGET_MIPS)
5215 struct target_sigaction act, oact, *pact, *old_act;
5217 if (arg2) {
5218 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5219 goto efault;
5220 act._sa_handler = old_act->_sa_handler;
5221 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5222 act.sa_flags = old_act->sa_flags;
5223 unlock_user_struct(old_act, arg2, 0);
5224 pact = &act;
5225 } else {
5226 pact = NULL;
5229 ret = get_errno(do_sigaction(arg1, pact, &oact));
5231 if (!is_error(ret) && arg3) {
5232 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5233 goto efault;
5234 old_act->_sa_handler = oact._sa_handler;
5235 old_act->sa_flags = oact.sa_flags;
5236 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5237 old_act->sa_mask.sig[1] = 0;
5238 old_act->sa_mask.sig[2] = 0;
5239 old_act->sa_mask.sig[3] = 0;
5240 unlock_user_struct(old_act, arg3, 1);
5242 #else
5243 struct target_old_sigaction *old_act;
5244 struct target_sigaction act, oact, *pact;
5245 if (arg2) {
5246 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5247 goto efault;
5248 act._sa_handler = old_act->_sa_handler;
5249 target_siginitset(&act.sa_mask, old_act->sa_mask);
5250 act.sa_flags = old_act->sa_flags;
5251 act.sa_restorer = old_act->sa_restorer;
5252 unlock_user_struct(old_act, arg2, 0);
5253 pact = &act;
5254 } else {
5255 pact = NULL;
5257 ret = get_errno(do_sigaction(arg1, pact, &oact));
5258 if (!is_error(ret) && arg3) {
5259 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5260 goto efault;
5261 old_act->_sa_handler = oact._sa_handler;
5262 old_act->sa_mask = oact.sa_mask.sig[0];
5263 old_act->sa_flags = oact.sa_flags;
5264 old_act->sa_restorer = oact.sa_restorer;
5265 unlock_user_struct(old_act, arg3, 1);
5267 #endif
5269 break;
5270 #endif
5271 case TARGET_NR_rt_sigaction:
5273 #if defined(TARGET_ALPHA)
5274 struct target_sigaction act, oact, *pact = 0;
5275 struct target_rt_sigaction *rt_act;
5276 /* ??? arg4 == sizeof(sigset_t). */
5277 if (arg2) {
5278 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5279 goto efault;
5280 act._sa_handler = rt_act->_sa_handler;
5281 act.sa_mask = rt_act->sa_mask;
5282 act.sa_flags = rt_act->sa_flags;
5283 act.sa_restorer = arg5;
5284 unlock_user_struct(rt_act, arg2, 0);
5285 pact = &act;
5287 ret = get_errno(do_sigaction(arg1, pact, &oact));
5288 if (!is_error(ret) && arg3) {
5289 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5290 goto efault;
5291 rt_act->_sa_handler = oact._sa_handler;
5292 rt_act->sa_mask = oact.sa_mask;
5293 rt_act->sa_flags = oact.sa_flags;
5294 unlock_user_struct(rt_act, arg3, 1);
5296 #else
5297 struct target_sigaction *act;
5298 struct target_sigaction *oact;
5300 if (arg2) {
5301 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5302 goto efault;
5303 } else
5304 act = NULL;
5305 if (arg3) {
5306 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5307 ret = -TARGET_EFAULT;
5308 goto rt_sigaction_fail;
5310 } else
5311 oact = NULL;
5312 ret = get_errno(do_sigaction(arg1, act, oact));
5313 rt_sigaction_fail:
5314 if (act)
5315 unlock_user_struct(act, arg2, 0);
5316 if (oact)
5317 unlock_user_struct(oact, arg3, 1);
5318 #endif
5320 break;
5321 #ifdef TARGET_NR_sgetmask /* not on alpha */
5322 case TARGET_NR_sgetmask:
5324 sigset_t cur_set;
5325 abi_ulong target_set;
5326 sigprocmask(0, NULL, &cur_set);
5327 host_to_target_old_sigset(&target_set, &cur_set);
5328 ret = target_set;
5330 break;
5331 #endif
5332 #ifdef TARGET_NR_ssetmask /* not on alpha */
5333 case TARGET_NR_ssetmask:
5335 sigset_t set, oset, cur_set;
5336 abi_ulong target_set = arg1;
5337 sigprocmask(0, NULL, &cur_set);
5338 target_to_host_old_sigset(&set, &target_set);
5339 sigorset(&set, &set, &cur_set);
5340 sigprocmask(SIG_SETMASK, &set, &oset);
5341 host_to_target_old_sigset(&target_set, &oset);
5342 ret = target_set;
5344 break;
5345 #endif
5346 #ifdef TARGET_NR_sigprocmask
5347 case TARGET_NR_sigprocmask:
5349 #if defined(TARGET_ALPHA)
5350 sigset_t set, oldset;
5351 abi_ulong mask;
5352 int how;
5354 switch (arg1) {
5355 case TARGET_SIG_BLOCK:
5356 how = SIG_BLOCK;
5357 break;
5358 case TARGET_SIG_UNBLOCK:
5359 how = SIG_UNBLOCK;
5360 break;
5361 case TARGET_SIG_SETMASK:
5362 how = SIG_SETMASK;
5363 break;
5364 default:
5365 ret = -TARGET_EINVAL;
5366 goto fail;
5368 mask = arg2;
5369 target_to_host_old_sigset(&set, &mask);
5371 ret = get_errno(sigprocmask(how, &set, &oldset));
5373 if (!is_error(ret)) {
5374 host_to_target_old_sigset(&mask, &oldset);
5375 ret = mask;
5376 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5378 #else
5379 sigset_t set, oldset, *set_ptr;
5380 int how;
5382 if (arg2) {
5383 switch (arg1) {
5384 case TARGET_SIG_BLOCK:
5385 how = SIG_BLOCK;
5386 break;
5387 case TARGET_SIG_UNBLOCK:
5388 how = SIG_UNBLOCK;
5389 break;
5390 case TARGET_SIG_SETMASK:
5391 how = SIG_SETMASK;
5392 break;
5393 default:
5394 ret = -TARGET_EINVAL;
5395 goto fail;
5397 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5398 goto efault;
5399 target_to_host_old_sigset(&set, p);
5400 unlock_user(p, arg2, 0);
5401 set_ptr = &set;
5402 } else {
5403 how = 0;
5404 set_ptr = NULL;
5406 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5407 if (!is_error(ret) && arg3) {
5408 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5409 goto efault;
5410 host_to_target_old_sigset(p, &oldset);
5411 unlock_user(p, arg3, sizeof(target_sigset_t));
5413 #endif
5415 break;
5416 #endif
5417 case TARGET_NR_rt_sigprocmask:
5419 int how = arg1;
5420 sigset_t set, oldset, *set_ptr;
5422 if (arg2) {
5423 switch(how) {
5424 case TARGET_SIG_BLOCK:
5425 how = SIG_BLOCK;
5426 break;
5427 case TARGET_SIG_UNBLOCK:
5428 how = SIG_UNBLOCK;
5429 break;
5430 case TARGET_SIG_SETMASK:
5431 how = SIG_SETMASK;
5432 break;
5433 default:
5434 ret = -TARGET_EINVAL;
5435 goto fail;
5437 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5438 goto efault;
5439 target_to_host_sigset(&set, p);
5440 unlock_user(p, arg2, 0);
5441 set_ptr = &set;
5442 } else {
5443 how = 0;
5444 set_ptr = NULL;
5446 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5447 if (!is_error(ret) && arg3) {
5448 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5449 goto efault;
5450 host_to_target_sigset(p, &oldset);
5451 unlock_user(p, arg3, sizeof(target_sigset_t));
5454 break;
5455 #ifdef TARGET_NR_sigpending
5456 case TARGET_NR_sigpending:
5458 sigset_t set;
5459 ret = get_errno(sigpending(&set));
5460 if (!is_error(ret)) {
5461 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5462 goto efault;
5463 host_to_target_old_sigset(p, &set);
5464 unlock_user(p, arg1, sizeof(target_sigset_t));
5467 break;
5468 #endif
5469 case TARGET_NR_rt_sigpending:
5471 sigset_t set;
5472 ret = get_errno(sigpending(&set));
5473 if (!is_error(ret)) {
5474 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5475 goto efault;
5476 host_to_target_sigset(p, &set);
5477 unlock_user(p, arg1, sizeof(target_sigset_t));
5480 break;
5481 #ifdef TARGET_NR_sigsuspend
5482 case TARGET_NR_sigsuspend:
5484 sigset_t set;
5485 #if defined(TARGET_ALPHA)
5486 abi_ulong mask = arg1;
5487 target_to_host_old_sigset(&set, &mask);
5488 #else
5489 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5490 goto efault;
5491 target_to_host_old_sigset(&set, p);
5492 unlock_user(p, arg1, 0);
5493 #endif
5494 ret = get_errno(sigsuspend(&set));
5496 break;
5497 #endif
5498 case TARGET_NR_rt_sigsuspend:
5500 sigset_t set;
5501 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5502 goto efault;
5503 target_to_host_sigset(&set, p);
5504 unlock_user(p, arg1, 0);
5505 ret = get_errno(sigsuspend(&set));
5507 break;
5508 case TARGET_NR_rt_sigtimedwait:
5510 sigset_t set;
5511 struct timespec uts, *puts;
5512 siginfo_t uinfo;
5514 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5515 goto efault;
5516 target_to_host_sigset(&set, p);
5517 unlock_user(p, arg1, 0);
5518 if (arg3) {
5519 puts = &uts;
5520 target_to_host_timespec(puts, arg3);
5521 } else {
5522 puts = NULL;
5524 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5525 if (!is_error(ret) && arg2) {
5526 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5527 goto efault;
5528 host_to_target_siginfo(p, &uinfo);
5529 unlock_user(p, arg2, sizeof(target_siginfo_t));
5532 break;
5533 case TARGET_NR_rt_sigqueueinfo:
5535 siginfo_t uinfo;
5536 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
5537 goto efault;
5538 target_to_host_siginfo(&uinfo, p);
5539 unlock_user(p, arg1, 0);
5540 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5542 break;
5543 #ifdef TARGET_NR_sigreturn
5544 case TARGET_NR_sigreturn:
5545 /* NOTE: ret is eax, so no transcoding is needed */
5546 ret = do_sigreturn(cpu_env);
5547 break;
5548 #endif
5549 case TARGET_NR_rt_sigreturn:
5550 /* NOTE: ret is eax, so no transcoding is needed */
5551 ret = do_rt_sigreturn(cpu_env);
5552 break;
5553 case TARGET_NR_sethostname:
5554 if (!(p = lock_user_string(arg1)))
5555 goto efault;
5556 ret = get_errno(sethostname(p, arg2));
5557 unlock_user(p, arg1, 0);
5558 break;
5559 case TARGET_NR_setrlimit:
5561 int resource = arg1;
5562 struct target_rlimit *target_rlim;
5563 struct rlimit rlim;
5564 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5565 goto efault;
5566 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5567 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5568 unlock_user_struct(target_rlim, arg2, 0);
5569 ret = get_errno(setrlimit(resource, &rlim));
5571 break;
5572 case TARGET_NR_getrlimit:
5574 int resource = arg1;
5575 struct target_rlimit *target_rlim;
5576 struct rlimit rlim;
5578 ret = get_errno(getrlimit(resource, &rlim));
5579 if (!is_error(ret)) {
5580 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5581 goto efault;
5582 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5583 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5584 unlock_user_struct(target_rlim, arg2, 1);
5587 break;
5588 case TARGET_NR_getrusage:
5590 struct rusage rusage;
5591 ret = get_errno(getrusage(arg1, &rusage));
5592 if (!is_error(ret)) {
5593 host_to_target_rusage(arg2, &rusage);
5596 break;
5597 case TARGET_NR_gettimeofday:
5599 struct timeval tv;
5600 ret = get_errno(gettimeofday(&tv, NULL));
5601 if (!is_error(ret)) {
5602 if (copy_to_user_timeval(arg1, &tv))
5603 goto efault;
5606 break;
5607 case TARGET_NR_settimeofday:
5609 struct timeval tv;
5610 if (copy_from_user_timeval(&tv, arg1))
5611 goto efault;
5612 ret = get_errno(settimeofday(&tv, NULL));
5614 break;
5615 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5616 case TARGET_NR_select:
5618 struct target_sel_arg_struct *sel;
5619 abi_ulong inp, outp, exp, tvp;
5620 long nsel;
5622 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5623 goto efault;
5624 nsel = tswapl(sel->n);
5625 inp = tswapl(sel->inp);
5626 outp = tswapl(sel->outp);
5627 exp = tswapl(sel->exp);
5628 tvp = tswapl(sel->tvp);
5629 unlock_user_struct(sel, arg1, 0);
5630 ret = do_select(nsel, inp, outp, exp, tvp);
5632 break;
5633 #endif
5634 #ifdef TARGET_NR_pselect6
5635 case TARGET_NR_pselect6:
5637 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
5638 fd_set rfds, wfds, efds;
5639 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
5640 struct timespec ts, *ts_ptr;
5643 * The 6th arg is actually two args smashed together,
5644 * so we cannot use the C library.
5646 sigset_t set;
5647 struct {
5648 sigset_t *set;
5649 size_t size;
5650 } sig, *sig_ptr;
5652 abi_ulong arg_sigset, arg_sigsize, *arg7;
5653 target_sigset_t *target_sigset;
5655 n = arg1;
5656 rfd_addr = arg2;
5657 wfd_addr = arg3;
5658 efd_addr = arg4;
5659 ts_addr = arg5;
5661 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
5662 if (ret) {
5663 goto fail;
5665 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
5666 if (ret) {
5667 goto fail;
5669 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
5670 if (ret) {
5671 goto fail;
5675 * This takes a timespec, and not a timeval, so we cannot
5676 * use the do_select() helper ...
5678 if (ts_addr) {
5679 if (target_to_host_timespec(&ts, ts_addr)) {
5680 goto efault;
5682 ts_ptr = &ts;
5683 } else {
5684 ts_ptr = NULL;
5687 /* Extract the two packed args for the sigset */
5688 if (arg6) {
5689 sig_ptr = &sig;
5690 sig.size = _NSIG / 8;
5692 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
5693 if (!arg7) {
5694 goto efault;
5696 arg_sigset = tswapl(arg7[0]);
5697 arg_sigsize = tswapl(arg7[1]);
5698 unlock_user(arg7, arg6, 0);
5700 if (arg_sigset) {
5701 sig.set = &set;
5702 if (arg_sigsize != sizeof(*target_sigset)) {
5703 /* Like the kernel, we enforce correct size sigsets */
5704 ret = -TARGET_EINVAL;
5705 goto fail;
5707 target_sigset = lock_user(VERIFY_READ, arg_sigset,
5708 sizeof(*target_sigset), 1);
5709 if (!target_sigset) {
5710 goto efault;
5712 target_to_host_sigset(&set, target_sigset);
5713 unlock_user(target_sigset, arg_sigset, 0);
5714 } else {
5715 sig.set = NULL;
5717 } else {
5718 sig_ptr = NULL;
5721 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
5722 ts_ptr, sig_ptr));
5724 if (!is_error(ret)) {
5725 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
5726 goto efault;
5727 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
5728 goto efault;
5729 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
5730 goto efault;
5732 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
5733 goto efault;
5736 break;
5737 #endif
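/* Illustrative, standalone guest-side sketch (not part of syscall.c) of the
   packed sixth argument checked above: the kernel ABI passes a pointer to a
   { sigset pointer, size } pair and rejects any size other than _NSIG / 8
   with EINVAL, which the TARGET_EINVAL path above now mirrors.  Assumes a
   Linux guest whose libc exposes SYS_pselect6. */
#if 0   /* example only */
#include <errno.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    sigset_t set;
    struct { const sigset_t *ss; size_t len; } sig = { &set, 1 /* bogus size */ };

    sigemptyset(&set);
    long r = syscall(SYS_pselect6, 0, NULL, NULL, NULL, NULL, &sig);
    printf("pselect6 with bogus sigsetsize -> %ld (errno %d, expect EINVAL)\n",
           r, errno);
    return 0;
}
#endif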
5738 case TARGET_NR_symlink:
5740 void *p2;
5741 p = lock_user_string(arg1);
5742 p2 = lock_user_string(arg2);
5743 if (!p || !p2)
5744 ret = -TARGET_EFAULT;
5745 else
5746 ret = get_errno(symlink(p, p2));
5747 unlock_user(p2, arg2, 0);
5748 unlock_user(p, arg1, 0);
5750 break;
5751 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5752 case TARGET_NR_symlinkat:
5754 void *p2;
5755 p = lock_user_string(arg1);
5756 p2 = lock_user_string(arg3);
5757 if (!p || !p2)
5758 ret = -TARGET_EFAULT;
5759 else
5760 ret = get_errno(sys_symlinkat(p, arg2, p2));
5761 unlock_user(p2, arg3, 0);
5762 unlock_user(p, arg1, 0);
5764 break;
5765 #endif
5766 #ifdef TARGET_NR_oldlstat
5767 case TARGET_NR_oldlstat:
5768 goto unimplemented;
5769 #endif
5770 case TARGET_NR_readlink:
5772 void *p2, *temp;
5773 p = lock_user_string(arg1);
5774 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5775 if (!p || !p2)
5776 ret = -TARGET_EFAULT;
5777 else {
5778 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5779 char real[PATH_MAX];
5780 temp = realpath(exec_path,real);
5781 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
5782 snprintf((char *)p2, arg3, "%s", real);
5784 else
5785 ret = get_errno(readlink(path(p), p2, arg3));
5787 unlock_user(p2, arg2, ret);
5788 unlock_user(p, arg1, 0);
5790 break;
5791 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5792 case TARGET_NR_readlinkat:
5794 void *p2;
5795 p = lock_user_string(arg2);
5796 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5797 if (!p || !p2)
5798 ret = -TARGET_EFAULT;
5799 else
5800 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5801 unlock_user(p2, arg3, ret);
5802 unlock_user(p, arg2, 0);
5804 break;
5805 #endif
5806 #ifdef TARGET_NR_uselib
5807 case TARGET_NR_uselib:
5808 goto unimplemented;
5809 #endif
5810 #ifdef TARGET_NR_swapon
5811 case TARGET_NR_swapon:
5812 if (!(p = lock_user_string(arg1)))
5813 goto efault;
5814 ret = get_errno(swapon(p, arg2));
5815 unlock_user(p, arg1, 0);
5816 break;
5817 #endif
5818 case TARGET_NR_reboot:
5819 goto unimplemented;
5820 #ifdef TARGET_NR_readdir
5821 case TARGET_NR_readdir:
5822 goto unimplemented;
5823 #endif
5824 #ifdef TARGET_NR_mmap
5825 case TARGET_NR_mmap:
5826 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5827 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5828 || defined(TARGET_S390X)
5830 abi_ulong *v;
5831 abi_ulong v1, v2, v3, v4, v5, v6;
5832 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5833 goto efault;
5834 v1 = tswapl(v[0]);
5835 v2 = tswapl(v[1]);
5836 v3 = tswapl(v[2]);
5837 v4 = tswapl(v[3]);
5838 v5 = tswapl(v[4]);
5839 v6 = tswapl(v[5]);
5840 unlock_user(v, arg1, 0);
5841 ret = get_errno(target_mmap(v1, v2, v3,
5842 target_to_host_bitmask(v4, mmap_flags_tbl),
5843 v5, v6));
5845 #else
5846 ret = get_errno(target_mmap(arg1, arg2, arg3,
5847 target_to_host_bitmask(arg4, mmap_flags_tbl),
5848 arg5,
5849 arg6));
5850 #endif
5851 break;
5852 #endif
5853 #ifdef TARGET_NR_mmap2
5854 case TARGET_NR_mmap2:
5855 #ifndef MMAP_SHIFT
5856 #define MMAP_SHIFT 12
5857 #endif
5858 ret = get_errno(target_mmap(arg1, arg2, arg3,
5859 target_to_host_bitmask(arg4, mmap_flags_tbl),
5860 arg5,
5861 arg6 << MMAP_SHIFT));
5862 break;
5863 #endif
5864 case TARGET_NR_munmap:
5865 ret = get_errno(target_munmap(arg1, arg2));
5866 break;
5867 case TARGET_NR_mprotect:
5869 TaskState *ts = ((CPUState *)cpu_env)->opaque;
5870 /* Special hack to detect libc making the stack executable. */
5871 if ((arg3 & PROT_GROWSDOWN)
5872 && arg1 >= ts->info->stack_limit
5873 && arg1 <= ts->info->start_stack) {
5874 arg3 &= ~PROT_GROWSDOWN;
5875 arg2 = arg2 + arg1 - ts->info->stack_limit;
5876 arg1 = ts->info->stack_limit;
5879 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5880 break;
5881 #ifdef TARGET_NR_mremap
5882 case TARGET_NR_mremap:
5883 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5884 break;
5885 #endif
5886 /* ??? msync/mlock/munlock are broken for softmmu. */
5887 #ifdef TARGET_NR_msync
5888 case TARGET_NR_msync:
5889 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5890 break;
5891 #endif
5892 #ifdef TARGET_NR_mlock
5893 case TARGET_NR_mlock:
5894 ret = get_errno(mlock(g2h(arg1), arg2));
5895 break;
5896 #endif
5897 #ifdef TARGET_NR_munlock
5898 case TARGET_NR_munlock:
5899 ret = get_errno(munlock(g2h(arg1), arg2));
5900 break;
5901 #endif
5902 #ifdef TARGET_NR_mlockall
5903 case TARGET_NR_mlockall:
5904 ret = get_errno(mlockall(arg1));
5905 break;
5906 #endif
5907 #ifdef TARGET_NR_munlockall
5908 case TARGET_NR_munlockall:
5909 ret = get_errno(munlockall());
5910 break;
5911 #endif
5912 case TARGET_NR_truncate:
5913 if (!(p = lock_user_string(arg1)))
5914 goto efault;
5915 ret = get_errno(truncate(p, arg2));
5916 unlock_user(p, arg1, 0);
5917 break;
5918 case TARGET_NR_ftruncate:
5919 ret = get_errno(ftruncate(arg1, arg2));
5920 break;
5921 case TARGET_NR_fchmod:
5922 ret = get_errno(fchmod(arg1, arg2));
5923 break;
5924 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5925 case TARGET_NR_fchmodat:
5926 if (!(p = lock_user_string(arg2)))
5927 goto efault;
5928 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5929 unlock_user(p, arg2, 0);
5930 break;
5931 #endif
5932 case TARGET_NR_getpriority:
5933 /* libc does special remapping of the return value of
5934 * sys_getpriority() so it's just easiest to call
5935 * sys_getpriority() directly rather than through libc. */
5936 ret = get_errno(sys_getpriority(arg1, arg2));
5937 break;
5938 case TARGET_NR_setpriority:
5939 ret = get_errno(setpriority(arg1, arg2, arg3));
5940 break;
5941 #ifdef TARGET_NR_profil
5942 case TARGET_NR_profil:
5943 goto unimplemented;
5944 #endif
5945 case TARGET_NR_statfs:
5946 if (!(p = lock_user_string(arg1)))
5947 goto efault;
5948 ret = get_errno(statfs(path(p), &stfs));
5949 unlock_user(p, arg1, 0);
5950 convert_statfs:
5951 if (!is_error(ret)) {
5952 struct target_statfs *target_stfs;
5954 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5955 goto efault;
5956 __put_user(stfs.f_type, &target_stfs->f_type);
5957 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5958 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5959 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5960 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5961 __put_user(stfs.f_files, &target_stfs->f_files);
5962 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5963 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5964 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5965 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5966 unlock_user_struct(target_stfs, arg2, 1);
5968 break;
5969 case TARGET_NR_fstatfs:
5970 ret = get_errno(fstatfs(arg1, &stfs));
5971 goto convert_statfs;
5972 #ifdef TARGET_NR_statfs64
5973 case TARGET_NR_statfs64:
5974 if (!(p = lock_user_string(arg1)))
5975 goto efault;
5976 ret = get_errno(statfs(path(p), &stfs));
5977 unlock_user(p, arg1, 0);
5978 convert_statfs64:
5979 if (!is_error(ret)) {
5980 struct target_statfs64 *target_stfs;
5982 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5983 goto efault;
5984 __put_user(stfs.f_type, &target_stfs->f_type);
5985 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5986 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5987 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5988 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5989 __put_user(stfs.f_files, &target_stfs->f_files);
5990 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5991 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5992 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5993 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5994 unlock_user_struct(target_stfs, arg3, 1);
5996 break;
5997 case TARGET_NR_fstatfs64:
5998 ret = get_errno(fstatfs(arg1, &stfs));
5999 goto convert_statfs64;
6000 #endif
6001 #ifdef TARGET_NR_ioperm
6002 case TARGET_NR_ioperm:
6003 goto unimplemented;
6004 #endif
6005 #ifdef TARGET_NR_socketcall
6006 case TARGET_NR_socketcall:
6007 ret = do_socketcall(arg1, arg2);
6008 break;
6009 #endif
6010 #ifdef TARGET_NR_accept
6011 case TARGET_NR_accept:
6012 ret = do_accept(arg1, arg2, arg3);
6013 break;
6014 #endif
6015 #ifdef TARGET_NR_bind
6016 case TARGET_NR_bind:
6017 ret = do_bind(arg1, arg2, arg3);
6018 break;
6019 #endif
6020 #ifdef TARGET_NR_connect
6021 case TARGET_NR_connect:
6022 ret = do_connect(arg1, arg2, arg3);
6023 break;
6024 #endif
6025 #ifdef TARGET_NR_getpeername
6026 case TARGET_NR_getpeername:
6027 ret = do_getpeername(arg1, arg2, arg3);
6028 break;
6029 #endif
6030 #ifdef TARGET_NR_getsockname
6031 case TARGET_NR_getsockname:
6032 ret = do_getsockname(arg1, arg2, arg3);
6033 break;
6034 #endif
6035 #ifdef TARGET_NR_getsockopt
6036 case TARGET_NR_getsockopt:
6037 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6038 break;
6039 #endif
6040 #ifdef TARGET_NR_listen
6041 case TARGET_NR_listen:
6042 ret = get_errno(listen(arg1, arg2));
6043 break;
6044 #endif
6045 #ifdef TARGET_NR_recv
6046 case TARGET_NR_recv:
6047 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6048 break;
6049 #endif
6050 #ifdef TARGET_NR_recvfrom
6051 case TARGET_NR_recvfrom:
6052 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6053 break;
6054 #endif
6055 #ifdef TARGET_NR_recvmsg
6056 case TARGET_NR_recvmsg:
6057 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6058 break;
6059 #endif
6060 #ifdef TARGET_NR_send
6061 case TARGET_NR_send:
6062 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6063 break;
6064 #endif
6065 #ifdef TARGET_NR_sendmsg
6066 case TARGET_NR_sendmsg:
6067 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6068 break;
6069 #endif
6070 #ifdef TARGET_NR_sendto
6071 case TARGET_NR_sendto:
6072 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6073 break;
6074 #endif
6075 #ifdef TARGET_NR_shutdown
6076 case TARGET_NR_shutdown:
6077 ret = get_errno(shutdown(arg1, arg2));
6078 break;
6079 #endif
6080 #ifdef TARGET_NR_socket
6081 case TARGET_NR_socket:
6082 ret = do_socket(arg1, arg2, arg3);
6083 break;
6084 #endif
6085 #ifdef TARGET_NR_socketpair
6086 case TARGET_NR_socketpair:
6087 ret = do_socketpair(arg1, arg2, arg3, arg4);
6088 break;
6089 #endif
6090 #ifdef TARGET_NR_setsockopt
6091 case TARGET_NR_setsockopt:
6092 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6093 break;
6094 #endif
6096 case TARGET_NR_syslog:
6097 if (!(p = lock_user_string(arg2)))
6098 goto efault;
6099 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6100 unlock_user(p, arg2, 0);
6101 break;
6103 case TARGET_NR_setitimer:
6105 struct itimerval value, ovalue, *pvalue;
6107 if (arg2) {
6108 pvalue = &value;
6109 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6110 || copy_from_user_timeval(&pvalue->it_value,
6111 arg2 + sizeof(struct target_timeval)))
6112 goto efault;
6113 } else {
6114 pvalue = NULL;
6116 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6117 if (!is_error(ret) && arg3) {
6118 if (copy_to_user_timeval(arg3,
6119 &ovalue.it_interval)
6120 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6121 &ovalue.it_value))
6122 goto efault;
6125 break;
6126 case TARGET_NR_getitimer:
6128 struct itimerval value;
6130 ret = get_errno(getitimer(arg1, &value));
6131 if (!is_error(ret) && arg2) {
6132 if (copy_to_user_timeval(arg2,
6133 &value.it_interval)
6134 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6135 &value.it_value))
6136 goto efault;
6139 break;
6140 case TARGET_NR_stat:
6141 if (!(p = lock_user_string(arg1)))
6142 goto efault;
6143 ret = get_errno(stat(path(p), &st));
6144 unlock_user(p, arg1, 0);
6145 goto do_stat;
6146 case TARGET_NR_lstat:
6147 if (!(p = lock_user_string(arg1)))
6148 goto efault;
6149 ret = get_errno(lstat(path(p), &st));
6150 unlock_user(p, arg1, 0);
6151 goto do_stat;
6152 case TARGET_NR_fstat:
6154 ret = get_errno(fstat(arg1, &st));
6155 do_stat:
6156 if (!is_error(ret)) {
6157 struct target_stat *target_st;
6159 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6160 goto efault;
6161 memset(target_st, 0, sizeof(*target_st));
6162 __put_user(st.st_dev, &target_st->st_dev);
6163 __put_user(st.st_ino, &target_st->st_ino);
6164 __put_user(st.st_mode, &target_st->st_mode);
6165 __put_user(st.st_uid, &target_st->st_uid);
6166 __put_user(st.st_gid, &target_st->st_gid);
6167 __put_user(st.st_nlink, &target_st->st_nlink);
6168 __put_user(st.st_rdev, &target_st->st_rdev);
6169 __put_user(st.st_size, &target_st->st_size);
6170 __put_user(st.st_blksize, &target_st->st_blksize);
6171 __put_user(st.st_blocks, &target_st->st_blocks);
6172 __put_user(st.st_atime, &target_st->target_st_atime);
6173 __put_user(st.st_mtime, &target_st->target_st_mtime);
6174 __put_user(st.st_ctime, &target_st->target_st_ctime);
6175 unlock_user_struct(target_st, arg2, 1);
6178 break;
6179 #ifdef TARGET_NR_olduname
6180 case TARGET_NR_olduname:
6181 goto unimplemented;
6182 #endif
6183 #ifdef TARGET_NR_iopl
6184 case TARGET_NR_iopl:
6185 goto unimplemented;
6186 #endif
6187 case TARGET_NR_vhangup:
6188 ret = get_errno(vhangup());
6189 break;
6190 #ifdef TARGET_NR_idle
6191 case TARGET_NR_idle:
6192 goto unimplemented;
6193 #endif
6194 #ifdef TARGET_NR_syscall
6195 case TARGET_NR_syscall:
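        /* Indirect syscall: the number to invoke arrives in the first
           argument, so the remaining arguments are shifted down one slot
           and do_syscall() is simply re-entered with them. */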
6196 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6197 arg6, arg7, arg8, 0);
6198 break;
6199 #endif
6200 case TARGET_NR_wait4:
6202 int status;
6203 abi_long status_ptr = arg2;
6204 struct rusage rusage, *rusage_ptr;
6205 abi_ulong target_rusage = arg4;
6206 if (target_rusage)
6207 rusage_ptr = &rusage;
6208 else
6209 rusage_ptr = NULL;
6210 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6211 if (!is_error(ret)) {
6212 if (status_ptr) {
6213 status = host_to_target_waitstatus(status);
6214 if (put_user_s32(status, status_ptr))
6215 goto efault;
6217 if (target_rusage)
6218 host_to_target_rusage(target_rusage, &rusage);
6221 break;
6222 #ifdef TARGET_NR_swapoff
6223 case TARGET_NR_swapoff:
6224 if (!(p = lock_user_string(arg1)))
6225 goto efault;
6226 ret = get_errno(swapoff(p));
6227 unlock_user(p, arg1, 0);
6228 break;
6229 #endif
6230 case TARGET_NR_sysinfo:
6232 struct target_sysinfo *target_value;
6233 struct sysinfo value;
6234 ret = get_errno(sysinfo(&value));
6235 if (!is_error(ret) && arg1)
6237 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6238 goto efault;
6239 __put_user(value.uptime, &target_value->uptime);
6240 __put_user(value.loads[0], &target_value->loads[0]);
6241 __put_user(value.loads[1], &target_value->loads[1]);
6242 __put_user(value.loads[2], &target_value->loads[2]);
6243 __put_user(value.totalram, &target_value->totalram);
6244 __put_user(value.freeram, &target_value->freeram);
6245 __put_user(value.sharedram, &target_value->sharedram);
6246 __put_user(value.bufferram, &target_value->bufferram);
6247 __put_user(value.totalswap, &target_value->totalswap);
6248 __put_user(value.freeswap, &target_value->freeswap);
6249 __put_user(value.procs, &target_value->procs);
6250 __put_user(value.totalhigh, &target_value->totalhigh);
6251 __put_user(value.freehigh, &target_value->freehigh);
6252 __put_user(value.mem_unit, &target_value->mem_unit);
6253 unlock_user_struct(target_value, arg1, 1);
6256 break;
6257 #ifdef TARGET_NR_ipc
6258 case TARGET_NR_ipc:
6259 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6260 break;
6261 #endif
6262 #ifdef TARGET_NR_semget
6263 case TARGET_NR_semget:
6264 ret = get_errno(semget(arg1, arg2, arg3));
6265 break;
6266 #endif
6267 #ifdef TARGET_NR_semop
6268 case TARGET_NR_semop:
6269 ret = get_errno(do_semop(arg1, arg2, arg3));
6270 break;
6271 #endif
6272 #ifdef TARGET_NR_semctl
6273 case TARGET_NR_semctl:
6274 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6275 break;
6276 #endif
6277 #ifdef TARGET_NR_msgctl
6278 case TARGET_NR_msgctl:
6279 ret = do_msgctl(arg1, arg2, arg3);
6280 break;
6281 #endif
6282 #ifdef TARGET_NR_msgget
6283 case TARGET_NR_msgget:
6284 ret = get_errno(msgget(arg1, arg2));
6285 break;
6286 #endif
6287 #ifdef TARGET_NR_msgrcv
6288 case TARGET_NR_msgrcv:
6289 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6290 break;
6291 #endif
6292 #ifdef TARGET_NR_msgsnd
6293 case TARGET_NR_msgsnd:
6294 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6295 break;
6296 #endif
6297 #ifdef TARGET_NR_shmget
6298 case TARGET_NR_shmget:
6299 ret = get_errno(shmget(arg1, arg2, arg3));
6300 break;
6301 #endif
6302 #ifdef TARGET_NR_shmctl
6303 case TARGET_NR_shmctl:
6304 ret = do_shmctl(arg1, arg2, arg3);
6305 break;
6306 #endif
6307 #ifdef TARGET_NR_shmat
6308 case TARGET_NR_shmat:
6309 ret = do_shmat(arg1, arg2, arg3);
6310 break;
6311 #endif
6312 #ifdef TARGET_NR_shmdt
6313 case TARGET_NR_shmdt:
6314 ret = do_shmdt(arg1);
6315 break;
6316 #endif
6317 case TARGET_NR_fsync:
6318 ret = get_errno(fsync(arg1));
6319 break;
6320 case TARGET_NR_clone:
6321 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6322 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6323 #elif defined(TARGET_CRIS)
6324 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6325 #elif defined(TARGET_S390X)
6326 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6327 #else
6328 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6329 #endif
6330 break;
6331 #ifdef __NR_exit_group
6332 /* new thread calls */
6333 case TARGET_NR_exit_group:
6334 #ifdef TARGET_GPROF
6335 _mcleanup();
6336 #endif
6337 gdb_exit(cpu_env, arg1);
6338 ret = get_errno(exit_group(arg1));
6339 break;
6340 #endif
6341 case TARGET_NR_setdomainname:
6342 if (!(p = lock_user_string(arg1)))
6343 goto efault;
6344 ret = get_errno(setdomainname(p, arg2));
6345 unlock_user(p, arg1, 0);
6346 break;
6347 case TARGET_NR_uname:
6348 /* no need to transcode because we use the linux syscall */
6350 struct new_utsname * buf;
6352 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6353 goto efault;
6354 ret = get_errno(sys_uname(buf));
6355 if (!is_error(ret)) {
6356 /* Overwrite the native machine name with whatever is being
6357 emulated. */
6358 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6359 /* Allow the user to override the reported release. */
6360 if (qemu_uname_release && *qemu_uname_release)
6361 strcpy (buf->release, qemu_uname_release);
6363 unlock_user_struct(buf, arg1, 1);
6365 break;
6366 #ifdef TARGET_I386
6367 case TARGET_NR_modify_ldt:
6368 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6369 break;
6370 #if !defined(TARGET_X86_64)
6371 case TARGET_NR_vm86old:
6372 goto unimplemented;
6373 case TARGET_NR_vm86:
6374 ret = do_vm86(cpu_env, arg1, arg2);
6375 break;
6376 #endif
6377 #endif
6378 case TARGET_NR_adjtimex:
6379 goto unimplemented;
6380 #ifdef TARGET_NR_create_module
6381 case TARGET_NR_create_module:
6382 #endif
6383 case TARGET_NR_init_module:
6384 case TARGET_NR_delete_module:
6385 #ifdef TARGET_NR_get_kernel_syms
6386 case TARGET_NR_get_kernel_syms:
6387 #endif
6388 goto unimplemented;
6389 case TARGET_NR_quotactl:
6390 goto unimplemented;
6391 case TARGET_NR_getpgid:
6392 ret = get_errno(getpgid(arg1));
6393 break;
6394 case TARGET_NR_fchdir:
6395 ret = get_errno(fchdir(arg1));
6396 break;
6397 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6398 case TARGET_NR_bdflush:
6399 goto unimplemented;
6400 #endif
6401 #ifdef TARGET_NR_sysfs
6402 case TARGET_NR_sysfs:
6403 goto unimplemented;
6404 #endif
6405 case TARGET_NR_personality:
6406 ret = get_errno(personality(arg1));
6407 break;
6408 #ifdef TARGET_NR_afs_syscall
6409 case TARGET_NR_afs_syscall:
6410 goto unimplemented;
6411 #endif
6412 #ifdef TARGET_NR__llseek /* Not on alpha */
6413 case TARGET_NR__llseek:
6415 int64_t res;
6416 #if !defined(__NR_llseek)
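        /* Hosts without an llseek syscall (typically 64-bit) get the same
           effect by gluing the high (arg2) and low (arg3) halves together
           and calling plain lseek(); either way the 64-bit result is
           written back to the guest pointer in arg4 below. */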
6417 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6418 if (res == -1) {
6419 ret = get_errno(res);
6420 } else {
6421 ret = 0;
6423 #else
6424 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6425 #endif
6426 if ((ret == 0) && put_user_s64(res, arg4)) {
6427 goto efault;
6430 break;
6431 #endif
6432 case TARGET_NR_getdents:
6433 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
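        /* On a 64-bit host the kernel's struct linux_dirent carries 8-byte
           d_ino/d_off fields while the 32-bit target expects abi_long ones,
           so each record is repacked field by field and its length shrunk
           by 2 * (sizeof(long) - sizeof(abi_long)) bytes. */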
6435 struct target_dirent *target_dirp;
6436 struct linux_dirent *dirp;
6437 abi_long count = arg3;
6439 dirp = malloc(count);
6440 if (!dirp) {
6441 ret = -TARGET_ENOMEM;
6442 goto fail;
6445 ret = get_errno(sys_getdents(arg1, dirp, count));
6446 if (!is_error(ret)) {
6447 struct linux_dirent *de;
6448 struct target_dirent *tde;
6449 int len = ret;
6450 int reclen, treclen;
6451 int count1, tnamelen;
6453 count1 = 0;
6454 de = dirp;
6455 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6456 goto efault;
6457 tde = target_dirp;
6458 while (len > 0) {
6459 reclen = de->d_reclen;
6460 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6461 tde->d_reclen = tswap16(treclen);
6462 tde->d_ino = tswapl(de->d_ino);
6463 tde->d_off = tswapl(de->d_off);
6464 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6465 if (tnamelen > 256)
6466 tnamelen = 256;
6467 /* XXX: may not be correct */
6468 pstrcpy(tde->d_name, tnamelen, de->d_name);
6469 de = (struct linux_dirent *)((char *)de + reclen);
6470 len -= reclen;
6471 tde = (struct target_dirent *)((char *)tde + treclen);
6472 count1 += treclen;
6474 ret = count1;
6475 unlock_user(target_dirp, arg2, ret);
6477 free(dirp);
6479 #else
6481 struct linux_dirent *dirp;
6482 abi_long count = arg3;
6484 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6485 goto efault;
6486 ret = get_errno(sys_getdents(arg1, dirp, count));
6487 if (!is_error(ret)) {
6488 struct linux_dirent *de;
6489 int len = ret;
6490 int reclen;
6491 de = dirp;
6492 while (len > 0) {
6493 reclen = de->d_reclen;
6494 if (reclen > len)
6495 break;
6496 de->d_reclen = tswap16(reclen);
6497 tswapls(&de->d_ino);
6498 tswapls(&de->d_off);
6499 de = (struct linux_dirent *)((char *)de + reclen);
6500 len -= reclen;
6503 unlock_user(dirp, arg2, ret);
6505 #endif
6506 break;
6507 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6508 case TARGET_NR_getdents64:
6510 struct linux_dirent64 *dirp;
6511 abi_long count = arg3;
6512 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6513 goto efault;
6514 ret = get_errno(sys_getdents64(arg1, dirp, count));
6515 if (!is_error(ret)) {
6516 struct linux_dirent64 *de;
6517 int len = ret;
6518 int reclen;
6519 de = dirp;
6520 while (len > 0) {
6521 reclen = de->d_reclen;
6522 if (reclen > len)
6523 break;
6524 de->d_reclen = tswap16(reclen);
6525 tswap64s((uint64_t *)&de->d_ino);
6526 tswap64s((uint64_t *)&de->d_off);
6527 de = (struct linux_dirent64 *)((char *)de + reclen);
6528 len -= reclen;
6531 unlock_user(dirp, arg2, ret);
6533 break;
6534 #endif /* TARGET_NR_getdents64 */
6535 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6536 #ifdef TARGET_S390X
6537 case TARGET_NR_select:
6538 #else
6539 case TARGET_NR__newselect:
6540 #endif
6541 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6542 break;
6543 #endif
6544 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6545 # ifdef TARGET_NR_poll
6546 case TARGET_NR_poll:
6547 # endif
6548 # ifdef TARGET_NR_ppoll
6549 case TARGET_NR_ppoll:
6550 # endif
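        /* poll and ppoll share this handler: the pollfd array is converted
           into a host copy on the stack, and for ppoll the timespec timeout
           and signal mask are converted as well, with sys_ppoll told the
           host sigset size (_NSIG / 8). revents are copied back either way. */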
6552 struct target_pollfd *target_pfd;
6553 unsigned int nfds = arg2;
6554 int timeout = arg3;
6555 struct pollfd *pfd;
6556 unsigned int i;
6558 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6559 if (!target_pfd)
6560 goto efault;
6562 pfd = alloca(sizeof(struct pollfd) * nfds);
6563 for(i = 0; i < nfds; i++) {
6564 pfd[i].fd = tswap32(target_pfd[i].fd);
6565 pfd[i].events = tswap16(target_pfd[i].events);
6568 # ifdef TARGET_NR_ppoll
6569 if (num == TARGET_NR_ppoll) {
6570 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6571 target_sigset_t *target_set;
6572 sigset_t _set, *set = &_set;
6574 if (arg3) {
6575 if (target_to_host_timespec(timeout_ts, arg3)) {
6576 unlock_user(target_pfd, arg1, 0);
6577 goto efault;
6579 } else {
6580 timeout_ts = NULL;
6583 if (arg4) {
6584 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6585 if (!target_set) {
6586 unlock_user(target_pfd, arg1, 0);
6587 goto efault;
6589 target_to_host_sigset(set, target_set);
6590 } else {
6591 set = NULL;
6594 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6596 if (!is_error(ret) && arg3) {
6597 host_to_target_timespec(arg3, timeout_ts);
6599 if (arg4) {
6600 unlock_user(target_set, arg4, 0);
6602 } else
6603 # endif
6604 ret = get_errno(poll(pfd, nfds, timeout));
6606 if (!is_error(ret)) {
6607 for(i = 0; i < nfds; i++) {
6608 target_pfd[i].revents = tswap16(pfd[i].revents);
6611 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6613 break;
6614 #endif
6615 case TARGET_NR_flock:
6616 /* NOTE: the flock constant seems to be the same for every
6617 Linux platform */
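        /* LOCK_SH/LOCK_EX/LOCK_NB/LOCK_UN appear to have the same values
           (1, 2, 4, 8) on every Linux port, so arg2 is passed through
           untranslated. */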
6618 ret = get_errno(flock(arg1, arg2));
6619 break;
6620 case TARGET_NR_readv:
6622 int count = arg3;
6623 struct iovec *vec;
6625 vec = alloca(count * sizeof(struct iovec));
6626 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6627 goto efault;
6628 ret = get_errno(readv(arg1, vec, count));
6629 unlock_iovec(vec, arg2, count, 1);
6631 break;
6632 case TARGET_NR_writev:
6634 int count = arg3;
6635 struct iovec *vec;
6637 vec = alloca(count * sizeof(struct iovec));
6638 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6639 goto efault;
6640 ret = get_errno(writev(arg1, vec, count));
6641 unlock_iovec(vec, arg2, count, 0);
6643 break;
6644 case TARGET_NR_getsid:
6645 ret = get_errno(getsid(arg1));
6646 break;
6647 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6648 case TARGET_NR_fdatasync:
6649 ret = get_errno(fdatasync(arg1));
6650 break;
6651 #endif
6652 case TARGET_NR__sysctl:
6653 /* We don't implement this, but ENOTDIR is always a safe
6654 return value. */
6655 ret = -TARGET_ENOTDIR;
6656 break;
6657 case TARGET_NR_sched_getaffinity:
6659 unsigned int mask_size;
6660 unsigned long *mask;
6662 /*
6663 * sched_getaffinity needs multiples of ulong, so need to take
6664 * care of mismatches between target ulong and host ulong sizes.
6665 */
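        /* arg2 must itself be abi_ulong aligned; it is then rounded up to
           whole host unsigned longs because that is the granularity the
           host kernel works in, and on success the kernel's return value
           is the number of bytes it wrote, which is what gets copied back
           to the guest buffer. */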
6666 if (arg2 & (sizeof(abi_ulong) - 1)) {
6667 ret = -TARGET_EINVAL;
6668 break;
6670 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6672 mask = alloca(mask_size);
6673 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6675 if (!is_error(ret)) {
6676 if (copy_to_user(arg3, mask, ret)) {
6677 goto efault;
6681 break;
6682 case TARGET_NR_sched_setaffinity:
6684 unsigned int mask_size;
6685 unsigned long *mask;
6687 /*
6688 * sched_setaffinity needs multiples of ulong, so need to take
6689 * care of mismatches between target ulong and host ulong sizes.
6690 */
6691 if (arg2 & (sizeof(abi_ulong) - 1)) {
6692 ret = -TARGET_EINVAL;
6693 break;
6695 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6697 mask = alloca(mask_size);
6698 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6699 goto efault;
6701 memcpy(mask, p, arg2);
6702 unlock_user_struct(p, arg3, 0);
6704 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6706 break;
6707 case TARGET_NR_sched_setparam:
6709 struct sched_param *target_schp;
6710 struct sched_param schp;
6712 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6713 goto efault;
6714 schp.sched_priority = tswap32(target_schp->sched_priority);
6715 unlock_user_struct(target_schp, arg2, 0);
6716 ret = get_errno(sched_setparam(arg1, &schp));
6718 break;
6719 case TARGET_NR_sched_getparam:
6721 struct sched_param *target_schp;
6722 struct sched_param schp;
6723 ret = get_errno(sched_getparam(arg1, &schp));
6724 if (!is_error(ret)) {
6725 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6726 goto efault;
6727 target_schp->sched_priority = tswap32(schp.sched_priority);
6728 unlock_user_struct(target_schp, arg2, 1);
6731 break;
6732 case TARGET_NR_sched_setscheduler:
6734 struct sched_param *target_schp;
6735 struct sched_param schp;
6736 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6737 goto efault;
6738 schp.sched_priority = tswap32(target_schp->sched_priority);
6739 unlock_user_struct(target_schp, arg3, 0);
6740 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6742 break;
6743 case TARGET_NR_sched_getscheduler:
6744 ret = get_errno(sched_getscheduler(arg1));
6745 break;
6746 case TARGET_NR_sched_yield:
6747 ret = get_errno(sched_yield());
6748 break;
6749 case TARGET_NR_sched_get_priority_max:
6750 ret = get_errno(sched_get_priority_max(arg1));
6751 break;
6752 case TARGET_NR_sched_get_priority_min:
6753 ret = get_errno(sched_get_priority_min(arg1));
6754 break;
6755 case TARGET_NR_sched_rr_get_interval:
6757 struct timespec ts;
6758 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6759 if (!is_error(ret)) {
6760 host_to_target_timespec(arg2, &ts);
6763 break;
6764 case TARGET_NR_nanosleep:
6766 struct timespec req, rem;
6767 target_to_host_timespec(&req, arg1);
6768 ret = get_errno(nanosleep(&req, &rem));
6769 if (is_error(ret) && arg2) {
6770 host_to_target_timespec(arg2, &rem);
6773 break;
6774 #ifdef TARGET_NR_query_module
6775 case TARGET_NR_query_module:
6776 goto unimplemented;
6777 #endif
6778 #ifdef TARGET_NR_nfsservctl
6779 case TARGET_NR_nfsservctl:
6780 goto unimplemented;
6781 #endif
6782 case TARGET_NR_prctl:
6783 switch (arg1)
6785 case PR_GET_PDEATHSIG:
6787 int deathsig;
6788 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6789 if (!is_error(ret) && arg2
6790 && put_user_ual(deathsig, arg2))
6791 goto efault;
6793 break;
6794 default:
6795 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6796 break;
6798 break;
6799 #ifdef TARGET_NR_arch_prctl
6800 case TARGET_NR_arch_prctl:
6801 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6802 ret = do_arch_prctl(cpu_env, arg1, arg2);
6803 break;
6804 #else
6805 goto unimplemented;
6806 #endif
6807 #endif
6808 #ifdef TARGET_NR_pread
6809 case TARGET_NR_pread:
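        /* Under the ARM EABI, 64-bit arguments are aligned to even register
           pairs, which inserts a padding word before the file offset; the
           offset therefore arrives one slot later (arg5 rather than arg4).
           The same shuffle is applied to pwrite below. */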
6810 #ifdef TARGET_ARM
6811 if (((CPUARMState *)cpu_env)->eabi)
6812 arg4 = arg5;
6813 #endif
6814 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6815 goto efault;
6816 ret = get_errno(pread(arg1, p, arg3, arg4));
6817 unlock_user(p, arg2, ret);
6818 break;
6819 case TARGET_NR_pwrite:
6820 #ifdef TARGET_ARM
6821 if (((CPUARMState *)cpu_env)->eabi)
6822 arg4 = arg5;
6823 #endif
6824 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6825 goto efault;
6826 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6827 unlock_user(p, arg2, 0);
6828 break;
6829 #endif
6830 #ifdef TARGET_NR_pread64
6831 case TARGET_NR_pread64:
6832 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6833 goto efault;
6834 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6835 unlock_user(p, arg2, ret);
6836 break;
6837 case TARGET_NR_pwrite64:
6838 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6839 goto efault;
6840 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6841 unlock_user(p, arg2, 0);
6842 break;
6843 #endif
6844 case TARGET_NR_getcwd:
6845 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6846 goto efault;
6847 ret = get_errno(sys_getcwd1(p, arg2));
6848 unlock_user(p, arg1, ret);
6849 break;
6850 case TARGET_NR_capget:
6851 goto unimplemented;
6852 case TARGET_NR_capset:
6853 goto unimplemented;
6854 case TARGET_NR_sigaltstack:
6855 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6856 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6857 defined(TARGET_M68K) || defined(TARGET_S390X)
6858 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6859 break;
6860 #else
6861 goto unimplemented;
6862 #endif
6863 case TARGET_NR_sendfile:
6864 goto unimplemented;
6865 #ifdef TARGET_NR_getpmsg
6866 case TARGET_NR_getpmsg:
6867 goto unimplemented;
6868 #endif
6869 #ifdef TARGET_NR_putpmsg
6870 case TARGET_NR_putpmsg:
6871 goto unimplemented;
6872 #endif
6873 #ifdef TARGET_NR_vfork
6874 case TARGET_NR_vfork:
6875 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6876 0, 0, 0, 0));
6877 break;
6878 #endif
6879 #ifdef TARGET_NR_ugetrlimit
6880 case TARGET_NR_ugetrlimit:
6882 struct rlimit rlim;
6883 ret = get_errno(getrlimit(arg1, &rlim));
6884 if (!is_error(ret)) {
6885 struct target_rlimit *target_rlim;
6886 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6887 goto efault;
6888 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6889 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6890 unlock_user_struct(target_rlim, arg2, 1);
6892 break;
6894 #endif
6895 #ifdef TARGET_NR_truncate64
6896 case TARGET_NR_truncate64:
6897 if (!(p = lock_user_string(arg1)))
6898 goto efault;
6899 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6900 unlock_user(p, arg1, 0);
6901 break;
6902 #endif
6903 #ifdef TARGET_NR_ftruncate64
6904 case TARGET_NR_ftruncate64:
6905 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6906 break;
6907 #endif
6908 #ifdef TARGET_NR_stat64
6909 case TARGET_NR_stat64:
6910 if (!(p = lock_user_string(arg1)))
6911 goto efault;
6912 ret = get_errno(stat(path(p), &st));
6913 unlock_user(p, arg1, 0);
6914 if (!is_error(ret))
6915 ret = host_to_target_stat64(cpu_env, arg2, &st);
6916 break;
6917 #endif
6918 #ifdef TARGET_NR_lstat64
6919 case TARGET_NR_lstat64:
6920 if (!(p = lock_user_string(arg1)))
6921 goto efault;
6922 ret = get_errno(lstat(path(p), &st));
6923 unlock_user(p, arg1, 0);
6924 if (!is_error(ret))
6925 ret = host_to_target_stat64(cpu_env, arg2, &st);
6926 break;
6927 #endif
6928 #ifdef TARGET_NR_fstat64
6929 case TARGET_NR_fstat64:
6930 ret = get_errno(fstat(arg1, &st));
6931 if (!is_error(ret))
6932 ret = host_to_target_stat64(cpu_env, arg2, &st);
6933 break;
6934 #endif
6935 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6936 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6937 #ifdef TARGET_NR_fstatat64
6938 case TARGET_NR_fstatat64:
6939 #endif
6940 #ifdef TARGET_NR_newfstatat
6941 case TARGET_NR_newfstatat:
6942 #endif
6943 if (!(p = lock_user_string(arg2)))
6944 goto efault;
6945 #ifdef __NR_fstatat64
6946 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6947 #else
6948 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6949 #endif
6950 if (!is_error(ret))
6951 ret = host_to_target_stat64(cpu_env, arg3, &st);
6952 break;
6953 #endif
6954 case TARGET_NR_lchown:
6955 if (!(p = lock_user_string(arg1)))
6956 goto efault;
6957 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6958 unlock_user(p, arg1, 0);
6959 break;
6960 #ifdef TARGET_NR_getuid
6961 case TARGET_NR_getuid:
6962 ret = get_errno(high2lowuid(getuid()));
6963 break;
6964 #endif
6965 #ifdef TARGET_NR_getgid
6966 case TARGET_NR_getgid:
6967 ret = get_errno(high2lowgid(getgid()));
6968 break;
6969 #endif
6970 #ifdef TARGET_NR_geteuid
6971 case TARGET_NR_geteuid:
6972 ret = get_errno(high2lowuid(geteuid()));
6973 break;
6974 #endif
6975 #ifdef TARGET_NR_getegid
6976 case TARGET_NR_getegid:
6977 ret = get_errno(high2lowgid(getegid()));
6978 break;
6979 #endif
6980 case TARGET_NR_setreuid:
6981 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6982 break;
6983 case TARGET_NR_setregid:
6984 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6985 break;
6986 case TARGET_NR_getgroups:
6988 int gidsetsize = arg1;
6989 target_id *target_grouplist;
6990 gid_t *grouplist;
6991 int i;
6993 grouplist = alloca(gidsetsize * sizeof(gid_t));
6994 ret = get_errno(getgroups(gidsetsize, grouplist));
6995 if (gidsetsize == 0)
6996 break;
6997 if (!is_error(ret)) {
6998 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6999 if (!target_grouplist)
7000 goto efault;
7001 for(i = 0;i < ret; i++)
7002 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7003 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7006 break;
7007 case TARGET_NR_setgroups:
7009 int gidsetsize = arg1;
7010 target_id *target_grouplist;
7011 gid_t *grouplist;
7012 int i;
7014 grouplist = alloca(gidsetsize * sizeof(gid_t));
7015 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7016 if (!target_grouplist) {
7017 ret = -TARGET_EFAULT;
7018 goto fail;
7020 for(i = 0;i < gidsetsize; i++)
7021 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7022 unlock_user(target_grouplist, arg2, 0);
7023 ret = get_errno(setgroups(gidsetsize, grouplist));
7025 break;
7026 case TARGET_NR_fchown:
7027 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7028 break;
7029 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7030 case TARGET_NR_fchownat:
7031 if (!(p = lock_user_string(arg2)))
7032 goto efault;
7033 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7034 unlock_user(p, arg2, 0);
7035 break;
7036 #endif
7037 #ifdef TARGET_NR_setresuid
7038 case TARGET_NR_setresuid:
7039 ret = get_errno(setresuid(low2highuid(arg1),
7040 low2highuid(arg2),
7041 low2highuid(arg3)));
7042 break;
7043 #endif
7044 #ifdef TARGET_NR_getresuid
7045 case TARGET_NR_getresuid:
7047 uid_t ruid, euid, suid;
7048 ret = get_errno(getresuid(&ruid, &euid, &suid));
7049 if (!is_error(ret)) {
7050 if (put_user_u16(high2lowuid(ruid), arg1)
7051 || put_user_u16(high2lowuid(euid), arg2)
7052 || put_user_u16(high2lowuid(suid), arg3))
7053 goto efault;
7056 break;
7057 #endif
7058 #ifdef TARGET_NR_getresgid
7059 case TARGET_NR_setresgid:
7060 ret = get_errno(setresgid(low2highgid(arg1),
7061 low2highgid(arg2),
7062 low2highgid(arg3)));
7063 break;
7064 #endif
7065 #ifdef TARGET_NR_getresgid
7066 case TARGET_NR_getresgid:
7068 gid_t rgid, egid, sgid;
7069 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7070 if (!is_error(ret)) {
7071 if (put_user_u16(high2lowgid(rgid), arg1)
7072 || put_user_u16(high2lowgid(egid), arg2)
7073 || put_user_u16(high2lowgid(sgid), arg3))
7074 goto efault;
7077 break;
7078 #endif
7079 case TARGET_NR_chown:
7080 if (!(p = lock_user_string(arg1)))
7081 goto efault;
7082 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7083 unlock_user(p, arg1, 0);
7084 break;
7085 case TARGET_NR_setuid:
7086 ret = get_errno(setuid(low2highuid(arg1)));
7087 break;
7088 case TARGET_NR_setgid:
7089 ret = get_errno(setgid(low2highgid(arg1)));
7090 break;
7091 case TARGET_NR_setfsuid:
7092 ret = get_errno(setfsuid(arg1));
7093 break;
7094 case TARGET_NR_setfsgid:
7095 ret = get_errno(setfsgid(arg1));
7096 break;
7098 #ifdef TARGET_NR_lchown32
7099 case TARGET_NR_lchown32:
7100 if (!(p = lock_user_string(arg1)))
7101 goto efault;
7102 ret = get_errno(lchown(p, arg2, arg3));
7103 unlock_user(p, arg1, 0);
7104 break;
7105 #endif
7106 #ifdef TARGET_NR_getuid32
7107 case TARGET_NR_getuid32:
7108 ret = get_errno(getuid());
7109 break;
7110 #endif
7112 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7113 /* Alpha specific */
7114 case TARGET_NR_getxuid:
7116 uid_t euid;
7117 euid=geteuid();
7118 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7120 ret = get_errno(getuid());
7121 break;
7122 #endif
7123 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7124 /* Alpha specific */
7125 case TARGET_NR_getxgid:
7127 uid_t egid;
7128 egid=getegid();
7129 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7131 ret = get_errno(getgid());
7132 break;
7133 #endif
7134 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7135 /* Alpha specific */
7136 case TARGET_NR_osf_getsysinfo:
7137 ret = -TARGET_EOPNOTSUPP;
7138 switch (arg1) {
7139 case TARGET_GSI_IEEE_FP_CONTROL:
7141 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7143 /* Copied from linux ieee_fpcr_to_swcr. */
7144 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7145 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7146 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7147 | SWCR_TRAP_ENABLE_DZE
7148 | SWCR_TRAP_ENABLE_OVF);
7149 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7150 | SWCR_TRAP_ENABLE_INE);
7151 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7152 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7154 if (put_user_u64 (swcr, arg2))
7155 goto efault;
7156 ret = 0;
7158 break;
7160 /* case GSI_IEEE_STATE_AT_SIGNAL:
7161 -- Not implemented in linux kernel.
7162 case GSI_UACPROC:
7163 -- Retrieves current unaligned access state; not much used.
7164 case GSI_PROC_TYPE:
7165 -- Retrieves implver information; surely not used.
7166 case GSI_GET_HWRPB:
7167 -- Grabs a copy of the HWRPB; surely not used.
7168 */
7170 break;
7171 #endif
7172 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7173 /* Alpha specific */
7174 case TARGET_NR_osf_setsysinfo:
7175 ret = -TARGET_EOPNOTSUPP;
7176 switch (arg1) {
7177 case TARGET_SSI_IEEE_FP_CONTROL:
7178 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7180 uint64_t swcr, fpcr, orig_fpcr;
7182 if (get_user_u64 (swcr, arg2))
7183 goto efault;
7184 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7185 fpcr = orig_fpcr & FPCR_DYN_MASK;
7187 /* Copied from linux ieee_swcr_to_fpcr. */
7188 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7189 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7190 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7191 | SWCR_TRAP_ENABLE_DZE
7192 | SWCR_TRAP_ENABLE_OVF)) << 48;
7193 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7194 | SWCR_TRAP_ENABLE_INE)) << 57;
7195 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7196 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7198 cpu_alpha_store_fpcr (cpu_env, fpcr);
7199 ret = 0;
7201 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7202 /* Old exceptions are not signaled. */
7203 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7205 /* If any exceptions set by this call are unmasked,
7206 send a signal. */
7207 /* ??? FIXME */
7210 break;
7212 /* case SSI_NVPAIRS:
7213 -- Used with SSIN_UACPROC to enable unaligned accesses.
7214 case SSI_IEEE_STATE_AT_SIGNAL:
7215 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7216 -- Not implemented in linux kernel
7217 */
7219 break;
7220 #endif
7221 #ifdef TARGET_NR_osf_sigprocmask
7222 /* Alpha specific. */
7223 case TARGET_NR_osf_sigprocmask:
7225 abi_ulong mask;
7226 int how;
7227 sigset_t set, oldset;
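        /* The OSF/1 flavour takes the new mask by value and hands the old
           mask back as the syscall return value rather than through a
           pointer, hence ret is set to the converted old set below. */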
7229 switch(arg1) {
7230 case TARGET_SIG_BLOCK:
7231 how = SIG_BLOCK;
7232 break;
7233 case TARGET_SIG_UNBLOCK:
7234 how = SIG_UNBLOCK;
7235 break;
7236 case TARGET_SIG_SETMASK:
7237 how = SIG_SETMASK;
7238 break;
7239 default:
7240 ret = -TARGET_EINVAL;
7241 goto fail;
7243 mask = arg2;
7244 target_to_host_old_sigset(&set, &mask);
7245 sigprocmask(how, &set, &oldset);
7246 host_to_target_old_sigset(&mask, &oldset);
7247 ret = mask;
7249 break;
7250 #endif
7252 #ifdef TARGET_NR_getgid32
7253 case TARGET_NR_getgid32:
7254 ret = get_errno(getgid());
7255 break;
7256 #endif
7257 #ifdef TARGET_NR_geteuid32
7258 case TARGET_NR_geteuid32:
7259 ret = get_errno(geteuid());
7260 break;
7261 #endif
7262 #ifdef TARGET_NR_getegid32
7263 case TARGET_NR_getegid32:
7264 ret = get_errno(getegid());
7265 break;
7266 #endif
7267 #ifdef TARGET_NR_setreuid32
7268 case TARGET_NR_setreuid32:
7269 ret = get_errno(setreuid(arg1, arg2));
7270 break;
7271 #endif
7272 #ifdef TARGET_NR_setregid32
7273 case TARGET_NR_setregid32:
7274 ret = get_errno(setregid(arg1, arg2));
7275 break;
7276 #endif
7277 #ifdef TARGET_NR_getgroups32
7278 case TARGET_NR_getgroups32:
7280 int gidsetsize = arg1;
7281 uint32_t *target_grouplist;
7282 gid_t *grouplist;
7283 int i;
7285 grouplist = alloca(gidsetsize * sizeof(gid_t));
7286 ret = get_errno(getgroups(gidsetsize, grouplist));
7287 if (gidsetsize == 0)
7288 break;
7289 if (!is_error(ret)) {
7290 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7291 if (!target_grouplist) {
7292 ret = -TARGET_EFAULT;
7293 goto fail;
7295 for(i = 0;i < ret; i++)
7296 target_grouplist[i] = tswap32(grouplist[i]);
7297 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7300 break;
7301 #endif
7302 #ifdef TARGET_NR_setgroups32
7303 case TARGET_NR_setgroups32:
7305 int gidsetsize = arg1;
7306 uint32_t *target_grouplist;
7307 gid_t *grouplist;
7308 int i;
7310 grouplist = alloca(gidsetsize * sizeof(gid_t));
7311 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7312 if (!target_grouplist) {
7313 ret = -TARGET_EFAULT;
7314 goto fail;
7316 for(i = 0;i < gidsetsize; i++)
7317 grouplist[i] = tswap32(target_grouplist[i]);
7318 unlock_user(target_grouplist, arg2, 0);
7319 ret = get_errno(setgroups(gidsetsize, grouplist));
7321 break;
7322 #endif
7323 #ifdef TARGET_NR_fchown32
7324 case TARGET_NR_fchown32:
7325 ret = get_errno(fchown(arg1, arg2, arg3));
7326 break;
7327 #endif
7328 #ifdef TARGET_NR_setresuid32
7329 case TARGET_NR_setresuid32:
7330 ret = get_errno(setresuid(arg1, arg2, arg3));
7331 break;
7332 #endif
7333 #ifdef TARGET_NR_getresuid32
7334 case TARGET_NR_getresuid32:
7336 uid_t ruid, euid, suid;
7337 ret = get_errno(getresuid(&ruid, &euid, &suid));
7338 if (!is_error(ret)) {
7339 if (put_user_u32(ruid, arg1)
7340 || put_user_u32(euid, arg2)
7341 || put_user_u32(suid, arg3))
7342 goto efault;
7345 break;
7346 #endif
7347 #ifdef TARGET_NR_setresgid32
7348 case TARGET_NR_setresgid32:
7349 ret = get_errno(setresgid(arg1, arg2, arg3));
7350 break;
7351 #endif
7352 #ifdef TARGET_NR_getresgid32
7353 case TARGET_NR_getresgid32:
7355 gid_t rgid, egid, sgid;
7356 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7357 if (!is_error(ret)) {
7358 if (put_user_u32(rgid, arg1)
7359 || put_user_u32(egid, arg2)
7360 || put_user_u32(sgid, arg3))
7361 goto efault;
7364 break;
7365 #endif
7366 #ifdef TARGET_NR_chown32
7367 case TARGET_NR_chown32:
7368 if (!(p = lock_user_string(arg1)))
7369 goto efault;
7370 ret = get_errno(chown(p, arg2, arg3));
7371 unlock_user(p, arg1, 0);
7372 break;
7373 #endif
7374 #ifdef TARGET_NR_setuid32
7375 case TARGET_NR_setuid32:
7376 ret = get_errno(setuid(arg1));
7377 break;
7378 #endif
7379 #ifdef TARGET_NR_setgid32
7380 case TARGET_NR_setgid32:
7381 ret = get_errno(setgid(arg1));
7382 break;
7383 #endif
7384 #ifdef TARGET_NR_setfsuid32
7385 case TARGET_NR_setfsuid32:
7386 ret = get_errno(setfsuid(arg1));
7387 break;
7388 #endif
7389 #ifdef TARGET_NR_setfsgid32
7390 case TARGET_NR_setfsgid32:
7391 ret = get_errno(setfsgid(arg1));
7392 break;
7393 #endif
7395 case TARGET_NR_pivot_root:
7396 goto unimplemented;
7397 #ifdef TARGET_NR_mincore
7398 case TARGET_NR_mincore:
7400 void *a;
7401 ret = -TARGET_EFAULT;
7402 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7403 goto efault;
7404 if (!(p = lock_user_string(arg3)))
7405 goto mincore_fail;
7406 ret = get_errno(mincore(a, arg2, p));
7407 unlock_user(p, arg3, ret);
7408 mincore_fail:
7409 unlock_user(a, arg1, 0);
7411 break;
7412 #endif
7413 #ifdef TARGET_NR_arm_fadvise64_64
7414 case TARGET_NR_arm_fadvise64_64:
7416 /*
7417 * arm_fadvise64_64 looks like fadvise64_64 but
7418 * with different argument order
7419 */
7420 abi_long temp;
7421 temp = arg3;
7422 arg3 = arg4;
7423 arg4 = temp;
7425 #endif
7426 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7427 #ifdef TARGET_NR_fadvise64_64
7428 case TARGET_NR_fadvise64_64:
7429 #endif
7430 #ifdef TARGET_NR_fadvise64
7431 case TARGET_NR_fadvise64:
7432 #endif
7433 #ifdef TARGET_S390X
7434 switch (arg4) {
7435 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7436 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7437 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7438 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7439 default: break;
7441 #endif
7442 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7443 break;
7444 #endif
7445 #ifdef TARGET_NR_madvise
7446 case TARGET_NR_madvise:
7447 /* A straight passthrough may not be safe because qemu sometimes
7448 turns private file-backed mappings into anonymous mappings.
7449 This will break MADV_DONTNEED.
7450 This is a hint, so ignoring and returning success is ok. */
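        /* In particular, MADV_DONTNEED pages of an anonymous mapping read
           back as zeroes instead of being refetched from the file, so
           forwarding the hint could visibly change guest memory where such
           a substitution has happened. */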
7451 ret = get_errno(0);
7452 break;
7453 #endif
7454 #if TARGET_ABI_BITS == 32
7455 case TARGET_NR_fcntl64:
7457 int cmd;
7458 struct flock64 fl;
7459 struct target_flock64 *target_fl;
7460 #ifdef TARGET_ARM
7461 struct target_eabi_flock64 *target_efl;
7462 #endif
7464 cmd = target_to_host_fcntl_cmd(arg2);
7465 if (cmd == -TARGET_EINVAL)
7466 return cmd;
7468 switch(arg2) {
7469 case TARGET_F_GETLK64:
7470 #ifdef TARGET_ARM
7471 if (((CPUARMState *)cpu_env)->eabi) {
7472 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7473 goto efault;
7474 fl.l_type = tswap16(target_efl->l_type);
7475 fl.l_whence = tswap16(target_efl->l_whence);
7476 fl.l_start = tswap64(target_efl->l_start);
7477 fl.l_len = tswap64(target_efl->l_len);
7478 fl.l_pid = tswap32(target_efl->l_pid);
7479 unlock_user_struct(target_efl, arg3, 0);
7480 } else
7481 #endif
7483 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7484 goto efault;
7485 fl.l_type = tswap16(target_fl->l_type);
7486 fl.l_whence = tswap16(target_fl->l_whence);
7487 fl.l_start = tswap64(target_fl->l_start);
7488 fl.l_len = tswap64(target_fl->l_len);
7489 fl.l_pid = tswap32(target_fl->l_pid);
7490 unlock_user_struct(target_fl, arg3, 0);
7492 ret = get_errno(fcntl(arg1, cmd, &fl));
7493 if (ret == 0) {
7494 #ifdef TARGET_ARM
7495 if (((CPUARMState *)cpu_env)->eabi) {
7496 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7497 goto efault;
7498 target_efl->l_type = tswap16(fl.l_type);
7499 target_efl->l_whence = tswap16(fl.l_whence);
7500 target_efl->l_start = tswap64(fl.l_start);
7501 target_efl->l_len = tswap64(fl.l_len);
7502 target_efl->l_pid = tswap32(fl.l_pid);
7503 unlock_user_struct(target_efl, arg3, 1);
7504 } else
7505 #endif
7507 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7508 goto efault;
7509 target_fl->l_type = tswap16(fl.l_type);
7510 target_fl->l_whence = tswap16(fl.l_whence);
7511 target_fl->l_start = tswap64(fl.l_start);
7512 target_fl->l_len = tswap64(fl.l_len);
7513 target_fl->l_pid = tswap32(fl.l_pid);
7514 unlock_user_struct(target_fl, arg3, 1);
7517 break;
7519 case TARGET_F_SETLK64:
7520 case TARGET_F_SETLKW64:
7521 #ifdef TARGET_ARM
7522 if (((CPUARMState *)cpu_env)->eabi) {
7523 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7524 goto efault;
7525 fl.l_type = tswap16(target_efl->l_type);
7526 fl.l_whence = tswap16(target_efl->l_whence);
7527 fl.l_start = tswap64(target_efl->l_start);
7528 fl.l_len = tswap64(target_efl->l_len);
7529 fl.l_pid = tswap32(target_efl->l_pid);
7530 unlock_user_struct(target_efl, arg3, 0);
7531 } else
7532 #endif
7534 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7535 goto efault;
7536 fl.l_type = tswap16(target_fl->l_type);
7537 fl.l_whence = tswap16(target_fl->l_whence);
7538 fl.l_start = tswap64(target_fl->l_start);
7539 fl.l_len = tswap64(target_fl->l_len);
7540 fl.l_pid = tswap32(target_fl->l_pid);
7541 unlock_user_struct(target_fl, arg3, 0);
7543 ret = get_errno(fcntl(arg1, cmd, &fl));
7544 break;
7545 default:
7546 ret = do_fcntl(arg1, arg2, arg3);
7547 break;
7549 break;
7551 #endif
7552 #ifdef TARGET_NR_cacheflush
7553 case TARGET_NR_cacheflush:
7554 /* self-modifying code is handled automatically, so nothing needed */
7555 ret = 0;
7556 break;
7557 #endif
7558 #ifdef TARGET_NR_security
7559 case TARGET_NR_security:
7560 goto unimplemented;
7561 #endif
7562 #ifdef TARGET_NR_getpagesize
7563 case TARGET_NR_getpagesize:
7564 ret = TARGET_PAGE_SIZE;
7565 break;
7566 #endif
7567 case TARGET_NR_gettid:
7568 ret = get_errno(gettid());
7569 break;
7570 #ifdef TARGET_NR_readahead
7571 case TARGET_NR_readahead:
7572 #if TARGET_ABI_BITS == 32
7573 #ifdef TARGET_ARM
7574 if (((CPUARMState *)cpu_env)->eabi)
7576 arg2 = arg3;
7577 arg3 = arg4;
7578 arg4 = arg5;
7580 #endif
7581 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7582 #else
7583 ret = get_errno(readahead(arg1, arg2, arg3));
7584 #endif
7585 break;
7586 #endif
7587 #ifdef TARGET_NR_setxattr
7588 case TARGET_NR_setxattr:
7589 case TARGET_NR_lsetxattr:
7590 case TARGET_NR_fsetxattr:
7591 case TARGET_NR_getxattr:
7592 case TARGET_NR_lgetxattr:
7593 case TARGET_NR_fgetxattr:
7594 case TARGET_NR_listxattr:
7595 case TARGET_NR_llistxattr:
7596 case TARGET_NR_flistxattr:
7597 case TARGET_NR_removexattr:
7598 case TARGET_NR_lremovexattr:
7599 case TARGET_NR_fremovexattr:
7600 ret = -TARGET_EOPNOTSUPP;
7601 break;
7602 #endif
7603 #ifdef TARGET_NR_set_thread_area
7604 case TARGET_NR_set_thread_area:
7605 #if defined(TARGET_MIPS)
7606 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7607 ret = 0;
7608 break;
7609 #elif defined(TARGET_CRIS)
7610 if (arg1 & 0xff)
7611 ret = -TARGET_EINVAL;
7612 else {
7613 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7614 ret = 0;
7616 break;
7617 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7618 ret = do_set_thread_area(cpu_env, arg1);
7619 break;
7620 #else
7621 goto unimplemented_nowarn;
7622 #endif
7623 #endif
7624 #ifdef TARGET_NR_get_thread_area
7625 case TARGET_NR_get_thread_area:
7626 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7627 ret = do_get_thread_area(cpu_env, arg1);
7628 #else
7629 goto unimplemented_nowarn;
7630 #endif
7631 #endif
7632 #ifdef TARGET_NR_getdomainname
7633 case TARGET_NR_getdomainname:
7634 goto unimplemented_nowarn;
7635 #endif
7637 #ifdef TARGET_NR_clock_gettime
7638 case TARGET_NR_clock_gettime:
7640 struct timespec ts;
7641 ret = get_errno(clock_gettime(arg1, &ts));
7642 if (!is_error(ret)) {
7643 host_to_target_timespec(arg2, &ts);
7645 break;
7647 #endif
7648 #ifdef TARGET_NR_clock_getres
7649 case TARGET_NR_clock_getres:
7651 struct timespec ts;
7652 ret = get_errno(clock_getres(arg1, &ts));
7653 if (!is_error(ret)) {
7654 host_to_target_timespec(arg2, &ts);
7656 break;
7658 #endif
7659 #ifdef TARGET_NR_clock_nanosleep
7660 case TARGET_NR_clock_nanosleep:
7662 struct timespec ts;
7663 target_to_host_timespec(&ts, arg3);
7664 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7665 if (arg4)
7666 host_to_target_timespec(arg4, &ts);
7667 break;
7669 #endif
7671 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7672 case TARGET_NR_set_tid_address:
7673 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7674 break;
7675 #endif
7677 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7678 case TARGET_NR_tkill:
7679 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7680 break;
7681 #endif
7683 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7684 case TARGET_NR_tgkill:
7685 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7686 target_to_host_signal(arg3)));
7687 break;
7688 #endif
7690 #ifdef TARGET_NR_set_robust_list
7691 case TARGET_NR_set_robust_list:
7692 goto unimplemented_nowarn;
7693 #endif
7695 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7696 case TARGET_NR_utimensat:
7698 struct timespec *tsp, ts[2];
7699 if (!arg3) {
7700 tsp = NULL;
7701 } else {
7702 target_to_host_timespec(ts, arg3);
7703 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
7704 tsp = ts;
7706 if (!arg2)
7707 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7708 else {
7709 if (!(p = lock_user_string(arg2))) {
7710 ret = -TARGET_EFAULT;
7711 goto fail;
7713 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7714 unlock_user(p, arg2, 0);
7717 break;
7718 #endif
7719 #if defined(CONFIG_USE_NPTL)
7720 case TARGET_NR_futex:
7721 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7722 break;
7723 #endif
7724 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7725 case TARGET_NR_inotify_init:
7726 ret = get_errno(sys_inotify_init());
7727 break;
7728 #endif
7729 #ifdef CONFIG_INOTIFY1
7730 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7731 case TARGET_NR_inotify_init1:
7732 ret = get_errno(sys_inotify_init1(arg1));
7733 break;
7734 #endif
7735 #endif
7736 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7737 case TARGET_NR_inotify_add_watch:
7738 p = lock_user_string(arg2);
7739 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7740 unlock_user(p, arg2, 0);
7741 break;
7742 #endif
7743 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7744 case TARGET_NR_inotify_rm_watch:
7745 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7746 break;
7747 #endif
7749 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7750 case TARGET_NR_mq_open:
7752 struct mq_attr posix_mq_attr;
7754 p = lock_user_string(arg1 - 1);
7755 if (arg4 != 0)
7756 copy_from_user_mq_attr (&posix_mq_attr, arg4);
7757 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
7758 unlock_user (p, arg1, 0);
7760 break;
7762 case TARGET_NR_mq_unlink:
7763 p = lock_user_string(arg1 - 1);
7764 ret = get_errno(mq_unlink(p));
7765 unlock_user (p, arg1, 0);
7766 break;
7768 case TARGET_NR_mq_timedsend:
7770 struct timespec ts;
7772 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7773 if (arg5 != 0) {
7774 target_to_host_timespec(&ts, arg5);
7775 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7776 host_to_target_timespec(arg5, &ts);
7778 else
7779 ret = get_errno(mq_send(arg1, p, arg3, arg4));
7780 unlock_user (p, arg2, arg3);
7782 break;
7784 case TARGET_NR_mq_timedreceive:
7786 struct timespec ts;
7787 unsigned int prio;
7789 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7790 if (arg5 != 0) {
7791 target_to_host_timespec(&ts, arg5);
7792 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
7793 host_to_target_timespec(arg5, &ts);
7795 else
7796 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
7797 unlock_user (p, arg2, arg3);
7798 if (arg4 != 0)
7799 put_user_u32(prio, arg4);
7801 break;
7803 /* Not implemented for now... */
7804 /* case TARGET_NR_mq_notify: */
7805 /* break; */
7807 case TARGET_NR_mq_getsetattr:
7809 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
7810 ret = 0;
7811 if (arg3 != 0) {
7812 ret = mq_getattr(arg1, &posix_mq_attr_out);
7813 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
7815 if (arg2 != 0) {
7816 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
7817 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
7821 break;
7822 #endif
7824 #ifdef CONFIG_SPLICE
7825 #ifdef TARGET_NR_tee
7826 case TARGET_NR_tee:
7828 ret = get_errno(tee(arg1,arg2,arg3,arg4));
7830 break;
7831 #endif
7832 #ifdef TARGET_NR_splice
7833 case TARGET_NR_splice:
7835 loff_t loff_in, loff_out;
7836 loff_t *ploff_in = NULL, *ploff_out = NULL;
7837 if(arg2) {
7838 get_user_u64(loff_in, arg2);
7839 ploff_in = &loff_in;
7841 if(arg4) {
7842 get_user_u64(loff_out, arg4);
7843 ploff_out = &loff_out;
7845 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
7847 break;
7848 #endif
7849 #ifdef TARGET_NR_vmsplice
7850 case TARGET_NR_vmsplice:
7852 int count = arg3;
7853 struct iovec *vec;
7855 vec = alloca(count * sizeof(struct iovec));
7856 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7857 goto efault;
7858 ret = get_errno(vmsplice(arg1, vec, count, arg4));
7859 unlock_iovec(vec, arg2, count, 0);
7861 break;
7862 #endif
7863 #endif /* CONFIG_SPLICE */
7864 #ifdef CONFIG_EVENTFD
7865 #if defined(TARGET_NR_eventfd)
7866 case TARGET_NR_eventfd:
7867 ret = get_errno(eventfd(arg1, 0));
7868 break;
7869 #endif
7870 #if defined(TARGET_NR_eventfd2)
7871 case TARGET_NR_eventfd2:
7872 ret = get_errno(eventfd(arg1, arg2));
7873 break;
7874 #endif
7875 #endif /* CONFIG_EVENTFD */
7876 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7877 case TARGET_NR_fallocate:
7878 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
7879 break;
7880 #endif
7881 #if defined(CONFIG_SYNC_FILE_RANGE)
7882 #if defined(TARGET_NR_sync_file_range)
7883 case TARGET_NR_sync_file_range:
7884 #if TARGET_ABI_BITS == 32
7885 #if defined(TARGET_MIPS)
7886 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7887 target_offset64(arg5, arg6), arg7));
7888 #else
7889 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
7890 target_offset64(arg4, arg5), arg6));
7891 #endif /* !TARGET_MIPS */
7892 #else
7893 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
7894 #endif
7895 break;
7896 #endif
7897 #if defined(TARGET_NR_sync_file_range2)
7898 case TARGET_NR_sync_file_range2:
7899 /* This is like sync_file_range but the arguments are reordered */
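        /* sync_file_range2 takes (fd, flags, offset, nbytes) so that the
           64-bit offset lands in an aligned register pair on 32-bit ABIs,
           which is why arg2 is passed as the flags argument here. */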
7900 #if TARGET_ABI_BITS == 32
7901 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7902 target_offset64(arg5, arg6), arg2));
7903 #else
7904 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
7905 #endif
7906 break;
7907 #endif
7908 #endif
7909 #if defined(CONFIG_EPOLL)
7910 #if defined(TARGET_NR_epoll_create)
7911 case TARGET_NR_epoll_create:
7912 ret = get_errno(epoll_create(arg1));
7913 break;
7914 #endif
7915 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7916 case TARGET_NR_epoll_create1:
7917 ret = get_errno(epoll_create1(arg1));
7918 break;
7919 #endif
7920 #if defined(TARGET_NR_epoll_ctl)
7921 case TARGET_NR_epoll_ctl:
7923 struct epoll_event ep;
7924 struct epoll_event *epp = 0;
7925 if (arg4) {
7926 struct target_epoll_event *target_ep;
7927 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
7928 goto efault;
7930 ep.events = tswap32(target_ep->events);
7931 /* The epoll_data_t union is just opaque data to the kernel,
7932 * so we transfer all 64 bits across and need not worry what
7933 * actual data type it is.
7934 */
7935 ep.data.u64 = tswap64(target_ep->data.u64);
7936 unlock_user_struct(target_ep, arg4, 0);
7937 epp = &ep;
7939 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
7940 break;
7942 #endif
7944 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7945 #define IMPLEMENT_EPOLL_PWAIT
7946 #endif
7947 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7948 #if defined(TARGET_NR_epoll_wait)
7949 case TARGET_NR_epoll_wait:
7950 #endif
7951 #if defined(IMPLEMENT_EPOLL_PWAIT)
7952 case TARGET_NR_epoll_pwait:
7953 #endif
7955 struct target_epoll_event *target_ep;
7956 struct epoll_event *ep;
7957 int epfd = arg1;
7958 int maxevents = arg3;
7959 int timeout = arg4;
7961 target_ep = lock_user(VERIFY_WRITE, arg2,
7962 maxevents * sizeof(struct target_epoll_event), 1);
7963 if (!target_ep) {
7964 goto efault;
7967 ep = alloca(maxevents * sizeof(struct epoll_event));
7969 switch (num) {
7970 #if defined(IMPLEMENT_EPOLL_PWAIT)
7971 case TARGET_NR_epoll_pwait:
7973 target_sigset_t *target_set;
7974 sigset_t _set, *set = &_set;
7976 if (arg5) {
7977 target_set = lock_user(VERIFY_READ, arg5,
7978 sizeof(target_sigset_t), 1);
7979 if (!target_set) {
7980 unlock_user(target_ep, arg2, 0);
7981 goto efault;
7983 target_to_host_sigset(set, target_set);
7984 unlock_user(target_set, arg5, 0);
7985 } else {
7986 set = NULL;
7989 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
7990 break;
7992 #endif
7993 #if defined(TARGET_NR_epoll_wait)
7994 case TARGET_NR_epoll_wait:
7995 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
7996 break;
7997 #endif
7998 default:
7999 ret = -TARGET_ENOSYS;
8001 if (!is_error(ret)) {
8002 int i;
8003 for (i = 0; i < ret; i++) {
8004 target_ep[i].events = tswap32(ep[i].events);
8005 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8008 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8009 break;
8011 #endif
8012 #endif
8013 #ifdef TARGET_NR_prlimit64
8014 case TARGET_NR_prlimit64:
8016 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
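        /* struct rlimit64 uses fixed 64-bit fields on every ABI, so unlike
           the ugetrlimit path above only the byte order of rlim_cur and
           rlim_max needs converting. */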
8017 struct target_rlimit64 *target_rnew, *target_rold;
8018 struct host_rlimit64 rnew, rold, *rnewp = 0;
8019 if (arg3) {
8020 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8021 goto efault;
8023 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8024 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8025 unlock_user_struct(target_rnew, arg3, 0);
8026 rnewp = &rnew;
8029 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8030 if (!is_error(ret) && arg4) {
8031 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8032 goto efault;
8034 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8035 target_rold->rlim_max = tswap64(rold.rlim_max);
8036 unlock_user_struct(target_rold, arg4, 1);
8038 break;
8040 #endif
8041 default:
8042 unimplemented:
8043 gemu_log("qemu: Unsupported syscall: %d\n", num);
8044 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8045 unimplemented_nowarn:
8046 #endif
8047 ret = -TARGET_ENOSYS;
8048 break;
8050 fail:
8051 #ifdef DEBUG
8052 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8053 #endif
8054 if(do_strace)
8055 print_syscall_ret(num, ret);
8056 return ret;
8057 efault:
8058 ret = -TARGET_EFAULT;
8059 goto fail;