linux-user: Don't use MAP_FIXED in do_brk()
[qemu.git] / linux-user / syscall.c
blob b9757306acb063349dc2840727578a455f22f947
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include <qemu-common.h>
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/utsname.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #include <linux/vt.h>
95 #include "linux_loop.h"
96 #include "cpu-uname.h"
98 #include "qemu.h"
99 #include "qemu-common.h"
101 #if defined(CONFIG_USE_NPTL)
102 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
103 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
104 #else
105 /* XXX: Hardcode the above values. */
106 #define CLONE_NPTL_FLAGS2 0
107 #endif
109 //#define DEBUG
111 //#include <linux/msdos_fs.h>
112 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
113 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
116 #undef _syscall0
117 #undef _syscall1
118 #undef _syscall2
119 #undef _syscall3
120 #undef _syscall4
121 #undef _syscall5
122 #undef _syscall6
124 #define _syscall0(type,name) \
125 static type name (void) \
127 return syscall(__NR_##name); \
130 #define _syscall1(type,name,type1,arg1) \
131 static type name (type1 arg1) \
133 return syscall(__NR_##name, arg1); \
136 #define _syscall2(type,name,type1,arg1,type2,arg2) \
137 static type name (type1 arg1,type2 arg2) \
139 return syscall(__NR_##name, arg1, arg2); \
142 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
143 static type name (type1 arg1,type2 arg2,type3 arg3) \
145 return syscall(__NR_##name, arg1, arg2, arg3); \
148 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
149 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
151 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
154 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
155 type5,arg5) \
156 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
158 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
162 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5,type6,arg6) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
165 type6 arg6) \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
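/*
 * Illustrative note (a sketch, not part of the original file): the
 * _syscallN() macros above wrap the libc syscall() primitive so the
 * calls go straight to the kernel by number, independent of whether the
 * host libc provides a stub.  For example, the later
 *
 *     _syscall2(int, sys_getpriority, int, which, int, who);
 *
 * expands to roughly:
 *
 *     static int sys_getpriority(int which, int who)
 *     {
 *         return syscall(__NR_sys_getpriority, which, who);
 *     }
 *
 * with __NR_sys_getpriority aliased to the host's __NR_getpriority by
 * the #define block that follows.
 */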
171 #define __NR_sys_uname __NR_uname
172 #define __NR_sys_faccessat __NR_faccessat
173 #define __NR_sys_fchmodat __NR_fchmodat
174 #define __NR_sys_fchownat __NR_fchownat
175 #define __NR_sys_fstatat64 __NR_fstatat64
176 #define __NR_sys_futimesat __NR_futimesat
177 #define __NR_sys_getcwd1 __NR_getcwd
178 #define __NR_sys_getdents __NR_getdents
179 #define __NR_sys_getdents64 __NR_getdents64
180 #define __NR_sys_getpriority __NR_getpriority
181 #define __NR_sys_linkat __NR_linkat
182 #define __NR_sys_mkdirat __NR_mkdirat
183 #define __NR_sys_mknodat __NR_mknodat
184 #define __NR_sys_newfstatat __NR_newfstatat
185 #define __NR_sys_openat __NR_openat
186 #define __NR_sys_readlinkat __NR_readlinkat
187 #define __NR_sys_renameat __NR_renameat
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_symlinkat __NR_symlinkat
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_unlinkat __NR_unlinkat
194 #define __NR_sys_utimensat __NR_utimensat
195 #define __NR_sys_futex __NR_futex
196 #define __NR_sys_inotify_init __NR_inotify_init
197 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
198 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
200 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
201 defined(__s390x__)
202 #define __NR__llseek __NR_lseek
203 #endif
205 #ifdef __NR_gettid
206 _syscall0(int, gettid)
207 #else
208 /* This is a replacement for the host gettid() and must return a host
209 errno. */
210 static int gettid(void) {
211 return -ENOSYS;
213 #endif
214 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
215 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
216 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
217 #endif
218 _syscall2(int, sys_getpriority, int, which, int, who);
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
221 loff_t *, res, uint, wh);
222 #endif
223 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
224 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
225 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
226 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
227 #endif
228 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
229 _syscall2(int,sys_tkill,int,tid,int,sig)
230 #endif
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
233 #endif
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
236 #endif
237 #if defined(CONFIG_USE_NPTL)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
240 const struct timespec *,timeout,int *,uaddr2,int,val3)
241 #endif
242 #endif
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
250 static bitmask_transtbl fcntl_flags_tbl[] = {
251 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
252 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
253 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
254 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
255 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
256 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
257 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
258 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
259 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
260 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
261 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
262 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
263 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
264 #if defined(O_DIRECT)
265 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
266 #endif
267 { 0, 0, 0, 0 }
270 #define COPY_UTSNAME_FIELD(dest, src) \
271 do { \
272 /* __NEW_UTS_LEN doesn't include terminating null */ \
273 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
274 (dest)[__NEW_UTS_LEN] = '\0'; \
275 } while (0)
277 static int sys_uname(struct new_utsname *buf)
279 struct utsname uts_buf;
281 if (uname(&uts_buf) < 0)
282 return (-1);
285 * Just in case these have some differences, we
286 * translate utsname to new_utsname (which is the
 287  * struct the Linux kernel uses).
290 memset(buf, 0, sizeof(*buf));
291 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
292 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
293 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
294 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
295 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
296 #ifdef _GNU_SOURCE
297 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
298 #endif
299 return (0);
301 #undef COPY_UTSNAME_FIELD
304 static int sys_getcwd1(char *buf, size_t size)
306 if (getcwd(buf, size) == NULL) {
307 /* getcwd() sets errno */
308 return (-1);
310 return strlen(buf)+1;
313 #ifdef CONFIG_ATFILE
315 * Host system seems to have atfile syscall stubs available. We
316 * now enable them one by one as specified by target syscall_nr.h.
319 #ifdef TARGET_NR_faccessat
320 static int sys_faccessat(int dirfd, const char *pathname, int mode)
322 return (faccessat(dirfd, pathname, mode, 0));
324 #endif
325 #ifdef TARGET_NR_fchmodat
326 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
328 return (fchmodat(dirfd, pathname, mode, 0));
330 #endif
331 #if defined(TARGET_NR_fchownat)
332 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
333 gid_t group, int flags)
335 return (fchownat(dirfd, pathname, owner, group, flags));
337 #endif
338 #ifdef __NR_fstatat64
339 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
340 int flags)
342 return (fstatat(dirfd, pathname, buf, flags));
344 #endif
345 #ifdef __NR_newfstatat
346 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
347 int flags)
349 return (fstatat(dirfd, pathname, buf, flags));
351 #endif
352 #ifdef TARGET_NR_futimesat
353 static int sys_futimesat(int dirfd, const char *pathname,
354 const struct timeval times[2])
356 return (futimesat(dirfd, pathname, times));
358 #endif
359 #ifdef TARGET_NR_linkat
360 static int sys_linkat(int olddirfd, const char *oldpath,
361 int newdirfd, const char *newpath, int flags)
363 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
365 #endif
366 #ifdef TARGET_NR_mkdirat
367 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
369 return (mkdirat(dirfd, pathname, mode));
371 #endif
372 #ifdef TARGET_NR_mknodat
373 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
374 dev_t dev)
376 return (mknodat(dirfd, pathname, mode, dev));
378 #endif
379 #ifdef TARGET_NR_openat
380 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
 383  * open(2) has an extra parameter 'mode' when called with
384 * flag O_CREAT.
386 if ((flags & O_CREAT) != 0) {
387 va_list ap;
388 mode_t mode;
391 * Get the 'mode' parameter and translate it to
392 * host bits.
394 va_start(ap, flags);
395 mode = va_arg(ap, mode_t);
396 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
397 va_end(ap);
399 return (openat(dirfd, pathname, flags, mode));
401 return (openat(dirfd, pathname, flags));
403 #endif
404 #ifdef TARGET_NR_readlinkat
405 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
407 return (readlinkat(dirfd, pathname, buf, bufsiz));
409 #endif
410 #ifdef TARGET_NR_renameat
411 static int sys_renameat(int olddirfd, const char *oldpath,
412 int newdirfd, const char *newpath)
414 return (renameat(olddirfd, oldpath, newdirfd, newpath));
416 #endif
417 #ifdef TARGET_NR_symlinkat
418 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
420 return (symlinkat(oldpath, newdirfd, newpath));
422 #endif
423 #ifdef TARGET_NR_unlinkat
424 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
426 return (unlinkat(dirfd, pathname, flags));
428 #endif
429 #else /* !CONFIG_ATFILE */
432 * Try direct syscalls instead
434 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
435 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
436 #endif
437 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
438 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
439 #endif
440 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
441 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
442 uid_t,owner,gid_t,group,int,flags)
443 #endif
444 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
445 defined(__NR_fstatat64)
446 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
447 struct stat *,buf,int,flags)
448 #endif
449 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
450 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
451 const struct timeval *,times)
452 #endif
453 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
454 defined(__NR_newfstatat)
455 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
456 struct stat *,buf,int,flags)
457 #endif
458 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
459 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
460 int,newdirfd,const char *,newpath,int,flags)
461 #endif
462 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
463 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
464 #endif
465 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
466 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
467 mode_t,mode,dev_t,dev)
468 #endif
469 #if defined(TARGET_NR_openat) && defined(__NR_openat)
470 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
471 #endif
472 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
473 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
474 char *,buf,size_t,bufsize)
475 #endif
476 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
477 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
478 int,newdirfd,const char *,newpath)
479 #endif
480 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
481 _syscall3(int,sys_symlinkat,const char *,oldpath,
482 int,newdirfd,const char *,newpath)
483 #endif
484 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
485 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
486 #endif
488 #endif /* CONFIG_ATFILE */
490 #ifdef CONFIG_UTIMENSAT
491 static int sys_utimensat(int dirfd, const char *pathname,
492 const struct timespec times[2], int flags)
494 if (pathname == NULL)
495 return futimens(dirfd, times);
496 else
497 return utimensat(dirfd, pathname, times, flags);
499 #else
500 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
501 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
502 const struct timespec *,tsp,int,flags)
503 #endif
504 #endif /* CONFIG_UTIMENSAT */
506 #ifdef CONFIG_INOTIFY
507 #include <sys/inotify.h>
509 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
510 static int sys_inotify_init(void)
512 return (inotify_init());
514 #endif
515 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
516 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
518 return (inotify_add_watch(fd, pathname, mask));
520 #endif
521 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
522 static int sys_inotify_rm_watch(int fd, int32_t wd)
524 return (inotify_rm_watch(fd, wd));
526 #endif
527 #ifdef CONFIG_INOTIFY1
528 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
529 static int sys_inotify_init1(int flags)
531 return (inotify_init1(flags));
533 #endif
534 #endif
535 #else
536 /* Userspace can usually survive runtime without inotify */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY */
543 #if defined(TARGET_NR_ppoll)
544 #ifndef __NR_ppoll
545 # define __NR_ppoll -1
546 #endif
547 #define __NR_sys_ppoll __NR_ppoll
548 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
549 struct timespec *, timeout, const __sigset_t *, sigmask,
550 size_t, sigsetsize)
551 #endif
553 extern int personality(int);
554 extern int flock(int, int);
555 extern int setfsuid(int);
556 extern int setfsgid(int);
557 extern int setgroups(int, gid_t *);
559 #define ERRNO_TABLE_SIZE 1200
561 /* target_to_host_errno_table[] is initialized from
562 * host_to_target_errno_table[] in syscall_init(). */
563 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
567 * This list is the union of errno values overridden in asm-<arch>/errno.h
568 * minus the errnos that are not actually generic to all archs.
570 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
571 [EIDRM] = TARGET_EIDRM,
572 [ECHRNG] = TARGET_ECHRNG,
573 [EL2NSYNC] = TARGET_EL2NSYNC,
574 [EL3HLT] = TARGET_EL3HLT,
575 [EL3RST] = TARGET_EL3RST,
576 [ELNRNG] = TARGET_ELNRNG,
577 [EUNATCH] = TARGET_EUNATCH,
578 [ENOCSI] = TARGET_ENOCSI,
579 [EL2HLT] = TARGET_EL2HLT,
580 [EDEADLK] = TARGET_EDEADLK,
581 [ENOLCK] = TARGET_ENOLCK,
582 [EBADE] = TARGET_EBADE,
583 [EBADR] = TARGET_EBADR,
584 [EXFULL] = TARGET_EXFULL,
585 [ENOANO] = TARGET_ENOANO,
586 [EBADRQC] = TARGET_EBADRQC,
587 [EBADSLT] = TARGET_EBADSLT,
588 [EBFONT] = TARGET_EBFONT,
589 [ENOSTR] = TARGET_ENOSTR,
590 [ENODATA] = TARGET_ENODATA,
591 [ETIME] = TARGET_ETIME,
592 [ENOSR] = TARGET_ENOSR,
593 [ENONET] = TARGET_ENONET,
594 [ENOPKG] = TARGET_ENOPKG,
595 [EREMOTE] = TARGET_EREMOTE,
596 [ENOLINK] = TARGET_ENOLINK,
597 [EADV] = TARGET_EADV,
598 [ESRMNT] = TARGET_ESRMNT,
599 [ECOMM] = TARGET_ECOMM,
600 [EPROTO] = TARGET_EPROTO,
601 [EDOTDOT] = TARGET_EDOTDOT,
602 [EMULTIHOP] = TARGET_EMULTIHOP,
603 [EBADMSG] = TARGET_EBADMSG,
604 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
605 [EOVERFLOW] = TARGET_EOVERFLOW,
606 [ENOTUNIQ] = TARGET_ENOTUNIQ,
607 [EBADFD] = TARGET_EBADFD,
608 [EREMCHG] = TARGET_EREMCHG,
609 [ELIBACC] = TARGET_ELIBACC,
610 [ELIBBAD] = TARGET_ELIBBAD,
611 [ELIBSCN] = TARGET_ELIBSCN,
612 [ELIBMAX] = TARGET_ELIBMAX,
613 [ELIBEXEC] = TARGET_ELIBEXEC,
614 [EILSEQ] = TARGET_EILSEQ,
615 [ENOSYS] = TARGET_ENOSYS,
616 [ELOOP] = TARGET_ELOOP,
617 [ERESTART] = TARGET_ERESTART,
618 [ESTRPIPE] = TARGET_ESTRPIPE,
619 [ENOTEMPTY] = TARGET_ENOTEMPTY,
620 [EUSERS] = TARGET_EUSERS,
621 [ENOTSOCK] = TARGET_ENOTSOCK,
622 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
623 [EMSGSIZE] = TARGET_EMSGSIZE,
624 [EPROTOTYPE] = TARGET_EPROTOTYPE,
625 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
626 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
627 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
628 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
629 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
630 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
631 [EADDRINUSE] = TARGET_EADDRINUSE,
632 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
633 [ENETDOWN] = TARGET_ENETDOWN,
634 [ENETUNREACH] = TARGET_ENETUNREACH,
635 [ENETRESET] = TARGET_ENETRESET,
636 [ECONNABORTED] = TARGET_ECONNABORTED,
637 [ECONNRESET] = TARGET_ECONNRESET,
638 [ENOBUFS] = TARGET_ENOBUFS,
639 [EISCONN] = TARGET_EISCONN,
640 [ENOTCONN] = TARGET_ENOTCONN,
641 [EUCLEAN] = TARGET_EUCLEAN,
642 [ENOTNAM] = TARGET_ENOTNAM,
643 [ENAVAIL] = TARGET_ENAVAIL,
644 [EISNAM] = TARGET_EISNAM,
645 [EREMOTEIO] = TARGET_EREMOTEIO,
646 [ESHUTDOWN] = TARGET_ESHUTDOWN,
647 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
648 [ETIMEDOUT] = TARGET_ETIMEDOUT,
649 [ECONNREFUSED] = TARGET_ECONNREFUSED,
650 [EHOSTDOWN] = TARGET_EHOSTDOWN,
651 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
652 [EALREADY] = TARGET_EALREADY,
653 [EINPROGRESS] = TARGET_EINPROGRESS,
654 [ESTALE] = TARGET_ESTALE,
655 [ECANCELED] = TARGET_ECANCELED,
656 [ENOMEDIUM] = TARGET_ENOMEDIUM,
657 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
658 #ifdef ENOKEY
659 [ENOKEY] = TARGET_ENOKEY,
660 #endif
661 #ifdef EKEYEXPIRED
662 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
663 #endif
664 #ifdef EKEYREVOKED
665 [EKEYREVOKED] = TARGET_EKEYREVOKED,
666 #endif
667 #ifdef EKEYREJECTED
668 [EKEYREJECTED] = TARGET_EKEYREJECTED,
669 #endif
670 #ifdef EOWNERDEAD
671 [EOWNERDEAD] = TARGET_EOWNERDEAD,
672 #endif
673 #ifdef ENOTRECOVERABLE
674 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
675 #endif
678 static inline int host_to_target_errno(int err)
680 if(host_to_target_errno_table[err])
681 return host_to_target_errno_table[err];
682 return err;
685 static inline int target_to_host_errno(int err)
687 if (target_to_host_errno_table[err])
688 return target_to_host_errno_table[err];
689 return err;
692 static inline abi_long get_errno(abi_long ret)
694 if (ret == -1)
695 return -host_to_target_errno(errno);
696 else
697 return ret;
700 static inline int is_error(abi_long ret)
702 return (abi_ulong)ret >= (abi_ulong)(-4096);
705 char *target_strerror(int err)
707 return strerror(target_to_host_errno(err));
710 static abi_ulong target_brk;
711 static abi_ulong target_original_brk;
713 void target_set_brk(abi_ulong new_brk)
715 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
718 /* do_brk() must return target values and target errnos. */
719 abi_long do_brk(abi_ulong new_brk)
721 abi_ulong brk_page;
722 abi_long mapped_addr;
723 int new_alloc_size;
725 if (!new_brk)
726 return target_brk;
727 if (new_brk < target_original_brk)
728 return target_brk;
730 brk_page = HOST_PAGE_ALIGN(target_brk);
732 /* If the new brk is less than this, set it and we're done... */
733 if (new_brk < brk_page) {
734 target_brk = new_brk;
735 return target_brk;
738 /* We need to allocate more memory after the brk... Note that
739 * we don't use MAP_FIXED because that will map over the top of
740 * any existing mapping (like the one with the host libc or qemu
741 * itself); instead we treat "mapped but at wrong address" as
742 * a failure and unmap again.
744 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
745 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
746 PROT_READ|PROT_WRITE,
747 MAP_ANON|MAP_PRIVATE, 0, 0));
749 if (mapped_addr == brk_page) {
750 target_brk = new_brk;
751 return target_brk;
752 } else if (mapped_addr != -1) {
753 /* Mapped but at wrong address, meaning there wasn't actually
754 * enough space for this brk.
756 target_munmap(mapped_addr, new_alloc_size);
757 mapped_addr = -1;
760 #if defined(TARGET_ALPHA)
761 /* We (partially) emulate OSF/1 on Alpha, which requires we
762 return a proper errno, not an unchanged brk value. */
763 return -TARGET_ENOMEM;
764 #endif
765 /* For everything else, return the previous break. */
766 return target_brk;
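/*
 * Illustrative sketch (not part of the original file; addresses are
 * hypothetical): the probe-and-check idiom used above, expressed with
 * plain mmap(2).  A MAP_FIXED request at "hint" would silently replace
 * whatever is already mapped there (host libc, QEMU itself, ...);
 * requesting without MAP_FIXED lets the kernel pick another address,
 * which do_brk() then treats as "no room to grow the heap":
 *
 *     void *hint = (void *)0x40002000;   // stand-in for brk_page
 *     void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
 *                    MAP_ANON | MAP_PRIVATE, -1, 0);
 *     if (p == hint) {
 *         // heap extended in place
 *     } else if (p != MAP_FAILED) {
 *         munmap(p, 4096);               // wrong address: undo and fail
 *     }
 */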
769 static inline abi_long copy_from_user_fdset(fd_set *fds,
770 abi_ulong target_fds_addr,
771 int n)
773 int i, nw, j, k;
774 abi_ulong b, *target_fds;
776 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
777 if (!(target_fds = lock_user(VERIFY_READ,
778 target_fds_addr,
779 sizeof(abi_ulong) * nw,
780 1)))
781 return -TARGET_EFAULT;
783 FD_ZERO(fds);
784 k = 0;
785 for (i = 0; i < nw; i++) {
786 /* grab the abi_ulong */
787 __get_user(b, &target_fds[i]);
788 for (j = 0; j < TARGET_ABI_BITS; j++) {
789 /* check the bit inside the abi_ulong */
790 if ((b >> j) & 1)
791 FD_SET(k, fds);
792 k++;
796 unlock_user(target_fds, target_fds_addr, 0);
798 return 0;
801 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
802 const fd_set *fds,
803 int n)
805 int i, nw, j, k;
806 abi_long v;
807 abi_ulong *target_fds;
809 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
810 if (!(target_fds = lock_user(VERIFY_WRITE,
811 target_fds_addr,
812 sizeof(abi_ulong) * nw,
813 0)))
814 return -TARGET_EFAULT;
816 k = 0;
817 for (i = 0; i < nw; i++) {
818 v = 0;
819 for (j = 0; j < TARGET_ABI_BITS; j++) {
820 v |= ((FD_ISSET(k, fds) != 0) << j);
821 k++;
823 __put_user(v, &target_fds[i]);
826 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
828 return 0;
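/*
 * Worked example (a sketch, not from the original source): with 32-bit
 * abi_ulong words, a target fd_set in which only descriptors 1 and 33
 * are set arrives as two guest words 0x00000002 and 0x00000002.
 * copy_from_user_fdset() walks each word bit by bit (k counts fds across
 * words), so it ends up calling FD_SET(1, fds) and FD_SET(33, fds);
 * copy_to_user_fdset() rebuilds the same two words from FD_ISSET().
 */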
831 #if defined(__alpha__)
832 #define HOST_HZ 1024
833 #else
834 #define HOST_HZ 100
835 #endif
837 static inline abi_long host_to_target_clock_t(long ticks)
839 #if HOST_HZ == TARGET_HZ
840 return ticks;
841 #else
842 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
843 #endif
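/*
 * Worked example (sketch, values assumed): host_to_target_clock_t()
 * rescales clock ticks between the host and target notions of HZ.  On an
 * Alpha host (HOST_HZ == 1024) emulating a target with TARGET_HZ == 100,
 * a host value of 2048 ticks becomes (2048 * 100) / 1024 == 200 target
 * ticks; when both sides use the same HZ the value passes through
 * unchanged.
 */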
846 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
847 const struct rusage *rusage)
849 struct target_rusage *target_rusage;
851 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
852 return -TARGET_EFAULT;
853 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
854 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
855 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
856 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
857 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
858 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
859 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
860 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
861 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
862 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
863 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
864 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
865 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
866 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
867 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
868 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
869 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
870 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
871 unlock_user_struct(target_rusage, target_addr, 1);
873 return 0;
876 static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
878 if (target_rlim == TARGET_RLIM_INFINITY)
879 return RLIM_INFINITY;
880 else
881 return tswapl(target_rlim);
884 static inline target_ulong host_to_target_rlim(rlim_t rlim)
886 if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
887 return TARGET_RLIM_INFINITY;
888 else
889 return tswapl(rlim);
892 static inline abi_long copy_from_user_timeval(struct timeval *tv,
893 abi_ulong target_tv_addr)
895 struct target_timeval *target_tv;
897 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
898 return -TARGET_EFAULT;
900 __get_user(tv->tv_sec, &target_tv->tv_sec);
901 __get_user(tv->tv_usec, &target_tv->tv_usec);
903 unlock_user_struct(target_tv, target_tv_addr, 0);
905 return 0;
908 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
909 const struct timeval *tv)
911 struct target_timeval *target_tv;
913 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
914 return -TARGET_EFAULT;
916 __put_user(tv->tv_sec, &target_tv->tv_sec);
917 __put_user(tv->tv_usec, &target_tv->tv_usec);
919 unlock_user_struct(target_tv, target_tv_addr, 1);
921 return 0;
924 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
925 #include <mqueue.h>
927 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
928 abi_ulong target_mq_attr_addr)
930 struct target_mq_attr *target_mq_attr;
932 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
933 target_mq_attr_addr, 1))
934 return -TARGET_EFAULT;
936 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
937 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
938 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
939 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
941 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
943 return 0;
946 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
947 const struct mq_attr *attr)
949 struct target_mq_attr *target_mq_attr;
951 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
952 target_mq_attr_addr, 0))
953 return -TARGET_EFAULT;
955 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
956 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
957 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
958 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
960 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
962 return 0;
964 #endif
966 /* do_select() must return target values and target errnos. */
967 static abi_long do_select(int n,
968 abi_ulong rfd_addr, abi_ulong wfd_addr,
969 abi_ulong efd_addr, abi_ulong target_tv_addr)
971 fd_set rfds, wfds, efds;
972 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
973 struct timeval tv, *tv_ptr;
974 abi_long ret;
976 if (rfd_addr) {
977 if (copy_from_user_fdset(&rfds, rfd_addr, n))
978 return -TARGET_EFAULT;
979 rfds_ptr = &rfds;
980 } else {
981 rfds_ptr = NULL;
983 if (wfd_addr) {
984 if (copy_from_user_fdset(&wfds, wfd_addr, n))
985 return -TARGET_EFAULT;
986 wfds_ptr = &wfds;
987 } else {
988 wfds_ptr = NULL;
990 if (efd_addr) {
991 if (copy_from_user_fdset(&efds, efd_addr, n))
992 return -TARGET_EFAULT;
993 efds_ptr = &efds;
994 } else {
995 efds_ptr = NULL;
998 if (target_tv_addr) {
999 if (copy_from_user_timeval(&tv, target_tv_addr))
1000 return -TARGET_EFAULT;
1001 tv_ptr = &tv;
1002 } else {
1003 tv_ptr = NULL;
1006 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1008 if (!is_error(ret)) {
1009 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1010 return -TARGET_EFAULT;
1011 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1012 return -TARGET_EFAULT;
1013 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1014 return -TARGET_EFAULT;
1016 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1017 return -TARGET_EFAULT;
1020 return ret;
1023 static abi_long do_pipe2(int host_pipe[], int flags)
1025 #ifdef CONFIG_PIPE2
1026 return pipe2(host_pipe, flags);
1027 #else
1028 return -ENOSYS;
1029 #endif
1032 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1033 int flags, int is_pipe2)
1035 int host_pipe[2];
1036 abi_long ret;
1037 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1039 if (is_error(ret))
1040 return get_errno(ret);
1042 /* Several targets have special calling conventions for the original
1043 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1044 if (!is_pipe2) {
1045 #if defined(TARGET_ALPHA)
1046 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1047 return host_pipe[0];
1048 #elif defined(TARGET_MIPS)
1049 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1050 return host_pipe[0];
1051 #elif defined(TARGET_SH4)
1052 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1053 return host_pipe[0];
1054 #endif
1057 if (put_user_s32(host_pipe[0], pipedes)
1058 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1059 return -TARGET_EFAULT;
1060 return get_errno(ret);
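/*
 * Note (editor's sketch): for the targets special-cased above, the
 * original pipe() syscall returns the read end as the normal result and
 * the write end in a second register (e.g. v1 on MIPS, written here via
 * active_tc.gpr[3]) rather than storing both descriptors through the
 * user pointer.  pipe2() has no such convention, so with is_pipe2 set
 * the special case is skipped and both fds are written back with
 * put_user_s32().
 */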
1063 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1064 abi_ulong target_addr,
1065 socklen_t len)
1067 struct target_ip_mreqn *target_smreqn;
1069 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1070 if (!target_smreqn)
1071 return -TARGET_EFAULT;
1072 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1073 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1074 if (len == sizeof(struct target_ip_mreqn))
1075 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
1076 unlock_user(target_smreqn, target_addr, 0);
1078 return 0;
1081 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1082 abi_ulong target_addr,
1083 socklen_t len)
1085 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1086 sa_family_t sa_family;
1087 struct target_sockaddr *target_saddr;
1089 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1090 if (!target_saddr)
1091 return -TARGET_EFAULT;
1093 sa_family = tswap16(target_saddr->sa_family);
1095  * Oops. The caller might send an incomplete sun_path; sun_path
1096 * must be terminated by \0 (see the manual page), but
1097 * unfortunately it is quite common to specify sockaddr_un
1098 * length as "strlen(x->sun_path)" while it should be
1099 * "strlen(...) + 1". We'll fix that here if needed.
1100  * The Linux kernel has a similar feature.
1103 if (sa_family == AF_UNIX) {
1104 if (len < unix_maxlen && len > 0) {
1105 char *cp = (char*)target_saddr;
1107 if ( cp[len-1] && !cp[len] )
1108 len++;
1110 if (len > unix_maxlen)
1111 len = unix_maxlen;
1114 memcpy(addr, target_saddr, len);
1115 addr->sa_family = sa_family;
1116 unlock_user(target_saddr, target_addr, 0);
1118 return 0;
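/*
 * Worked example (sketch, path made up): a guest that binds a unix
 * socket to "/tmp/sock" commonly passes addrlen as
 * offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock"), i.e. one
 * byte short of covering the terminating '\0'.  The check above notices
 * that the last byte inside the given length is non-zero while the byte
 * just past it is zero, and widens len by one so the copied sun_path
 * stays properly terminated, capping it at sizeof(struct sockaddr_un).
 */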
1121 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1122 struct sockaddr *addr,
1123 socklen_t len)
1125 struct target_sockaddr *target_saddr;
1127 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1128 if (!target_saddr)
1129 return -TARGET_EFAULT;
1130 memcpy(target_saddr, addr, len);
1131 target_saddr->sa_family = tswap16(addr->sa_family);
1132 unlock_user(target_saddr, target_addr, len);
1134 return 0;
1137 /* ??? Should this also swap msgh->name? */
1138 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1139 struct target_msghdr *target_msgh)
1141 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1142 abi_long msg_controllen;
1143 abi_ulong target_cmsg_addr;
1144 struct target_cmsghdr *target_cmsg;
1145 socklen_t space = 0;
1147 msg_controllen = tswapl(target_msgh->msg_controllen);
1148 if (msg_controllen < sizeof (struct target_cmsghdr))
1149 goto the_end;
1150 target_cmsg_addr = tswapl(target_msgh->msg_control);
1151 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1152 if (!target_cmsg)
1153 return -TARGET_EFAULT;
1155 while (cmsg && target_cmsg) {
1156 void *data = CMSG_DATA(cmsg);
1157 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1159 int len = tswapl(target_cmsg->cmsg_len)
1160 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1162 space += CMSG_SPACE(len);
1163 if (space > msgh->msg_controllen) {
1164 space -= CMSG_SPACE(len);
1165 gemu_log("Host cmsg overflow\n");
1166 break;
1169 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1170 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1171 cmsg->cmsg_len = CMSG_LEN(len);
1173 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1174 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1175 memcpy(data, target_data, len);
1176 } else {
1177 int *fd = (int *)data;
1178 int *target_fd = (int *)target_data;
1179 int i, numfds = len / sizeof(int);
1181 for (i = 0; i < numfds; i++)
1182 fd[i] = tswap32(target_fd[i]);
1185 cmsg = CMSG_NXTHDR(msgh, cmsg);
1186 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1188 unlock_user(target_cmsg, target_cmsg_addr, 0);
1189 the_end:
1190 msgh->msg_controllen = space;
1191 return 0;
1194 /* ??? Should this also swap msgh->name? */
1195 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1196 struct msghdr *msgh)
1198 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1199 abi_long msg_controllen;
1200 abi_ulong target_cmsg_addr;
1201 struct target_cmsghdr *target_cmsg;
1202 socklen_t space = 0;
1204 msg_controllen = tswapl(target_msgh->msg_controllen);
1205 if (msg_controllen < sizeof (struct target_cmsghdr))
1206 goto the_end;
1207 target_cmsg_addr = tswapl(target_msgh->msg_control);
1208 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1209 if (!target_cmsg)
1210 return -TARGET_EFAULT;
1212 while (cmsg && target_cmsg) {
1213 void *data = CMSG_DATA(cmsg);
1214 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1216 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1218 space += TARGET_CMSG_SPACE(len);
1219 if (space > msg_controllen) {
1220 space -= TARGET_CMSG_SPACE(len);
1221 gemu_log("Target cmsg overflow\n");
1222 break;
1225 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1226 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1227 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1229 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1230 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1231 memcpy(target_data, data, len);
1232 } else {
1233 int *fd = (int *)data;
1234 int *target_fd = (int *)target_data;
1235 int i, numfds = len / sizeof(int);
1237 for (i = 0; i < numfds; i++)
1238 target_fd[i] = tswap32(fd[i]);
1241 cmsg = CMSG_NXTHDR(msgh, cmsg);
1242 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1244 unlock_user(target_cmsg, target_cmsg_addr, space);
1245 the_end:
1246 target_msgh->msg_controllen = tswapl(space);
1247 return 0;
1250 /* do_setsockopt() Must return target values and target errnos. */
1251 static abi_long do_setsockopt(int sockfd, int level, int optname,
1252 abi_ulong optval_addr, socklen_t optlen)
1254 abi_long ret;
1255 int val;
1256 struct ip_mreqn *ip_mreq;
1257 struct ip_mreq_source *ip_mreq_source;
1259 switch(level) {
1260 case SOL_TCP:
1261 /* TCP options all take an 'int' value. */
1262 if (optlen < sizeof(uint32_t))
1263 return -TARGET_EINVAL;
1265 if (get_user_u32(val, optval_addr))
1266 return -TARGET_EFAULT;
1267 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1268 break;
1269 case SOL_IP:
1270 switch(optname) {
1271 case IP_TOS:
1272 case IP_TTL:
1273 case IP_HDRINCL:
1274 case IP_ROUTER_ALERT:
1275 case IP_RECVOPTS:
1276 case IP_RETOPTS:
1277 case IP_PKTINFO:
1278 case IP_MTU_DISCOVER:
1279 case IP_RECVERR:
1280 case IP_RECVTOS:
1281 #ifdef IP_FREEBIND
1282 case IP_FREEBIND:
1283 #endif
1284 case IP_MULTICAST_TTL:
1285 case IP_MULTICAST_LOOP:
1286 val = 0;
1287 if (optlen >= sizeof(uint32_t)) {
1288 if (get_user_u32(val, optval_addr))
1289 return -TARGET_EFAULT;
1290 } else if (optlen >= 1) {
1291 if (get_user_u8(val, optval_addr))
1292 return -TARGET_EFAULT;
1294 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1295 break;
1296 case IP_ADD_MEMBERSHIP:
1297 case IP_DROP_MEMBERSHIP:
1298 if (optlen < sizeof (struct target_ip_mreq) ||
1299 optlen > sizeof (struct target_ip_mreqn))
1300 return -TARGET_EINVAL;
1302 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1303 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1304 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1305 break;
1307 case IP_BLOCK_SOURCE:
1308 case IP_UNBLOCK_SOURCE:
1309 case IP_ADD_SOURCE_MEMBERSHIP:
1310 case IP_DROP_SOURCE_MEMBERSHIP:
1311 if (optlen != sizeof (struct target_ip_mreq_source))
1312 return -TARGET_EINVAL;
1314 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1315 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1316 unlock_user (ip_mreq_source, optval_addr, 0);
1317 break;
1319 default:
1320 goto unimplemented;
1322 break;
1323 case TARGET_SOL_SOCKET:
1324 switch (optname) {
1325 /* Options with 'int' argument. */
1326 case TARGET_SO_DEBUG:
1327 optname = SO_DEBUG;
1328 break;
1329 case TARGET_SO_REUSEADDR:
1330 optname = SO_REUSEADDR;
1331 break;
1332 case TARGET_SO_TYPE:
1333 optname = SO_TYPE;
1334 break;
1335 case TARGET_SO_ERROR:
1336 optname = SO_ERROR;
1337 break;
1338 case TARGET_SO_DONTROUTE:
1339 optname = SO_DONTROUTE;
1340 break;
1341 case TARGET_SO_BROADCAST:
1342 optname = SO_BROADCAST;
1343 break;
1344 case TARGET_SO_SNDBUF:
1345 optname = SO_SNDBUF;
1346 break;
1347 case TARGET_SO_RCVBUF:
1348 optname = SO_RCVBUF;
1349 break;
1350 case TARGET_SO_KEEPALIVE:
1351 optname = SO_KEEPALIVE;
1352 break;
1353 case TARGET_SO_OOBINLINE:
1354 optname = SO_OOBINLINE;
1355 break;
1356 case TARGET_SO_NO_CHECK:
1357 optname = SO_NO_CHECK;
1358 break;
1359 case TARGET_SO_PRIORITY:
1360 optname = SO_PRIORITY;
1361 break;
1362 #ifdef SO_BSDCOMPAT
1363 case TARGET_SO_BSDCOMPAT:
1364 optname = SO_BSDCOMPAT;
1365 break;
1366 #endif
1367 case TARGET_SO_PASSCRED:
1368 optname = SO_PASSCRED;
1369 break;
1370 case TARGET_SO_TIMESTAMP:
1371 optname = SO_TIMESTAMP;
1372 break;
1373 case TARGET_SO_RCVLOWAT:
1374 optname = SO_RCVLOWAT;
1375 break;
1376 case TARGET_SO_RCVTIMEO:
1377 optname = SO_RCVTIMEO;
1378 break;
1379 case TARGET_SO_SNDTIMEO:
1380 optname = SO_SNDTIMEO;
1381 break;
1382 break;
1383 default:
1384 goto unimplemented;
1386 if (optlen < sizeof(uint32_t))
1387 return -TARGET_EINVAL;
1389 if (get_user_u32(val, optval_addr))
1390 return -TARGET_EFAULT;
1391 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1392 break;
1393 default:
1394 unimplemented:
1395 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1396 ret = -TARGET_ENOPROTOOPT;
1398 return ret;
1401 /* do_getsockopt() Must return target values and target errnos. */
1402 static abi_long do_getsockopt(int sockfd, int level, int optname,
1403 abi_ulong optval_addr, abi_ulong optlen)
1405 abi_long ret;
1406 int len, val;
1407 socklen_t lv;
1409 switch(level) {
1410 case TARGET_SOL_SOCKET:
1411 level = SOL_SOCKET;
1412 switch (optname) {
1413 /* These don't just return a single integer */
1414 case TARGET_SO_LINGER:
1415 case TARGET_SO_RCVTIMEO:
1416 case TARGET_SO_SNDTIMEO:
1417 case TARGET_SO_PEERCRED:
1418 case TARGET_SO_PEERNAME:
1419 goto unimplemented;
1420 /* Options with 'int' argument. */
1421 case TARGET_SO_DEBUG:
1422 optname = SO_DEBUG;
1423 goto int_case;
1424 case TARGET_SO_REUSEADDR:
1425 optname = SO_REUSEADDR;
1426 goto int_case;
1427 case TARGET_SO_TYPE:
1428 optname = SO_TYPE;
1429 goto int_case;
1430 case TARGET_SO_ERROR:
1431 optname = SO_ERROR;
1432 goto int_case;
1433 case TARGET_SO_DONTROUTE:
1434 optname = SO_DONTROUTE;
1435 goto int_case;
1436 case TARGET_SO_BROADCAST:
1437 optname = SO_BROADCAST;
1438 goto int_case;
1439 case TARGET_SO_SNDBUF:
1440 optname = SO_SNDBUF;
1441 goto int_case;
1442 case TARGET_SO_RCVBUF:
1443 optname = SO_RCVBUF;
1444 goto int_case;
1445 case TARGET_SO_KEEPALIVE:
1446 optname = SO_KEEPALIVE;
1447 goto int_case;
1448 case TARGET_SO_OOBINLINE:
1449 optname = SO_OOBINLINE;
1450 goto int_case;
1451 case TARGET_SO_NO_CHECK:
1452 optname = SO_NO_CHECK;
1453 goto int_case;
1454 case TARGET_SO_PRIORITY:
1455 optname = SO_PRIORITY;
1456 goto int_case;
1457 #ifdef SO_BSDCOMPAT
1458 case TARGET_SO_BSDCOMPAT:
1459 optname = SO_BSDCOMPAT;
1460 goto int_case;
1461 #endif
1462 case TARGET_SO_PASSCRED:
1463 optname = SO_PASSCRED;
1464 goto int_case;
1465 case TARGET_SO_TIMESTAMP:
1466 optname = SO_TIMESTAMP;
1467 goto int_case;
1468 case TARGET_SO_RCVLOWAT:
1469 optname = SO_RCVLOWAT;
1470 goto int_case;
1471 default:
1472 goto int_case;
1474 break;
1475 case SOL_TCP:
1476 /* TCP options all take an 'int' value. */
1477 int_case:
1478 if (get_user_u32(len, optlen))
1479 return -TARGET_EFAULT;
1480 if (len < 0)
1481 return -TARGET_EINVAL;
1482 lv = sizeof(lv);
1483 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1484 if (ret < 0)
1485 return ret;
1486 if (len > lv)
1487 len = lv;
1488 if (len == 4) {
1489 if (put_user_u32(val, optval_addr))
1490 return -TARGET_EFAULT;
1491 } else {
1492 if (put_user_u8(val, optval_addr))
1493 return -TARGET_EFAULT;
1495 if (put_user_u32(len, optlen))
1496 return -TARGET_EFAULT;
1497 break;
1498 case SOL_IP:
1499 switch(optname) {
1500 case IP_TOS:
1501 case IP_TTL:
1502 case IP_HDRINCL:
1503 case IP_ROUTER_ALERT:
1504 case IP_RECVOPTS:
1505 case IP_RETOPTS:
1506 case IP_PKTINFO:
1507 case IP_MTU_DISCOVER:
1508 case IP_RECVERR:
1509 case IP_RECVTOS:
1510 #ifdef IP_FREEBIND
1511 case IP_FREEBIND:
1512 #endif
1513 case IP_MULTICAST_TTL:
1514 case IP_MULTICAST_LOOP:
1515 if (get_user_u32(len, optlen))
1516 return -TARGET_EFAULT;
1517 if (len < 0)
1518 return -TARGET_EINVAL;
1519 lv = sizeof(lv);
1520 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1521 if (ret < 0)
1522 return ret;
1523 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1524 len = 1;
1525 if (put_user_u32(len, optlen)
1526 || put_user_u8(val, optval_addr))
1527 return -TARGET_EFAULT;
1528 } else {
1529 if (len > sizeof(int))
1530 len = sizeof(int);
1531 if (put_user_u32(len, optlen)
1532 || put_user_u32(val, optval_addr))
1533 return -TARGET_EFAULT;
1535 break;
1536 default:
1537 ret = -TARGET_ENOPROTOOPT;
1538 break;
1540 break;
1541 default:
1542 unimplemented:
1543 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1544 level, optname);
1545 ret = -TARGET_EOPNOTSUPP;
1546 break;
1548 return ret;
1551 /* FIXME
1552 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1553 * other lock functions have a return code of 0 for failure.
1555 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1556 int count, int copy)
1558 struct target_iovec *target_vec;
1559 abi_ulong base;
1560 int i;
1562 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1563 if (!target_vec)
1564 return -TARGET_EFAULT;
1565 for(i = 0;i < count; i++) {
1566 base = tswapl(target_vec[i].iov_base);
1567 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1568 if (vec[i].iov_len != 0) {
1569 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1570 /* Don't check lock_user return value. We must call writev even
1571        if an element has an invalid base address. */
1572 } else {
1573 /* zero length pointer is ignored */
1574 vec[i].iov_base = NULL;
1577 unlock_user (target_vec, target_addr, 0);
1578 return 0;
1581 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1582 int count, int copy)
1584 struct target_iovec *target_vec;
1585 abi_ulong base;
1586 int i;
1588 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1589 if (!target_vec)
1590 return -TARGET_EFAULT;
1591 for(i = 0;i < count; i++) {
1592 if (target_vec[i].iov_base) {
1593 base = tswapl(target_vec[i].iov_base);
1594 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1597 unlock_user (target_vec, target_addr, 0);
1599 return 0;
1602 /* do_socket() Must return target values and target errnos. */
1603 static abi_long do_socket(int domain, int type, int protocol)
1605 #if defined(TARGET_MIPS)
1606 switch(type) {
1607 case TARGET_SOCK_DGRAM:
1608 type = SOCK_DGRAM;
1609 break;
1610 case TARGET_SOCK_STREAM:
1611 type = SOCK_STREAM;
1612 break;
1613 case TARGET_SOCK_RAW:
1614 type = SOCK_RAW;
1615 break;
1616 case TARGET_SOCK_RDM:
1617 type = SOCK_RDM;
1618 break;
1619 case TARGET_SOCK_SEQPACKET:
1620 type = SOCK_SEQPACKET;
1621 break;
1622 case TARGET_SOCK_PACKET:
1623 type = SOCK_PACKET;
1624 break;
1626 #endif
1627 if (domain == PF_NETLINK)
1628         return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
1629 return get_errno(socket(domain, type, protocol));
1632 /* do_bind() Must return target values and target errnos. */
1633 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1634 socklen_t addrlen)
1636 void *addr;
1637 abi_long ret;
1639 if ((int)addrlen < 0) {
1640 return -TARGET_EINVAL;
1643 addr = alloca(addrlen+1);
1645 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1646 if (ret)
1647 return ret;
1649 return get_errno(bind(sockfd, addr, addrlen));
1652 /* do_connect() Must return target values and target errnos. */
1653 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1654 socklen_t addrlen)
1656 void *addr;
1657 abi_long ret;
1659 if ((int)addrlen < 0) {
1660 return -TARGET_EINVAL;
1663 addr = alloca(addrlen);
1665 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1666 if (ret)
1667 return ret;
1669 return get_errno(connect(sockfd, addr, addrlen));
1672 /* do_sendrecvmsg() Must return target values and target errnos. */
1673 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1674 int flags, int send)
1676 abi_long ret, len;
1677 struct target_msghdr *msgp;
1678 struct msghdr msg;
1679 int count;
1680 struct iovec *vec;
1681 abi_ulong target_vec;
1683 /* FIXME */
1684 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1685 msgp,
1686 target_msg,
1687 send ? 1 : 0))
1688 return -TARGET_EFAULT;
1689 if (msgp->msg_name) {
1690 msg.msg_namelen = tswap32(msgp->msg_namelen);
1691 msg.msg_name = alloca(msg.msg_namelen);
1692 ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1693 msg.msg_namelen);
1694 if (ret) {
1695 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1696 return ret;
1698 } else {
1699 msg.msg_name = NULL;
1700 msg.msg_namelen = 0;
1702 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1703 msg.msg_control = alloca(msg.msg_controllen);
1704 msg.msg_flags = tswap32(msgp->msg_flags);
1706 count = tswapl(msgp->msg_iovlen);
1707 vec = alloca(count * sizeof(struct iovec));
1708 target_vec = tswapl(msgp->msg_iov);
1709 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1710 msg.msg_iovlen = count;
1711 msg.msg_iov = vec;
1713 if (send) {
1714 ret = target_to_host_cmsg(&msg, msgp);
1715 if (ret == 0)
1716 ret = get_errno(sendmsg(fd, &msg, flags));
1717 } else {
1718 ret = get_errno(recvmsg(fd, &msg, flags));
1719 if (!is_error(ret)) {
1720 len = ret;
1721 ret = host_to_target_cmsg(msgp, &msg);
1722 if (!is_error(ret))
1723 ret = len;
1726 unlock_iovec(vec, target_vec, count, !send);
1727 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1728 return ret;
1731 /* do_accept() Must return target values and target errnos. */
1732 static abi_long do_accept(int fd, abi_ulong target_addr,
1733 abi_ulong target_addrlen_addr)
1735 socklen_t addrlen;
1736 void *addr;
1737 abi_long ret;
1739 if (target_addr == 0)
1740 return get_errno(accept(fd, NULL, NULL));
1742     /* Linux returns EINVAL if the addrlen pointer is invalid */
1743 if (get_user_u32(addrlen, target_addrlen_addr))
1744 return -TARGET_EINVAL;
1746 if ((int)addrlen < 0) {
1747 return -TARGET_EINVAL;
1750 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1751 return -TARGET_EINVAL;
1753 addr = alloca(addrlen);
1755 ret = get_errno(accept(fd, addr, &addrlen));
1756 if (!is_error(ret)) {
1757 host_to_target_sockaddr(target_addr, addr, addrlen);
1758 if (put_user_u32(addrlen, target_addrlen_addr))
1759 ret = -TARGET_EFAULT;
1761 return ret;
1764 /* do_getpeername() Must return target values and target errnos. */
1765 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1766 abi_ulong target_addrlen_addr)
1768 socklen_t addrlen;
1769 void *addr;
1770 abi_long ret;
1772 if (get_user_u32(addrlen, target_addrlen_addr))
1773 return -TARGET_EFAULT;
1775 if ((int)addrlen < 0) {
1776 return -TARGET_EINVAL;
1779 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1780 return -TARGET_EFAULT;
1782 addr = alloca(addrlen);
1784 ret = get_errno(getpeername(fd, addr, &addrlen));
1785 if (!is_error(ret)) {
1786 host_to_target_sockaddr(target_addr, addr, addrlen);
1787 if (put_user_u32(addrlen, target_addrlen_addr))
1788 ret = -TARGET_EFAULT;
1790 return ret;
1793 /* do_getsockname() Must return target values and target errnos. */
1794 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1795 abi_ulong target_addrlen_addr)
1797 socklen_t addrlen;
1798 void *addr;
1799 abi_long ret;
1801 if (get_user_u32(addrlen, target_addrlen_addr))
1802 return -TARGET_EFAULT;
1804 if ((int)addrlen < 0) {
1805 return -TARGET_EINVAL;
1808 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1809 return -TARGET_EFAULT;
1811 addr = alloca(addrlen);
1813 ret = get_errno(getsockname(fd, addr, &addrlen));
1814 if (!is_error(ret)) {
1815 host_to_target_sockaddr(target_addr, addr, addrlen);
1816 if (put_user_u32(addrlen, target_addrlen_addr))
1817 ret = -TARGET_EFAULT;
1819 return ret;
1822 /* do_socketpair() Must return target values and target errnos. */
1823 static abi_long do_socketpair(int domain, int type, int protocol,
1824 abi_ulong target_tab_addr)
1826 int tab[2];
1827 abi_long ret;
1829 ret = get_errno(socketpair(domain, type, protocol, tab));
1830 if (!is_error(ret)) {
1831 if (put_user_s32(tab[0], target_tab_addr)
1832 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1833 ret = -TARGET_EFAULT;
1835 return ret;
1838 /* do_sendto() Must return target values and target errnos. */
1839 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1840 abi_ulong target_addr, socklen_t addrlen)
1842 void *addr;
1843 void *host_msg;
1844 abi_long ret;
1846 if ((int)addrlen < 0) {
1847 return -TARGET_EINVAL;
1850 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1851 if (!host_msg)
1852 return -TARGET_EFAULT;
1853 if (target_addr) {
1854 addr = alloca(addrlen);
1855 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1856 if (ret) {
1857 unlock_user(host_msg, msg, 0);
1858 return ret;
1860 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1861 } else {
1862 ret = get_errno(send(fd, host_msg, len, flags));
1864 unlock_user(host_msg, msg, 0);
1865 return ret;
1868 /* do_recvfrom() Must return target values and target errnos. */
1869 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1870 abi_ulong target_addr,
1871 abi_ulong target_addrlen)
1873 socklen_t addrlen;
1874 void *addr;
1875 void *host_msg;
1876 abi_long ret;
1878 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1879 if (!host_msg)
1880 return -TARGET_EFAULT;
1881 if (target_addr) {
1882 if (get_user_u32(addrlen, target_addrlen)) {
1883 ret = -TARGET_EFAULT;
1884 goto fail;
1886 if ((int)addrlen < 0) {
1887 ret = -TARGET_EINVAL;
1888 goto fail;
1890 addr = alloca(addrlen);
1891 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1892 } else {
1893 addr = NULL; /* To keep compiler quiet. */
1894 ret = get_errno(recv(fd, host_msg, len, flags));
1896 if (!is_error(ret)) {
1897 if (target_addr) {
1898 host_to_target_sockaddr(target_addr, addr, addrlen);
1899 if (put_user_u32(addrlen, target_addrlen)) {
1900 ret = -TARGET_EFAULT;
1901 goto fail;
1904 unlock_user(host_msg, msg, len);
1905 } else {
1906 fail:
1907 unlock_user(host_msg, msg, 0);
1909 return ret;
1912 #ifdef TARGET_NR_socketcall
1913 /* do_socketcall() Must return target values and target errnos. */
1914 static abi_long do_socketcall(int num, abi_ulong vptr)
1916 abi_long ret;
1917 const int n = sizeof(abi_ulong);
1919 switch(num) {
1920 case SOCKOP_socket:
1922 abi_ulong domain, type, protocol;
1924 if (get_user_ual(domain, vptr)
1925 || get_user_ual(type, vptr + n)
1926 || get_user_ual(protocol, vptr + 2 * n))
1927 return -TARGET_EFAULT;
1929 ret = do_socket(domain, type, protocol);
1931 break;
1932 case SOCKOP_bind:
1934 abi_ulong sockfd;
1935 abi_ulong target_addr;
1936 socklen_t addrlen;
1938 if (get_user_ual(sockfd, vptr)
1939 || get_user_ual(target_addr, vptr + n)
1940 || get_user_ual(addrlen, vptr + 2 * n))
1941 return -TARGET_EFAULT;
1943 ret = do_bind(sockfd, target_addr, addrlen);
1945 break;
1946 case SOCKOP_connect:
1948 abi_ulong sockfd;
1949 abi_ulong target_addr;
1950 socklen_t addrlen;
1952 if (get_user_ual(sockfd, vptr)
1953 || get_user_ual(target_addr, vptr + n)
1954 || get_user_ual(addrlen, vptr + 2 * n))
1955 return -TARGET_EFAULT;
1957 ret = do_connect(sockfd, target_addr, addrlen);
1959 break;
1960 case SOCKOP_listen:
1962 abi_ulong sockfd, backlog;
1964 if (get_user_ual(sockfd, vptr)
1965 || get_user_ual(backlog, vptr + n))
1966 return -TARGET_EFAULT;
1968 ret = get_errno(listen(sockfd, backlog));
1970 break;
1971 case SOCKOP_accept:
1973 abi_ulong sockfd;
1974 abi_ulong target_addr, target_addrlen;
1976 if (get_user_ual(sockfd, vptr)
1977 || get_user_ual(target_addr, vptr + n)
1978 || get_user_ual(target_addrlen, vptr + 2 * n))
1979 return -TARGET_EFAULT;
1981 ret = do_accept(sockfd, target_addr, target_addrlen);
1983 break;
1984 case SOCKOP_getsockname:
1986 abi_ulong sockfd;
1987 abi_ulong target_addr, target_addrlen;
1989 if (get_user_ual(sockfd, vptr)
1990 || get_user_ual(target_addr, vptr + n)
1991 || get_user_ual(target_addrlen, vptr + 2 * n))
1992 return -TARGET_EFAULT;
1994 ret = do_getsockname(sockfd, target_addr, target_addrlen);
1996 break;
1997 case SOCKOP_getpeername:
1999 abi_ulong sockfd;
2000 abi_ulong target_addr, target_addrlen;
2002 if (get_user_ual(sockfd, vptr)
2003 || get_user_ual(target_addr, vptr + n)
2004 || get_user_ual(target_addrlen, vptr + 2 * n))
2005 return -TARGET_EFAULT;
2007 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2009 break;
2010 case SOCKOP_socketpair:
2012 abi_ulong domain, type, protocol;
2013 abi_ulong tab;
2015 if (get_user_ual(domain, vptr)
2016 || get_user_ual(type, vptr + n)
2017 || get_user_ual(protocol, vptr + 2 * n)
2018 || get_user_ual(tab, vptr + 3 * n))
2019 return -TARGET_EFAULT;
2021 ret = do_socketpair(domain, type, protocol, tab);
2023 break;
2024 case SOCKOP_send:
2026 abi_ulong sockfd;
2027 abi_ulong msg;
2028 size_t len;
2029 abi_ulong flags;
2031 if (get_user_ual(sockfd, vptr)
2032 || get_user_ual(msg, vptr + n)
2033 || get_user_ual(len, vptr + 2 * n)
2034 || get_user_ual(flags, vptr + 3 * n))
2035 return -TARGET_EFAULT;
2037 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2039 break;
2040 case SOCKOP_recv:
2042 abi_ulong sockfd;
2043 abi_ulong msg;
2044 size_t len;
2045 abi_ulong flags;
2047 if (get_user_ual(sockfd, vptr)
2048 || get_user_ual(msg, vptr + n)
2049 || get_user_ual(len, vptr + 2 * n)
2050 || get_user_ual(flags, vptr + 3 * n))
2051 return -TARGET_EFAULT;
2053 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2055 break;
2056 case SOCKOP_sendto:
2058 abi_ulong sockfd;
2059 abi_ulong msg;
2060 size_t len;
2061 abi_ulong flags;
2062 abi_ulong addr;
2063 socklen_t addrlen;
2065 if (get_user_ual(sockfd, vptr)
2066 || get_user_ual(msg, vptr + n)
2067 || get_user_ual(len, vptr + 2 * n)
2068 || get_user_ual(flags, vptr + 3 * n)
2069 || get_user_ual(addr, vptr + 4 * n)
2070 || get_user_ual(addrlen, vptr + 5 * n))
2071 return -TARGET_EFAULT;
2073 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2075 break;
2076 case SOCKOP_recvfrom:
2078 abi_ulong sockfd;
2079 abi_ulong msg;
2080 size_t len;
2081 abi_ulong flags;
2082 abi_ulong addr;
2083 socklen_t addrlen;
2085 if (get_user_ual(sockfd, vptr)
2086 || get_user_ual(msg, vptr + n)
2087 || get_user_ual(len, vptr + 2 * n)
2088 || get_user_ual(flags, vptr + 3 * n)
2089 || get_user_ual(addr, vptr + 4 * n)
2090 || get_user_ual(addrlen, vptr + 5 * n))
2091 return -TARGET_EFAULT;
2093 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2095 break;
2096 case SOCKOP_shutdown:
2098 abi_ulong sockfd, how;
2100 if (get_user_ual(sockfd, vptr)
2101 || get_user_ual(how, vptr + n))
2102 return -TARGET_EFAULT;
2104 ret = get_errno(shutdown(sockfd, how));
2106 break;
2107 case SOCKOP_sendmsg:
2108 case SOCKOP_recvmsg:
2110 abi_ulong fd;
2111 abi_ulong target_msg;
2112 abi_ulong flags;
2114 if (get_user_ual(fd, vptr)
2115 || get_user_ual(target_msg, vptr + n)
2116 || get_user_ual(flags, vptr + 2 * n))
2117 return -TARGET_EFAULT;
2119 ret = do_sendrecvmsg(fd, target_msg, flags,
2120 (num == SOCKOP_sendmsg));
2122 break;
2123 case SOCKOP_setsockopt:
2125 abi_ulong sockfd;
2126 abi_ulong level;
2127 abi_ulong optname;
2128 abi_ulong optval;
2129 socklen_t optlen;
2131 if (get_user_ual(sockfd, vptr)
2132 || get_user_ual(level, vptr + n)
2133 || get_user_ual(optname, vptr + 2 * n)
2134 || get_user_ual(optval, vptr + 3 * n)
2135 || get_user_ual(optlen, vptr + 4 * n))
2136 return -TARGET_EFAULT;
2138 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2140 break;
2141 case SOCKOP_getsockopt:
2143 abi_ulong sockfd;
2144 abi_ulong level;
2145 abi_ulong optname;
2146 abi_ulong optval;
2147 socklen_t optlen;
2149 if (get_user_ual(sockfd, vptr)
2150 || get_user_ual(level, vptr + n)
2151 || get_user_ual(optname, vptr + 2 * n)
2152 || get_user_ual(optval, vptr + 3 * n)
2153 || get_user_ual(optlen, vptr + 4 * n))
2154 return -TARGET_EFAULT;
2156 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2158 break;
2159 default:
2160 gemu_log("Unsupported socketcall: %d\n", num);
2161 ret = -TARGET_ENOSYS;
2162 break;
2164 return ret;
2166 #endif
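/* Illustrative sketch (not part of the emulator): how a guest program on a
 * target that multiplexes sockets through socketcall(2) ends up in
 * do_socketcall() above. The argument block layout assumed here is exactly
 * what the get_user_ual() reads expect: an array of word-sized values
 * starting at vptr. SYS_socketcall and the SYS_SEND (== SOCKOP_send)
 * operation number come from the guest's kernel headers, so this only
 * applies to guests that use the multiplexer (e.g. 32-bit x86).
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/net.h>          /* SYS_SEND */

static long guest_send_via_socketcall(int sockfd, const void *buf,
                                      unsigned long len, int flags)
{
    unsigned long args[4] = { (unsigned long)sockfd, (unsigned long)buf,
                              len, (unsigned long)flags };
    /* Lands in the SOCKOP_send case, which forwards to do_sendto(). */
    return syscall(SYS_socketcall, SYS_SEND, args);
}
#endif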
2168 #define N_SHM_REGIONS 32
2170 static struct shm_region {
2171 abi_ulong start;
2172 abi_ulong size;
2173 } shm_regions[N_SHM_REGIONS];
2175 struct target_ipc_perm
2177 abi_long __key;
2178 abi_ulong uid;
2179 abi_ulong gid;
2180 abi_ulong cuid;
2181 abi_ulong cgid;
2182 unsigned short int mode;
2183 unsigned short int __pad1;
2184 unsigned short int __seq;
2185 unsigned short int __pad2;
2186 abi_ulong __unused1;
2187 abi_ulong __unused2;
2190 struct target_semid_ds
2192 struct target_ipc_perm sem_perm;
2193 abi_ulong sem_otime;
2194 abi_ulong __unused1;
2195 abi_ulong sem_ctime;
2196 abi_ulong __unused2;
2197 abi_ulong sem_nsems;
2198 abi_ulong __unused3;
2199 abi_ulong __unused4;
2202 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2203 abi_ulong target_addr)
2205 struct target_ipc_perm *target_ip;
2206 struct target_semid_ds *target_sd;
2208 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2209 return -TARGET_EFAULT;
2210 target_ip = &(target_sd->sem_perm);
2211 host_ip->__key = tswapl(target_ip->__key);
2212 host_ip->uid = tswapl(target_ip->uid);
2213 host_ip->gid = tswapl(target_ip->gid);
2214 host_ip->cuid = tswapl(target_ip->cuid);
2215 host_ip->cgid = tswapl(target_ip->cgid);
2216 host_ip->mode = tswapl(target_ip->mode);
2217 unlock_user_struct(target_sd, target_addr, 0);
2218 return 0;
2221 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2222 struct ipc_perm *host_ip)
2224 struct target_ipc_perm *target_ip;
2225 struct target_semid_ds *target_sd;
2227 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2228 return -TARGET_EFAULT;
2229 target_ip = &(target_sd->sem_perm);
2230 target_ip->__key = tswapl(host_ip->__key);
2231 target_ip->uid = tswapl(host_ip->uid);
2232 target_ip->gid = tswapl(host_ip->gid);
2233 target_ip->cuid = tswapl(host_ip->cuid);
2234 target_ip->cgid = tswapl(host_ip->cgid);
2235 target_ip->mode = tswapl(host_ip->mode);
2236 unlock_user_struct(target_sd, target_addr, 1);
2237 return 0;
2240 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2241 abi_ulong target_addr)
2243 struct target_semid_ds *target_sd;
2245 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2246 return -TARGET_EFAULT;
2247 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2248 return -TARGET_EFAULT;
2249 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2250 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2251 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2252 unlock_user_struct(target_sd, target_addr, 0);
2253 return 0;
2256 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2257 struct semid_ds *host_sd)
2259 struct target_semid_ds *target_sd;
2261 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2262 return -TARGET_EFAULT;
2263 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2264 return -TARGET_EFAULT;
2265 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2266 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2267 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2268 unlock_user_struct(target_sd, target_addr, 1);
2269 return 0;
2272 struct target_seminfo {
2273 int semmap;
2274 int semmni;
2275 int semmns;
2276 int semmnu;
2277 int semmsl;
2278 int semopm;
2279 int semume;
2280 int semusz;
2281 int semvmx;
2282 int semaem;
2285 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2286 struct seminfo *host_seminfo)
2288 struct target_seminfo *target_seminfo;
2289 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2290 return -TARGET_EFAULT;
2291 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2292 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2293 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2294 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2295 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2296 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2297 __put_user(host_seminfo->semume, &target_seminfo->semume);
2298 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2299 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2300 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2301 unlock_user_struct(target_seminfo, target_addr, 1);
2302 return 0;
2305 union semun {
2306 int val;
2307 struct semid_ds *buf;
2308 unsigned short *array;
2309 struct seminfo *__buf;
2312 union target_semun {
2313 int val;
2314 abi_ulong buf;
2315 abi_ulong array;
2316 abi_ulong __buf;
2319 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2320 abi_ulong target_addr)
2322 int nsems;
2323 unsigned short *array;
2324 union semun semun;
2325 struct semid_ds semid_ds;
2326 int i, ret;
2328 semun.buf = &semid_ds;
2330 ret = semctl(semid, 0, IPC_STAT, semun);
2331 if (ret == -1)
2332 return get_errno(ret);
2334 nsems = semid_ds.sem_nsems;
2336 *host_array = malloc(nsems*sizeof(unsigned short));
2337 array = lock_user(VERIFY_READ, target_addr,
2338 nsems*sizeof(unsigned short), 1);
2339 if (!array)
2340 return -TARGET_EFAULT;
2342 for(i=0; i<nsems; i++) {
2343 __get_user((*host_array)[i], &array[i]);
2345 unlock_user(array, target_addr, 0);
2347 return 0;
2350 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2351 unsigned short **host_array)
2353 int nsems;
2354 unsigned short *array;
2355 union semun semun;
2356 struct semid_ds semid_ds;
2357 int i, ret;
2359 semun.buf = &semid_ds;
2361 ret = semctl(semid, 0, IPC_STAT, semun);
2362 if (ret == -1)
2363 return get_errno(ret);
2365 nsems = semid_ds.sem_nsems;
2367 array = lock_user(VERIFY_WRITE, target_addr,
2368 nsems*sizeof(unsigned short), 0);
2369 if (!array)
2370 return -TARGET_EFAULT;
2372 for(i=0; i<nsems; i++) {
2373 __put_user((*host_array)[i], &array[i]);
2375 free(*host_array);
2376 unlock_user(array, target_addr, 1);
2378 return 0;
2381 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2382 union target_semun target_su)
2384 union semun arg;
2385 struct semid_ds dsarg;
2386 unsigned short *array = NULL;
2387 struct seminfo seminfo;
2388 abi_long ret = -TARGET_EINVAL;
2389 abi_long err;
2390 cmd &= 0xff;
2392 switch( cmd ) {
2393 case GETVAL:
2394 case SETVAL:
2395 arg.val = tswapl(target_su.val);
2396 ret = get_errno(semctl(semid, semnum, cmd, arg));
2397 target_su.val = tswapl(arg.val);
2398 break;
2399 case GETALL:
2400 case SETALL:
2401 err = target_to_host_semarray(semid, &array, target_su.array);
2402 if (err)
2403 return err;
2404 arg.array = array;
2405 ret = get_errno(semctl(semid, semnum, cmd, arg));
2406 err = host_to_target_semarray(semid, target_su.array, &array);
2407 if (err)
2408 return err;
2409 break;
2410 case IPC_STAT:
2411 case IPC_SET:
2412 case SEM_STAT:
2413 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2414 if (err)
2415 return err;
2416 arg.buf = &dsarg;
2417 ret = get_errno(semctl(semid, semnum, cmd, arg));
2418 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2419 if (err)
2420 return err;
2421 break;
2422 case IPC_INFO:
2423 case SEM_INFO:
2424 arg.__buf = &seminfo;
2425 ret = get_errno(semctl(semid, semnum, cmd, arg));
2426 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2427 if (err)
2428 return err;
2429 break;
2430 case IPC_RMID:
2431 case GETPID:
2432 case GETNCNT:
2433 case GETZCNT:
2434 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2435 break;
2438 return ret;
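/* Illustrative sketch (not emulator code): the guest-side semctl() call whose
 * fourth argument do_semctl() above receives as union target_semun. semctl(2)
 * requires the caller to define the union itself; its members line up with
 * the val/buf/array/__buf cases handled in the switch.
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union guest_semun {             /* caller-defined, per semctl(2) */
    int val;
    struct semid_ds *buf;
    unsigned short *array;
};

static int guest_set_first_sem(int semid)
{
    union guest_semun arg;
    arg.val = 1;                /* handled by the GETVAL/SETVAL branch above */
    return semctl(semid, 0, SETVAL, arg);
}
#endif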
2441 struct target_sembuf {
2442 unsigned short sem_num;
2443 short sem_op;
2444 short sem_flg;
2447 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2448 abi_ulong target_addr,
2449 unsigned nsops)
2451 struct target_sembuf *target_sembuf;
2452 int i;
2454 target_sembuf = lock_user(VERIFY_READ, target_addr,
2455 nsops*sizeof(struct target_sembuf), 1);
2456 if (!target_sembuf)
2457 return -TARGET_EFAULT;
2459 for(i=0; i<nsops; i++) {
2460 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2461 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2462 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2465 unlock_user(target_sembuf, target_addr, 0);
2467 return 0;
2470 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2472 struct sembuf sops[nsops];
2474 if (target_to_host_sembuf(sops, ptr, nsops))
2475 return -TARGET_EFAULT;
2477 return semop(semid, sops, nsops);
2480 struct target_msqid_ds
2482 struct target_ipc_perm msg_perm;
2483 abi_ulong msg_stime;
2484 #if TARGET_ABI_BITS == 32
2485 abi_ulong __unused1;
2486 #endif
2487 abi_ulong msg_rtime;
2488 #if TARGET_ABI_BITS == 32
2489 abi_ulong __unused2;
2490 #endif
2491 abi_ulong msg_ctime;
2492 #if TARGET_ABI_BITS == 32
2493 abi_ulong __unused3;
2494 #endif
2495 abi_ulong __msg_cbytes;
2496 abi_ulong msg_qnum;
2497 abi_ulong msg_qbytes;
2498 abi_ulong msg_lspid;
2499 abi_ulong msg_lrpid;
2500 abi_ulong __unused4;
2501 abi_ulong __unused5;
2504 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2505 abi_ulong target_addr)
2507 struct target_msqid_ds *target_md;
2509 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2510 return -TARGET_EFAULT;
2511 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2512 return -TARGET_EFAULT;
2513 host_md->msg_stime = tswapl(target_md->msg_stime);
2514 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2515 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2516 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2517 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2518 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2519 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2520 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2521 unlock_user_struct(target_md, target_addr, 0);
2522 return 0;
2525 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2526 struct msqid_ds *host_md)
2528 struct target_msqid_ds *target_md;
2530 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2531 return -TARGET_EFAULT;
2532 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2533 return -TARGET_EFAULT;
2534 target_md->msg_stime = tswapl(host_md->msg_stime);
2535 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2536 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2537 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2538 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2539 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2540 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2541 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2542 unlock_user_struct(target_md, target_addr, 1);
2543 return 0;
2546 struct target_msginfo {
2547 int msgpool;
2548 int msgmap;
2549 int msgmax;
2550 int msgmnb;
2551 int msgmni;
2552 int msgssz;
2553 int msgtql;
2554 unsigned short int msgseg;
2557 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2558 struct msginfo *host_msginfo)
2560 struct target_msginfo *target_msginfo;
2561 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2562 return -TARGET_EFAULT;
2563 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2564 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2565 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2566 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2567 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2568 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2569 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2570 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2571 unlock_user_struct(target_msginfo, target_addr, 1);
2572 return 0;
2575 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2577 struct msqid_ds dsarg;
2578 struct msginfo msginfo;
2579 abi_long ret = -TARGET_EINVAL;
2581 cmd &= 0xff;
2583 switch (cmd) {
2584 case IPC_STAT:
2585 case IPC_SET:
2586 case MSG_STAT:
2587 if (target_to_host_msqid_ds(&dsarg,ptr))
2588 return -TARGET_EFAULT;
2589 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2590 if (host_to_target_msqid_ds(ptr,&dsarg))
2591 return -TARGET_EFAULT;
2592 break;
2593 case IPC_RMID:
2594 ret = get_errno(msgctl(msgid, cmd, NULL));
2595 break;
2596 case IPC_INFO:
2597 case MSG_INFO:
2598 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2599 if (host_to_target_msginfo(ptr, &msginfo))
2600 return -TARGET_EFAULT;
2601 break;
2604 return ret;
2607 struct target_msgbuf {
2608 abi_long mtype;
2609 char mtext[1];
2612 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2613 unsigned int msgsz, int msgflg)
2615 struct target_msgbuf *target_mb;
2616 struct msgbuf *host_mb;
2617 abi_long ret = 0;
2619 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2620 return -TARGET_EFAULT;
2621 host_mb = malloc(msgsz+sizeof(long));
2622 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2623 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2624 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2625 free(host_mb);
2626 unlock_user_struct(target_mb, msgp, 0);
2628 return ret;
2631 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2632 unsigned int msgsz, abi_long msgtyp,
2633 int msgflg)
2635 struct target_msgbuf *target_mb;
2636 char *target_mtext;
2637 struct msgbuf *host_mb;
2638 abi_long ret = 0;
2640 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2641 return -TARGET_EFAULT;
2643 host_mb = malloc(msgsz+sizeof(long));
2644 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2646 if (ret > 0) {
2647 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2648 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2649 if (!target_mtext) {
2650 ret = -TARGET_EFAULT;
2651 goto end;
2653 memcpy(target_mb->mtext, host_mb->mtext, ret);
2654 unlock_user(target_mtext, target_mtext_addr, ret);
2657 target_mb->mtype = tswapl(host_mb->mtype);
2658 free(host_mb);
2660 end:
2661 if (target_mb)
2662 unlock_user_struct(target_mb, msgp, 1);
2663 return ret;
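/* Illustrative sketch (not emulator code): the guest-side message layout that
 * do_msgsnd()/do_msgrcv() above translate -- a long mtype immediately followed
 * by the message text, mirroring struct target_msgbuf.
 */
#if 0
#include <string.h>
#include <sys/msg.h>

struct guest_msg {
    long mtype;                 /* corresponds to target_msgbuf.mtype */
    char mtext[64];             /* corresponds to target_msgbuf.mtext */
};

static int guest_send_hello(int msqid)
{
    struct guest_msg m;

    m.mtype = 1;
    strcpy(m.mtext, "hello");
    return msgsnd(msqid, &m, strlen(m.mtext) + 1, 0);
}
#endif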
2666 struct target_shmid_ds
2668 struct target_ipc_perm shm_perm;
2669 abi_ulong shm_segsz;
2670 abi_ulong shm_atime;
2671 #if TARGET_ABI_BITS == 32
2672 abi_ulong __unused1;
2673 #endif
2674 abi_ulong shm_dtime;
2675 #if TARGET_ABI_BITS == 32
2676 abi_ulong __unused2;
2677 #endif
2678 abi_ulong shm_ctime;
2679 #if TARGET_ABI_BITS == 32
2680 abi_ulong __unused3;
2681 #endif
2682 int shm_cpid;
2683 int shm_lpid;
2684 abi_ulong shm_nattch;
2685 unsigned long int __unused4;
2686 unsigned long int __unused5;
2689 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2690 abi_ulong target_addr)
2692 struct target_shmid_ds *target_sd;
2694 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2695 return -TARGET_EFAULT;
2696 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2697 return -TARGET_EFAULT;
2698 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2699 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2700 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2701 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2702 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2703 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2704 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2705 unlock_user_struct(target_sd, target_addr, 0);
2706 return 0;
2709 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2710 struct shmid_ds *host_sd)
2712 struct target_shmid_ds *target_sd;
2714 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2715 return -TARGET_EFAULT;
2716 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2717 return -TARGET_EFAULT;
2718 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2719 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2720 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2721 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2722 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2723 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2724 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2725 unlock_user_struct(target_sd, target_addr, 1);
2726 return 0;
2729 struct target_shminfo {
2730 abi_ulong shmmax;
2731 abi_ulong shmmin;
2732 abi_ulong shmmni;
2733 abi_ulong shmseg;
2734 abi_ulong shmall;
2737 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2738 struct shminfo *host_shminfo)
2740 struct target_shminfo *target_shminfo;
2741 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2742 return -TARGET_EFAULT;
2743 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2744 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2745 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2746 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2747 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2748 unlock_user_struct(target_shminfo, target_addr, 1);
2749 return 0;
2752 struct target_shm_info {
2753 int used_ids;
2754 abi_ulong shm_tot;
2755 abi_ulong shm_rss;
2756 abi_ulong shm_swp;
2757 abi_ulong swap_attempts;
2758 abi_ulong swap_successes;
2761 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2762 struct shm_info *host_shm_info)
2764 struct target_shm_info *target_shm_info;
2765 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2766 return -TARGET_EFAULT;
2767 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2768 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2769 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2770 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2771 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2772 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2773 unlock_user_struct(target_shm_info, target_addr, 1);
2774 return 0;
2777 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2779 struct shmid_ds dsarg;
2780 struct shminfo shminfo;
2781 struct shm_info shm_info;
2782 abi_long ret = -TARGET_EINVAL;
2784 cmd &= 0xff;
2786 switch(cmd) {
2787 case IPC_STAT:
2788 case IPC_SET:
2789 case SHM_STAT:
2790 if (target_to_host_shmid_ds(&dsarg, buf))
2791 return -TARGET_EFAULT;
2792 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2793 if (host_to_target_shmid_ds(buf, &dsarg))
2794 return -TARGET_EFAULT;
2795 break;
2796 case IPC_INFO:
2797 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2798 if (host_to_target_shminfo(buf, &shminfo))
2799 return -TARGET_EFAULT;
2800 break;
2801 case SHM_INFO:
2802 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2803 if (host_to_target_shm_info(buf, &shm_info))
2804 return -TARGET_EFAULT;
2805 break;
2806 case IPC_RMID:
2807 case SHM_LOCK:
2808 case SHM_UNLOCK:
2809 ret = get_errno(shmctl(shmid, cmd, NULL));
2810 break;
2813 return ret;
2816 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2818 abi_long raddr;
2819 void *host_raddr;
2820 struct shmid_ds shm_info;
2821 int i,ret;
2823 /* find out the length of the shared memory segment */
2824 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2825 if (is_error(ret)) {
2826 /* can't get length, bail out */
2827 return ret;
2830 mmap_lock();
2832 if (shmaddr)
2833 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2834 else {
2835 abi_ulong mmap_start;
2837 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2839 if (mmap_start == -1) {
2840 errno = ENOMEM;
2841 host_raddr = (void *)-1;
2842 } else
2843 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2846 if (host_raddr == (void *)-1) {
2847 mmap_unlock();
2848 return get_errno((long)host_raddr);
2850 raddr=h2g((unsigned long)host_raddr);
2852 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2853 PAGE_VALID | PAGE_READ |
2854 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2856 for (i = 0; i < N_SHM_REGIONS; i++) {
2857 if (shm_regions[i].start == 0) {
2858 shm_regions[i].start = raddr;
2859 shm_regions[i].size = shm_info.shm_segsz;
2860 break;
2864 mmap_unlock();
2865 return raddr;
2869 static inline abi_long do_shmdt(abi_ulong shmaddr)
2871 int i;
2873 for (i = 0; i < N_SHM_REGIONS; ++i) {
2874 if (shm_regions[i].start == shmaddr) {
2875 shm_regions[i].start = 0;
2876 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
2877 break;
2881 return get_errno(shmdt(g2h(shmaddr)));
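/* Illustrative sketch (not emulator code): the guest-side shmat()/shmdt()
 * pair that the two helpers above emulate. When the guest passes a NULL
 * address, do_shmat() picks one with mmap_find_vma() and records it in
 * shm_regions[] so that do_shmdt() can clear the page flags again on detach.
 */
#if 0
#include <sys/ipc.h>
#include <sys/shm.h>

static int guest_attach_detach(int shmid)
{
    void *p = shmat(shmid, NULL, 0);

    if (p == (void *)-1) {
        return -1;
    }
    /* ... use the segment ... */
    return shmdt(p);
}
#endif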
2884 #ifdef TARGET_NR_ipc
2885 /* ??? This only works with linear mappings. */
2886 /* do_ipc() must return target values and target errnos. */
2887 static abi_long do_ipc(unsigned int call, int first,
2888 int second, int third,
2889 abi_long ptr, abi_long fifth)
2891 int version;
2892 abi_long ret = 0;
2894 version = call >> 16;
2895 call &= 0xffff;
2897 switch (call) {
2898 case IPCOP_semop:
2899 ret = do_semop(first, ptr, second);
2900 break;
2902 case IPCOP_semget:
2903 ret = get_errno(semget(first, second, third));
2904 break;
2906 case IPCOP_semctl:
2907 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2908 break;
2910 case IPCOP_msgget:
2911 ret = get_errno(msgget(first, second));
2912 break;
2914 case IPCOP_msgsnd:
2915 ret = do_msgsnd(first, ptr, second, third);
2916 break;
2918 case IPCOP_msgctl:
2919 ret = do_msgctl(first, second, ptr);
2920 break;
2922 case IPCOP_msgrcv:
2923 switch (version) {
2924 case 0:
2926 struct target_ipc_kludge {
2927 abi_long msgp;
2928 abi_long msgtyp;
2929 } *tmp;
2931 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2932 ret = -TARGET_EFAULT;
2933 break;
2936 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2938 unlock_user_struct(tmp, ptr, 0);
2939 break;
2941 default:
2942 ret = do_msgrcv(first, ptr, second, fifth, third);
2944 break;
2946 case IPCOP_shmat:
2947 switch (version) {
2948 default:
2950 abi_ulong raddr;
2951 raddr = do_shmat(first, ptr, second);
2952 if (is_error(raddr))
2953 return get_errno(raddr);
2954 if (put_user_ual(raddr, third))
2955 return -TARGET_EFAULT;
2956 break;
2958 case 1:
2959 ret = -TARGET_EINVAL;
2960 break;
2962 break;
2963 case IPCOP_shmdt:
2964 ret = do_shmdt(ptr);
2965 break;
2967 case IPCOP_shmget:
2968 /* IPC_* flag values are the same on all linux platforms */
2969 ret = get_errno(shmget(first, second, third));
2970 break;
2972 /* IPC_* and SHM_* command values are the same on all linux platforms */
2973 case IPCOP_shmctl:
2974 ret = do_shmctl(first, second, third);
2975 break;
2976 default:
2977 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2978 ret = -TARGET_ENOSYS;
2979 break;
2981 return ret;
2983 #endif
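/* Illustrative sketch (not part of the emulator): how a guest libc funnels
 * SysV IPC through the single ipc(2) multiplexer dispatched by do_ipc()
 * above. The low 16 bits of "call" select the operation; the high 16 bits
 * carry the version used by the msgrcv/shmat special cases. The operation
 * number 2 (IPCOP_semget) comes from the guest's linux/ipc.h, and SYS_ipc
 * exists only on targets that use this multiplexer.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>

static long guest_semget_via_ipc(int key, int nsems, int semflg)
{
    return syscall(SYS_ipc, 2 /* IPCOP_semget */, key, nsems, semflg, 0, 0);
}
#endif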
2985 /* kernel structure types definitions */
2987 #define STRUCT(name, ...) STRUCT_ ## name,
2988 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2989 enum {
2990 #include "syscall_types.h"
2992 #undef STRUCT
2993 #undef STRUCT_SPECIAL
2995 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
2996 #define STRUCT_SPECIAL(name)
2997 #include "syscall_types.h"
2998 #undef STRUCT
2999 #undef STRUCT_SPECIAL
3001 typedef struct IOCTLEntry IOCTLEntry;
3003 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3004 int fd, abi_long cmd, abi_long arg);
3006 struct IOCTLEntry {
3007 unsigned int target_cmd;
3008 unsigned int host_cmd;
3009 const char *name;
3010 int access;
3011 do_ioctl_fn *do_ioctl;
3012 const argtype arg_type[5];
3015 #define IOC_R 0x0001
3016 #define IOC_W 0x0002
3017 #define IOC_RW (IOC_R | IOC_W)
3019 #define MAX_STRUCT_SIZE 4096
3021 #ifdef CONFIG_FIEMAP
3022 /* So fiemap access checks don't overflow on 32 bit systems.
3023 * This is very slightly smaller than the limit imposed by
3024 * the underlying kernel.
3026 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3027 / sizeof(struct fiemap_extent))
3029 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3030 int fd, abi_long cmd, abi_long arg)
3032 /* The parameter for this ioctl is a struct fiemap followed
3033 * by an array of struct fiemap_extent whose size is set
3034 * in fiemap->fm_extent_count. The array is filled in by the
3035 * ioctl.
3037 int target_size_in, target_size_out;
3038 struct fiemap *fm;
3039 const argtype *arg_type = ie->arg_type;
3040 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3041 void *argptr, *p;
3042 abi_long ret;
3043 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3044 uint32_t outbufsz;
3045 int free_fm = 0;
3047 assert(arg_type[0] == TYPE_PTR);
3048 assert(ie->access == IOC_RW);
3049 arg_type++;
3050 target_size_in = thunk_type_size(arg_type, 0);
3051 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3052 if (!argptr) {
3053 return -TARGET_EFAULT;
3055 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3056 unlock_user(argptr, arg, 0);
3057 fm = (struct fiemap *)buf_temp;
3058 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3059 return -TARGET_EINVAL;
3062 outbufsz = sizeof (*fm) +
3063 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3065 if (outbufsz > MAX_STRUCT_SIZE) {
3066 /* We can't fit all the extents into the fixed size buffer.
3067 * Allocate one that is large enough and use it instead.
3069 fm = malloc(outbufsz);
3070 if (!fm) {
3071 return -TARGET_ENOMEM;
3073 memcpy(fm, buf_temp, sizeof(struct fiemap));
3074 free_fm = 1;
3076 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3077 if (!is_error(ret)) {
3078 target_size_out = target_size_in;
3079 /* An extent_count of 0 means we were only counting the extents
3080 * so there are no structs to copy
3082 if (fm->fm_extent_count != 0) {
3083 target_size_out += fm->fm_mapped_extents * extent_size;
3085 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3086 if (!argptr) {
3087 ret = -TARGET_EFAULT;
3088 } else {
3089 /* Convert the struct fiemap */
3090 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3091 if (fm->fm_extent_count != 0) {
3092 p = argptr + target_size_in;
3093 /* ...and then all the struct fiemap_extents */
3094 for (i = 0; i < fm->fm_mapped_extents; i++) {
3095 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3096 THUNK_TARGET);
3097 p += extent_size;
3100 unlock_user(argptr, arg, target_size_out);
3103 if (free_fm) {
3104 free(fm);
3106 return ret;
3108 #endif
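/* Illustrative sketch (not emulator code): the guest-side FS_IOC_FIEMAP call
 * shape that do_ioctl_fs_ioc_fiemap() above translates -- a struct fiemap
 * header immediately followed by fm_extent_count struct fiemap_extent slots
 * for the kernel to fill in.
 */
#if 0
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

static int guest_query_first_extent(int fd, struct fiemap_extent *out)
{
    size_t sz = sizeof(struct fiemap) + sizeof(struct fiemap_extent);
    struct fiemap *fm = calloc(1, sz);
    int n;

    if (!fm) {
        return -1;
    }
    fm->fm_length = ~0ULL;       /* map the whole file */
    fm->fm_extent_count = 1;     /* one extent slot follows the header */
    if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
        free(fm);
        return -1;
    }
    if (fm->fm_mapped_extents > 0) {
        *out = fm->fm_extents[0];
    }
    n = fm->fm_mapped_extents;
    free(fm);
    return n;
}
#endif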
3110 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3111 int fd, abi_long cmd, abi_long arg)
3113 const argtype *arg_type = ie->arg_type;
3114 int target_size;
3115 void *argptr;
3116 int ret;
3117 struct ifconf *host_ifconf;
3118 uint32_t outbufsz;
3119 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3120 int target_ifreq_size;
3121 int nb_ifreq;
3122 int free_buf = 0;
3123 int i;
3124 int target_ifc_len;
3125 abi_long target_ifc_buf;
3126 int host_ifc_len;
3127 char *host_ifc_buf;
3129 assert(arg_type[0] == TYPE_PTR);
3130 assert(ie->access == IOC_RW);
3132 arg_type++;
3133 target_size = thunk_type_size(arg_type, 0);
3135 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3136 if (!argptr)
3137 return -TARGET_EFAULT;
3138 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3139 unlock_user(argptr, arg, 0);
3141 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3142 target_ifc_len = host_ifconf->ifc_len;
3143 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3145 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3146 nb_ifreq = target_ifc_len / target_ifreq_size;
3147 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3149 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3150 if (outbufsz > MAX_STRUCT_SIZE) {
3151 /* We can't fit all the ifreq entries into the fixed size buffer.
3152 * Allocate one that is large enough and use it instead.
3154 host_ifconf = malloc(outbufsz);
3155 if (!host_ifconf) {
3156 return -TARGET_ENOMEM;
3158 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3159 free_buf = 1;
3161 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3163 host_ifconf->ifc_len = host_ifc_len;
3164 host_ifconf->ifc_buf = host_ifc_buf;
3166 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3167 if (!is_error(ret)) {
3168 /* convert host ifc_len to target ifc_len */
3170 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3171 target_ifc_len = nb_ifreq * target_ifreq_size;
3172 host_ifconf->ifc_len = target_ifc_len;
3174 /* restore target ifc_buf */
3176 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3178 /* copy struct ifconf to target user */
3180 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3181 if (!argptr)
3182 return -TARGET_EFAULT;
3183 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3184 unlock_user(argptr, arg, target_size);
3186 /* copy ifreq[] to target user */
3188 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3189 for (i = 0; i < nb_ifreq ; i++) {
3190 thunk_convert(argptr + i * target_ifreq_size,
3191 host_ifc_buf + i * sizeof(struct ifreq),
3192 ifreq_arg_type, THUNK_TARGET);
3194 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3197 if (free_buf) {
3198 free(host_ifconf);
3201 return ret;
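/* Illustrative sketch (not emulator code): the guest-side SIOCGIFCONF request
 * that do_ioctl_ifconf() above translates. The guest supplies ifc_len and
 * ifc_buf; the handler converts each struct ifreq between target and host
 * layouts and rewrites ifc_len in target units on the way back.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int guest_count_interfaces(int sockfd)
{
    struct ifreq reqs[8];
    struct ifconf ifc;

    memset(&ifc, 0, sizeof(ifc));
    ifc.ifc_len = sizeof(reqs);
    ifc.ifc_req = reqs;
    if (ioctl(sockfd, SIOCGIFCONF, &ifc) < 0) {
        return -1;
    }
    return ifc.ifc_len / (int)sizeof(struct ifreq);
}
#endif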
3204 static IOCTLEntry ioctl_entries[] = {
3205 #define IOCTL(cmd, access, ...) \
3206 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3207 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3208 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3209 #include "ioctls.h"
3210 { 0, 0, },
3213 /* ??? Implement proper locking for ioctls. */
3214 /* do_ioctl() must return target values and target errnos. */
3215 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3217 const IOCTLEntry *ie;
3218 const argtype *arg_type;
3219 abi_long ret;
3220 uint8_t buf_temp[MAX_STRUCT_SIZE];
3221 int target_size;
3222 void *argptr;
3224 ie = ioctl_entries;
3225 for(;;) {
3226 if (ie->target_cmd == 0) {
3227 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3228 return -TARGET_ENOSYS;
3230 if (ie->target_cmd == cmd)
3231 break;
3232 ie++;
3234 arg_type = ie->arg_type;
3235 #if defined(DEBUG)
3236 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3237 #endif
3238 if (ie->do_ioctl) {
3239 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3242 switch(arg_type[0]) {
3243 case TYPE_NULL:
3244 /* no argument */
3245 ret = get_errno(ioctl(fd, ie->host_cmd));
3246 break;
3247 case TYPE_PTRVOID:
3248 case TYPE_INT:
3249 /* int argument */
3250 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3251 break;
3252 case TYPE_PTR:
3253 arg_type++;
3254 target_size = thunk_type_size(arg_type, 0);
3255 switch(ie->access) {
3256 case IOC_R:
3257 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3258 if (!is_error(ret)) {
3259 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3260 if (!argptr)
3261 return -TARGET_EFAULT;
3262 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3263 unlock_user(argptr, arg, target_size);
3265 break;
3266 case IOC_W:
3267 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3268 if (!argptr)
3269 return -TARGET_EFAULT;
3270 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3271 unlock_user(argptr, arg, 0);
3272 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3273 break;
3274 default:
3275 case IOC_RW:
3276 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3277 if (!argptr)
3278 return -TARGET_EFAULT;
3279 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3280 unlock_user(argptr, arg, 0);
3281 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3282 if (!is_error(ret)) {
3283 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3284 if (!argptr)
3285 return -TARGET_EFAULT;
3286 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3287 unlock_user(argptr, arg, target_size);
3289 break;
3291 break;
3292 default:
3293 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3294 (long)cmd, arg_type[0]);
3295 ret = -TARGET_ENOSYS;
3296 break;
3298 return ret;
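/* Illustrative sketch (not emulator code): a guest ioctl that takes the
 * generic IOC_R path above, assuming the command is listed in ioctls.h.
 * do_ioctl() looks the command up in ioctl_entries[], issues the host ioctl
 * into buf_temp, then thunk-converts the struct winsize back into guest
 * memory.
 */
#if 0
#include <sys/ioctl.h>

static int guest_terminal_columns(int fd)
{
    struct winsize ws;

    if (ioctl(fd, TIOCGWINSZ, &ws) < 0) {
        return -1;
    }
    return ws.ws_col;
}
#endif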
3301 static const bitmask_transtbl iflag_tbl[] = {
3302 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3303 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3304 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3305 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3306 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3307 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3308 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3309 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3310 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3311 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3312 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3313 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3314 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3315 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3316 { 0, 0, 0, 0 }
3319 static const bitmask_transtbl oflag_tbl[] = {
3320 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3321 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3322 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3323 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3324 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3325 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3326 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3327 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3328 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3329 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3330 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3331 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3332 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3333 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3334 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3335 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3336 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3337 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3338 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3339 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3340 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3341 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3342 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3343 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3344 { 0, 0, 0, 0 }
3347 static const bitmask_transtbl cflag_tbl[] = {
3348 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3349 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3350 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3351 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3352 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3353 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3354 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3355 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3356 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3357 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3358 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3359 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3360 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3361 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3362 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3363 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3364 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3365 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3366 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3367 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3368 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3369 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3370 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3371 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3372 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3373 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3374 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3375 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3376 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3377 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3378 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3379 { 0, 0, 0, 0 }
3382 static const bitmask_transtbl lflag_tbl[] = {
3383 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3384 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3385 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3386 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3387 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3388 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3389 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3390 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3391 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3392 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3393 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3394 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3395 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3396 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3397 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3398 { 0, 0, 0, 0 }
3401 static void target_to_host_termios (void *dst, const void *src)
3403 struct host_termios *host = dst;
3404 const struct target_termios *target = src;
3406 host->c_iflag =
3407 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3408 host->c_oflag =
3409 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3410 host->c_cflag =
3411 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3412 host->c_lflag =
3413 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3414 host->c_line = target->c_line;
3416 memset(host->c_cc, 0, sizeof(host->c_cc));
3417 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3418 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3419 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3420 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3421 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3422 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3423 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3424 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3425 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3426 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3427 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3428 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3429 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3430 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3431 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3432 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3433 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3436 static void host_to_target_termios (void *dst, const void *src)
3438 struct target_termios *target = dst;
3439 const struct host_termios *host = src;
3441 target->c_iflag =
3442 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3443 target->c_oflag =
3444 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3445 target->c_cflag =
3446 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3447 target->c_lflag =
3448 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3449 target->c_line = host->c_line;
3451 memset(target->c_cc, 0, sizeof(target->c_cc));
3452 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3453 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3454 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3455 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3456 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3457 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3458 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3459 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3460 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3461 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3462 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3463 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3464 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3465 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3466 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3467 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3468 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3471 static const StructEntry struct_termios_def = {
3472 .convert = { host_to_target_termios, target_to_host_termios },
3473 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3474 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3477 static bitmask_transtbl mmap_flags_tbl[] = {
3478 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3479 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3480 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3481 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3482 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3483 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3484 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3485 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3486 { 0, 0, 0, 0 }
3489 #if defined(TARGET_I386)
3491 /* NOTE: there is really one LDT for all the threads */
3492 static uint8_t *ldt_table;
3494 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3496 int size;
3497 void *p;
3499 if (!ldt_table)
3500 return 0;
3501 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3502 if (size > bytecount)
3503 size = bytecount;
3504 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3505 if (!p)
3506 return -TARGET_EFAULT;
3507 /* ??? Should this be byteswapped? */
3508 memcpy(p, ldt_table, size);
3509 unlock_user(p, ptr, size);
3510 return size;
3513 /* XXX: add locking support */
3514 static abi_long write_ldt(CPUX86State *env,
3515 abi_ulong ptr, unsigned long bytecount, int oldmode)
3517 struct target_modify_ldt_ldt_s ldt_info;
3518 struct target_modify_ldt_ldt_s *target_ldt_info;
3519 int seg_32bit, contents, read_exec_only, limit_in_pages;
3520 int seg_not_present, useable, lm;
3521 uint32_t *lp, entry_1, entry_2;
3523 if (bytecount != sizeof(ldt_info))
3524 return -TARGET_EINVAL;
3525 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3526 return -TARGET_EFAULT;
3527 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3528 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3529 ldt_info.limit = tswap32(target_ldt_info->limit);
3530 ldt_info.flags = tswap32(target_ldt_info->flags);
3531 unlock_user_struct(target_ldt_info, ptr, 0);
3533 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3534 return -TARGET_EINVAL;
3535 seg_32bit = ldt_info.flags & 1;
3536 contents = (ldt_info.flags >> 1) & 3;
3537 read_exec_only = (ldt_info.flags >> 3) & 1;
3538 limit_in_pages = (ldt_info.flags >> 4) & 1;
3539 seg_not_present = (ldt_info.flags >> 5) & 1;
3540 useable = (ldt_info.flags >> 6) & 1;
3541 #ifdef TARGET_ABI32
3542 lm = 0;
3543 #else
3544 lm = (ldt_info.flags >> 7) & 1;
3545 #endif
3546 if (contents == 3) {
3547 if (oldmode)
3548 return -TARGET_EINVAL;
3549 if (seg_not_present == 0)
3550 return -TARGET_EINVAL;
3552 /* allocate the LDT */
3553 if (!ldt_table) {
3554 env->ldt.base = target_mmap(0,
3555 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3556 PROT_READ|PROT_WRITE,
3557 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3558 if (env->ldt.base == -1)
3559 return -TARGET_ENOMEM;
3560 memset(g2h(env->ldt.base), 0,
3561 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3562 env->ldt.limit = 0xffff;
3563 ldt_table = g2h(env->ldt.base);
3566 /* NOTE: same code as Linux kernel */
3567 /* Allow LDTs to be cleared by the user. */
3568 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3569 if (oldmode ||
3570 (contents == 0 &&
3571 read_exec_only == 1 &&
3572 seg_32bit == 0 &&
3573 limit_in_pages == 0 &&
3574 seg_not_present == 1 &&
3575 useable == 0 )) {
3576 entry_1 = 0;
3577 entry_2 = 0;
3578 goto install;
3582 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3583 (ldt_info.limit & 0x0ffff);
3584 entry_2 = (ldt_info.base_addr & 0xff000000) |
3585 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3586 (ldt_info.limit & 0xf0000) |
3587 ((read_exec_only ^ 1) << 9) |
3588 (contents << 10) |
3589 ((seg_not_present ^ 1) << 15) |
3590 (seg_32bit << 22) |
3591 (limit_in_pages << 23) |
3592 (lm << 21) |
3593 0x7000;
3594 if (!oldmode)
3595 entry_2 |= (useable << 20);
3597 /* Install the new entry ... */
3598 install:
3599 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3600 lp[0] = tswap32(entry_1);
3601 lp[1] = tswap32(entry_2);
3602 return 0;
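/* Worked example (illustrative only) of the entry_1/entry_2 packing above,
 * assuming base_addr = 0x12345678, limit = 0xabcde, seg_32bit = 1,
 * contents = 0, read_exec_only = 0, limit_in_pages = 1, seg_not_present = 0,
 * useable = 1, lm = 0 and !oldmode:
 *   entry_1 = (0x5678 << 16) | 0xbcde = 0x5678bcde
 *   entry_2 = 0x12000000 | 0x34 | 0xa0000 | (1 << 9) | (1 << 15)
 *           | (1 << 22) | (1 << 23) | (1 << 20) | 0x7000 = 0x12daf234
 * i.e. the two 32-bit halves of an x86 segment descriptor, which is what the
 * install: label stores into the LDT slot.
 */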
3605 /* specific and weird i386 syscalls */
3606 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3607 unsigned long bytecount)
3609 abi_long ret;
3611 switch (func) {
3612 case 0:
3613 ret = read_ldt(ptr, bytecount);
3614 break;
3615 case 1:
3616 ret = write_ldt(env, ptr, bytecount, 1);
3617 break;
3618 case 0x11:
3619 ret = write_ldt(env, ptr, bytecount, 0);
3620 break;
3621 default:
3622 ret = -TARGET_ENOSYS;
3623 break;
3625 return ret;
3628 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3629 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3631 uint64_t *gdt_table = g2h(env->gdt.base);
3632 struct target_modify_ldt_ldt_s ldt_info;
3633 struct target_modify_ldt_ldt_s *target_ldt_info;
3634 int seg_32bit, contents, read_exec_only, limit_in_pages;
3635 int seg_not_present, useable, lm;
3636 uint32_t *lp, entry_1, entry_2;
3637 int i;
3639 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3640 if (!target_ldt_info)
3641 return -TARGET_EFAULT;
3642 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3643 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3644 ldt_info.limit = tswap32(target_ldt_info->limit);
3645 ldt_info.flags = tswap32(target_ldt_info->flags);
3646 if (ldt_info.entry_number == -1) {
3647 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3648 if (gdt_table[i] == 0) {
3649 ldt_info.entry_number = i;
3650 target_ldt_info->entry_number = tswap32(i);
3651 break;
3655 unlock_user_struct(target_ldt_info, ptr, 1);
3657 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3658 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3659 return -TARGET_EINVAL;
3660 seg_32bit = ldt_info.flags & 1;
3661 contents = (ldt_info.flags >> 1) & 3;
3662 read_exec_only = (ldt_info.flags >> 3) & 1;
3663 limit_in_pages = (ldt_info.flags >> 4) & 1;
3664 seg_not_present = (ldt_info.flags >> 5) & 1;
3665 useable = (ldt_info.flags >> 6) & 1;
3666 #ifdef TARGET_ABI32
3667 lm = 0;
3668 #else
3669 lm = (ldt_info.flags >> 7) & 1;
3670 #endif
3672 if (contents == 3) {
3673 if (seg_not_present == 0)
3674 return -TARGET_EINVAL;
3677 /* NOTE: same code as Linux kernel */
3678 /* Allow LDTs to be cleared by the user. */
3679 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3680 if ((contents == 0 &&
3681 read_exec_only == 1 &&
3682 seg_32bit == 0 &&
3683 limit_in_pages == 0 &&
3684 seg_not_present == 1 &&
3685 useable == 0 )) {
3686 entry_1 = 0;
3687 entry_2 = 0;
3688 goto install;
3692 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3693 (ldt_info.limit & 0x0ffff);
3694 entry_2 = (ldt_info.base_addr & 0xff000000) |
3695 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3696 (ldt_info.limit & 0xf0000) |
3697 ((read_exec_only ^ 1) << 9) |
3698 (contents << 10) |
3699 ((seg_not_present ^ 1) << 15) |
3700 (seg_32bit << 22) |
3701 (limit_in_pages << 23) |
3702 (useable << 20) |
3703 (lm << 21) |
3704 0x7000;
3706 /* Install the new entry ... */
3707 install:
3708 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3709 lp[0] = tswap32(entry_1);
3710 lp[1] = tswap32(entry_2);
3711 return 0;
3714 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3716 struct target_modify_ldt_ldt_s *target_ldt_info;
3717 uint64_t *gdt_table = g2h(env->gdt.base);
3718 uint32_t base_addr, limit, flags;
3719 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3720 int seg_not_present, useable, lm;
3721 uint32_t *lp, entry_1, entry_2;
3723 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3724 if (!target_ldt_info)
3725 return -TARGET_EFAULT;
3726 idx = tswap32(target_ldt_info->entry_number);
3727 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3728 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3729 unlock_user_struct(target_ldt_info, ptr, 1);
3730 return -TARGET_EINVAL;
3732 lp = (uint32_t *)(gdt_table + idx);
3733 entry_1 = tswap32(lp[0]);
3734 entry_2 = tswap32(lp[1]);
3736 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3737 contents = (entry_2 >> 10) & 3;
3738 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3739 seg_32bit = (entry_2 >> 22) & 1;
3740 limit_in_pages = (entry_2 >> 23) & 1;
3741 useable = (entry_2 >> 20) & 1;
3742 #ifdef TARGET_ABI32
3743 lm = 0;
3744 #else
3745 lm = (entry_2 >> 21) & 1;
3746 #endif
3747 flags = (seg_32bit << 0) | (contents << 1) |
3748 (read_exec_only << 3) | (limit_in_pages << 4) |
3749 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3750 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3751 base_addr = (entry_1 >> 16) |
3752 (entry_2 & 0xff000000) |
3753 ((entry_2 & 0xff) << 16);
3754 target_ldt_info->base_addr = tswapl(base_addr);
3755 target_ldt_info->limit = tswap32(limit);
3756 target_ldt_info->flags = tswap32(flags);
3757 unlock_user_struct(target_ldt_info, ptr, 1);
3758 return 0;
3760 #endif /* TARGET_I386 && TARGET_ABI32 */
3762 #ifndef TARGET_ABI32
3763 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3765 abi_long ret = 0;
3766 abi_ulong val;
3767 int idx;
3769 switch(code) {
3770 case TARGET_ARCH_SET_GS:
3771 case TARGET_ARCH_SET_FS:
3772 if (code == TARGET_ARCH_SET_GS)
3773 idx = R_GS;
3774 else
3775 idx = R_FS;
3776 cpu_x86_load_seg(env, idx, 0);
3777 env->segs[idx].base = addr;
3778 break;
3779 case TARGET_ARCH_GET_GS:
3780 case TARGET_ARCH_GET_FS:
3781 if (code == TARGET_ARCH_GET_GS)
3782 idx = R_GS;
3783 else
3784 idx = R_FS;
3785 val = env->segs[idx].base;
3786 if (put_user(val, addr, abi_ulong))
3787 return -TARGET_EFAULT;
3788 break;
3789 default:
3790 ret = -TARGET_EINVAL;
3791 break;
3793 return ret;
3795 #endif
3797 #endif /* defined(TARGET_I386) */
3799 #define NEW_STACK_SIZE 0x40000
3801 #if defined(CONFIG_USE_NPTL)
3803 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3804 typedef struct {
3805 CPUState *env;
3806 pthread_mutex_t mutex;
3807 pthread_cond_t cond;
3808 pthread_t thread;
3809 uint32_t tid;
3810 abi_ulong child_tidptr;
3811 abi_ulong parent_tidptr;
3812 sigset_t sigmask;
3813 } new_thread_info;
3815 static void *clone_func(void *arg)
3817 new_thread_info *info = arg;
3818 CPUState *env;
3819 TaskState *ts;
3821 env = info->env;
3822 thread_env = env;
3823 ts = (TaskState *)thread_env->opaque;
3824 info->tid = gettid();
3825 env->host_tid = info->tid;
3826 task_settid(ts);
3827 if (info->child_tidptr)
3828 put_user_u32(info->tid, info->child_tidptr);
3829 if (info->parent_tidptr)
3830 put_user_u32(info->tid, info->parent_tidptr);
3831 /* Enable signals. */
3832 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3833 /* Signal to the parent that we're ready. */
3834 pthread_mutex_lock(&info->mutex);
3835 pthread_cond_broadcast(&info->cond);
3836 pthread_mutex_unlock(&info->mutex);
3837 /* Wait until the parent has finished initializing the TLS state. */
3838 pthread_mutex_lock(&clone_lock);
3839 pthread_mutex_unlock(&clone_lock);
3840 cpu_loop(env);
3841 /* never exits */
3842 return NULL;
3844 #else
3846 static int clone_func(void *arg)
3848 CPUState *env = arg;
3849 cpu_loop(env);
3850 /* never exits */
3851 return 0;
3853 #endif
3855 /* do_fork() must return host values and target errnos (unlike most
3856 do_*() functions). */
3857 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3858 abi_ulong parent_tidptr, target_ulong newtls,
3859 abi_ulong child_tidptr)
3861 int ret;
3862 TaskState *ts;
3863 CPUState *new_env;
3864 #if defined(CONFIG_USE_NPTL)
3865 unsigned int nptl_flags;
3866 sigset_t sigmask;
3867 #else
3868 uint8_t *new_stack;
3869 #endif
3871 /* Emulate vfork() with fork() */
3872 if (flags & CLONE_VFORK)
3873 flags &= ~(CLONE_VFORK | CLONE_VM);
3875 if (flags & CLONE_VM) {
3876 TaskState *parent_ts = (TaskState *)env->opaque;
3877 #if defined(CONFIG_USE_NPTL)
3878 new_thread_info info;
3879 pthread_attr_t attr;
3880 #endif
3881 ts = qemu_mallocz(sizeof(TaskState));
3882 init_task_state(ts);
3883 /* we create a new CPU instance. */
3884 new_env = cpu_copy(env);
3885 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3886 cpu_reset(new_env);
3887 #endif
3888 /* Init regs that differ from the parent. */
3889 cpu_clone_regs(new_env, newsp);
3890 new_env->opaque = ts;
3891 ts->bprm = parent_ts->bprm;
3892 ts->info = parent_ts->info;
3893 #if defined(CONFIG_USE_NPTL)
3894 nptl_flags = flags;
3895 flags &= ~CLONE_NPTL_FLAGS2;
3897 if (nptl_flags & CLONE_CHILD_CLEARTID) {
3898 ts->child_tidptr = child_tidptr;
3901 if (nptl_flags & CLONE_SETTLS)
3902 cpu_set_tls (new_env, newtls);
3904 /* Grab a mutex so that thread setup appears atomic. */
3905 pthread_mutex_lock(&clone_lock);
3907 memset(&info, 0, sizeof(info));
3908 pthread_mutex_init(&info.mutex, NULL);
3909 pthread_mutex_lock(&info.mutex);
3910 pthread_cond_init(&info.cond, NULL);
3911 info.env = new_env;
3912 if (nptl_flags & CLONE_CHILD_SETTID)
3913 info.child_tidptr = child_tidptr;
3914 if (nptl_flags & CLONE_PARENT_SETTID)
3915 info.parent_tidptr = parent_tidptr;
3917 ret = pthread_attr_init(&attr);
3918 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
3919 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
3920 /* It is not safe to deliver signals until the child has finished
3921 initializing, so temporarily block all signals. */
3922 sigfillset(&sigmask);
3923 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3925 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3926 /* TODO: Free new CPU state if thread creation failed. */
3928 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3929 pthread_attr_destroy(&attr);
3930 if (ret == 0) {
3931 /* Wait for the child to initialize. */
3932 pthread_cond_wait(&info.cond, &info.mutex);
3933 ret = info.tid;
3934 if (flags & CLONE_PARENT_SETTID)
3935 put_user_u32(ret, parent_tidptr);
3936 } else {
3937 ret = -1;
3939 pthread_mutex_unlock(&info.mutex);
3940 pthread_cond_destroy(&info.cond);
3941 pthread_mutex_destroy(&info.mutex);
3942 pthread_mutex_unlock(&clone_lock);
3943 #else
3944 if (flags & CLONE_NPTL_FLAGS2)
3945 return -EINVAL;
3946 /* This is probably going to die very quickly, but do it anyway. */
3947 new_stack = qemu_mallocz (NEW_STACK_SIZE);
3948 #ifdef __ia64__
3949 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
3950 #else
3951 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3952 #endif
3953 #endif
3954 } else {
3955 /* if no CLONE_VM, we consider it a fork */
3956 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3957 return -EINVAL;
3958 fork_start();
3959 ret = fork();
3960 if (ret == 0) {
3961 /* Child Process. */
3962 cpu_clone_regs(env, newsp);
3963 fork_end(1);
3964 #if defined(CONFIG_USE_NPTL)
3965 /* There is a race condition here. The parent process could
3966 theoretically read the TID in the child process before the child
3967 tid is set. This would require using either ptrace
3968 (not implemented) or having *_tidptr point at a shared memory
3969 mapping. We can't repeat the spinlock hack used above because
3970 the child process gets its own copy of the lock. */
3971 if (flags & CLONE_CHILD_SETTID)
3972 put_user_u32(gettid(), child_tidptr);
3973 if (flags & CLONE_PARENT_SETTID)
3974 put_user_u32(gettid(), parent_tidptr);
3975 ts = (TaskState *)env->opaque;
3976 if (flags & CLONE_SETTLS)
3977 cpu_set_tls (env, newtls);
3978 if (flags & CLONE_CHILD_CLEARTID)
3979 ts->child_tidptr = child_tidptr;
3980 #endif
3981 } else {
3982 fork_end(0);
3985 return ret;
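/* Note: do_fork() above covers three cases. With CLONE_VM and NPTL support
 * the new task becomes a host pthread sharing the address space; with
 * CLONE_VM but no NPTL it falls back to a raw clone()/__clone2() on a
 * malloc'ed stack; without CLONE_VM it is treated as a plain fork()
 * (vfork is downgraded to fork at the top of the function). In the
 * CLONE_VM cases the child gets a fresh CPUState from cpu_copy(); in the
 * fork() case the existing env is reused, with cpu_clone_regs() applied
 * in the child. */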
3988 /* warning: doesn't handle Linux-specific flags... */
3989 static int target_to_host_fcntl_cmd(int cmd)
3991 switch(cmd) {
3992 case TARGET_F_DUPFD:
3993 case TARGET_F_GETFD:
3994 case TARGET_F_SETFD:
3995 case TARGET_F_GETFL:
3996 case TARGET_F_SETFL:
3997 return cmd;
3998 case TARGET_F_GETLK:
3999 return F_GETLK;
4000 case TARGET_F_SETLK:
4001 return F_SETLK;
4002 case TARGET_F_SETLKW:
4003 return F_SETLKW;
4004 case TARGET_F_GETOWN:
4005 return F_GETOWN;
4006 case TARGET_F_SETOWN:
4007 return F_SETOWN;
4008 case TARGET_F_GETSIG:
4009 return F_GETSIG;
4010 case TARGET_F_SETSIG:
4011 return F_SETSIG;
4012 #if TARGET_ABI_BITS == 32
4013 case TARGET_F_GETLK64:
4014 return F_GETLK64;
4015 case TARGET_F_SETLK64:
4016 return F_SETLK64;
4017 case TARGET_F_SETLKW64:
4018 return F_SETLKW64;
4019 #endif
4020 case TARGET_F_SETLEASE:
4021 return F_SETLEASE;
4022 case TARGET_F_GETLEASE:
4023 return F_GETLEASE;
4024 #ifdef F_DUPFD_CLOEXEC
4025 case TARGET_F_DUPFD_CLOEXEC:
4026 return F_DUPFD_CLOEXEC;
4027 #endif
4028 case TARGET_F_NOTIFY:
4029 return F_NOTIFY;
4030 default:
4031 return -TARGET_EINVAL;
4033 return -TARGET_EINVAL;
4036 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4038 struct flock fl;
4039 struct target_flock *target_fl;
4040 struct flock64 fl64;
4041 struct target_flock64 *target_fl64;
4042 abi_long ret;
4043 int host_cmd = target_to_host_fcntl_cmd(cmd);
4045 if (host_cmd == -TARGET_EINVAL)
4046 return host_cmd;
4048 switch(cmd) {
4049 case TARGET_F_GETLK:
4050 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4051 return -TARGET_EFAULT;
4052 fl.l_type = tswap16(target_fl->l_type);
4053 fl.l_whence = tswap16(target_fl->l_whence);
4054 fl.l_start = tswapl(target_fl->l_start);
4055 fl.l_len = tswapl(target_fl->l_len);
4056 fl.l_pid = tswap32(target_fl->l_pid);
4057 unlock_user_struct(target_fl, arg, 0);
4058 ret = get_errno(fcntl(fd, host_cmd, &fl));
4059 if (ret == 0) {
4060 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4061 return -TARGET_EFAULT;
4062 target_fl->l_type = tswap16(fl.l_type);
4063 target_fl->l_whence = tswap16(fl.l_whence);
4064 target_fl->l_start = tswapl(fl.l_start);
4065 target_fl->l_len = tswapl(fl.l_len);
4066 target_fl->l_pid = tswap32(fl.l_pid);
4067 unlock_user_struct(target_fl, arg, 1);
4069 break;
4071 case TARGET_F_SETLK:
4072 case TARGET_F_SETLKW:
4073 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4074 return -TARGET_EFAULT;
4075 fl.l_type = tswap16(target_fl->l_type);
4076 fl.l_whence = tswap16(target_fl->l_whence);
4077 fl.l_start = tswapl(target_fl->l_start);
4078 fl.l_len = tswapl(target_fl->l_len);
4079 fl.l_pid = tswap32(target_fl->l_pid);
4080 unlock_user_struct(target_fl, arg, 0);
4081 ret = get_errno(fcntl(fd, host_cmd, &fl));
4082 break;
4084 case TARGET_F_GETLK64:
4085 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4086 return -TARGET_EFAULT;
4087 fl64.l_type = tswap16(target_fl64->l_type);
4088 fl64.l_whence = tswap16(target_fl64->l_whence);
4089 fl64.l_start = tswapl(target_fl64->l_start);
4090 fl64.l_len = tswapl(target_fl64->l_len);
4091 fl64.l_pid = tswap32(target_fl64->l_pid);
4092 unlock_user_struct(target_fl64, arg, 0);
4093 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4094 if (ret == 0) {
4095 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4096 return -TARGET_EFAULT;
4097 target_fl64->l_type = tswap16(fl64.l_type);
4098 target_fl64->l_whence = tswap16(fl64.l_whence);
4099 target_fl64->l_start = tswapl(fl64.l_start);
4100 target_fl64->l_len = tswapl(fl64.l_len);
4101 target_fl64->l_pid = tswap32(fl64.l_pid);
4102 unlock_user_struct(target_fl64, arg, 1);
4104 break;
4105 case TARGET_F_SETLK64:
4106 case TARGET_F_SETLKW64:
4107 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4108 return -TARGET_EFAULT;
4109 fl64.l_type = tswap16(target_fl64->l_type);
4110 fl64.l_whence = tswap16(target_fl64->l_whence);
4111 fl64.l_start = tswapl(target_fl64->l_start);
4112 fl64.l_len = tswapl(target_fl64->l_len);
4113 fl64.l_pid = tswap32(target_fl64->l_pid);
4114 unlock_user_struct(target_fl64, arg, 0);
4115 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4116 break;
4118 case TARGET_F_GETFL:
4119 ret = get_errno(fcntl(fd, host_cmd, arg));
4120 if (ret >= 0) {
4121 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4123 break;
4125 case TARGET_F_SETFL:
4126 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4127 break;
4129 case TARGET_F_SETOWN:
4130 case TARGET_F_GETOWN:
4131 case TARGET_F_SETSIG:
4132 case TARGET_F_GETSIG:
4133 case TARGET_F_SETLEASE:
4134 case TARGET_F_GETLEASE:
4135 ret = get_errno(fcntl(fd, host_cmd, arg));
4136 break;
4138 default:
4139 ret = get_errno(fcntl(fd, cmd, arg));
4140 break;
4142 return ret;
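/* Note: do_fcntl() relies on target_to_host_fcntl_cmd() above to map the
 * guest F_* command onto the host value (unknown commands come back as
 * -TARGET_EINVAL and are returned unchanged). struct flock/flock64
 * contents are byte-swapped field by field with tswap16/tswapl/tswap32,
 * and for F_GETFL/F_SETFL the O_* open-flag bitmask is translated through
 * fcntl_flags_tbl in each direction. */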
4145 #ifdef USE_UID16
4147 static inline int high2lowuid(int uid)
4149 if (uid > 65535)
4150 return 65534;
4151 else
4152 return uid;
4155 static inline int high2lowgid(int gid)
4157 if (gid > 65535)
4158 return 65534;
4159 else
4160 return gid;
4163 static inline int low2highuid(int uid)
4165 if ((int16_t)uid == -1)
4166 return -1;
4167 else
4168 return uid;
4171 static inline int low2highgid(int gid)
4173 if ((int16_t)gid == -1)
4174 return -1;
4175 else
4176 return gid;
4178 static inline int tswapid(int id)
4180 return tswap16(id);
4182 #else /* !USE_UID16 */
4183 static inline int high2lowuid(int uid)
4185 return uid;
4187 static inline int high2lowgid(int gid)
4189 return gid;
4191 static inline int low2highuid(int uid)
4193 return uid;
4195 static inline int low2highgid(int gid)
4197 return gid;
4199 static inline int tswapid(int id)
4201 return tswap32(id);
4203 #endif /* USE_UID16 */
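/* Note on the UID16 helpers: when the target only has 16-bit uid_t/gid_t,
 * host IDs that do not fit are clamped to 65534 (the traditional
 * "overflow" ID), e.g. high2lowuid(100000) == 65534, while the special
 * value -1 ("leave unchanged" for set*uid-style calls) is preserved by
 * low2highuid()/low2highgid(). tswapid() swaps 16 bits in that case and
 * 32 bits otherwise. */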
4205 void syscall_init(void)
4207 IOCTLEntry *ie;
4208 const argtype *arg_type;
4209 int size;
4210 int i;
4212 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4213 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4214 #include "syscall_types.h"
4215 #undef STRUCT
4216 #undef STRUCT_SPECIAL
4218 /* We patch the ioctl size if necessary. We rely on the fact that
4219 no ioctl has all the bits of the size field set to '1'. */
4220 ie = ioctl_entries;
4221 while (ie->target_cmd != 0) {
4222 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4223 TARGET_IOC_SIZEMASK) {
4224 arg_type = ie->arg_type;
4225 if (arg_type[0] != TYPE_PTR) {
4226 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4227 ie->target_cmd);
4228 exit(1);
4230 arg_type++;
4231 size = thunk_type_size(arg_type, 0);
4232 ie->target_cmd = (ie->target_cmd &
4233 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4234 (size << TARGET_IOC_SIZESHIFT);
4237 /* Build target_to_host_errno_table[] table from
4238 * host_to_target_errno_table[]. */
4239 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4240 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4242 /* automatic consistency check if same arch */
4243 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4244 (defined(__x86_64__) && defined(TARGET_X86_64))
4245 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4246 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4247 ie->name, ie->target_cmd, ie->host_cmd);
4249 #endif
4250 ie++;
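/* Note: the size-patching loop above takes ioctl numbers whose size field
 * is the all-ones placeholder (TARGET_IOC_SIZEMASK) and rewrites that
 * field with thunk_type_size() of the pointed-to structure, so the target
 * ioctl number ends up encoding the real target-layout argument size.
 * This is why an entry with a placeholder size must have a TYPE_PTR
 * argument. */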
4254 #if TARGET_ABI_BITS == 32
4255 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4257 #ifdef TARGET_WORDS_BIGENDIAN
4258 return ((uint64_t)word0 << 32) | word1;
4259 #else
4260 return ((uint64_t)word1 << 32) | word0;
4261 #endif
4263 #else /* TARGET_ABI_BITS == 32 */
4264 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4266 return word0;
4268 #endif /* TARGET_ABI_BITS != 32 */
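/* Note: on 32-bit ABIs a 64-bit file offset arrives split across two
 * syscall arguments; target_offset64() reassembles it respecting the
 * guest's word order. For example, on a big-endian target the pair
 * (0x00000001, 0x00000000) yields 0x100000000, whereas a little-endian
 * target passes the same value as (0x00000000, 0x00000001). */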
4270 #ifdef TARGET_NR_truncate64
4271 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4272 abi_long arg2,
4273 abi_long arg3,
4274 abi_long arg4)
4276 #ifdef TARGET_ARM
4277 if (((CPUARMState *)cpu_env)->eabi)
4279 arg2 = arg3;
4280 arg3 = arg4;
4282 #endif
4283 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4285 #endif
4287 #ifdef TARGET_NR_ftruncate64
4288 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4289 abi_long arg2,
4290 abi_long arg3,
4291 abi_long arg4)
4293 #ifdef TARGET_ARM
4294 if (((CPUARMState *)cpu_env)->eabi)
4296 arg2 = arg3;
4297 arg3 = arg4;
4299 #endif
4300 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4302 #endif
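/* Note: the ARM EABI special case in the two wrappers above presumably
 * exists because the EABI passes 64-bit values in aligned register pairs,
 * which inserts a padding argument before the offset; shifting arg2/arg3
 * over compensates for that when splitting the offset words back out. */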
4304 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4305 abi_ulong target_addr)
4307 struct target_timespec *target_ts;
4309 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4310 return -TARGET_EFAULT;
4311 host_ts->tv_sec = tswapl(target_ts->tv_sec);
4312 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
4313 unlock_user_struct(target_ts, target_addr, 0);
4314 return 0;
4317 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4318 struct timespec *host_ts)
4320 struct target_timespec *target_ts;
4322 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4323 return -TARGET_EFAULT;
4324 target_ts->tv_sec = tswapl(host_ts->tv_sec);
4325 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
4326 unlock_user_struct(target_ts, target_addr, 1);
4327 return 0;
4330 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4331 static inline abi_long host_to_target_stat64(void *cpu_env,
4332 abi_ulong target_addr,
4333 struct stat *host_st)
4335 #ifdef TARGET_ARM
4336 if (((CPUARMState *)cpu_env)->eabi) {
4337 struct target_eabi_stat64 *target_st;
4339 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4340 return -TARGET_EFAULT;
4341 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4342 __put_user(host_st->st_dev, &target_st->st_dev);
4343 __put_user(host_st->st_ino, &target_st->st_ino);
4344 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4345 __put_user(host_st->st_ino, &target_st->__st_ino);
4346 #endif
4347 __put_user(host_st->st_mode, &target_st->st_mode);
4348 __put_user(host_st->st_nlink, &target_st->st_nlink);
4349 __put_user(host_st->st_uid, &target_st->st_uid);
4350 __put_user(host_st->st_gid, &target_st->st_gid);
4351 __put_user(host_st->st_rdev, &target_st->st_rdev);
4352 __put_user(host_st->st_size, &target_st->st_size);
4353 __put_user(host_st->st_blksize, &target_st->st_blksize);
4354 __put_user(host_st->st_blocks, &target_st->st_blocks);
4355 __put_user(host_st->st_atime, &target_st->target_st_atime);
4356 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4357 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4358 unlock_user_struct(target_st, target_addr, 1);
4359 } else
4360 #endif
4362 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4363 struct target_stat *target_st;
4364 #else
4365 struct target_stat64 *target_st;
4366 #endif
4368 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4369 return -TARGET_EFAULT;
4370 memset(target_st, 0, sizeof(*target_st));
4371 __put_user(host_st->st_dev, &target_st->st_dev);
4372 __put_user(host_st->st_ino, &target_st->st_ino);
4373 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4374 __put_user(host_st->st_ino, &target_st->__st_ino);
4375 #endif
4376 __put_user(host_st->st_mode, &target_st->st_mode);
4377 __put_user(host_st->st_nlink, &target_st->st_nlink);
4378 __put_user(host_st->st_uid, &target_st->st_uid);
4379 __put_user(host_st->st_gid, &target_st->st_gid);
4380 __put_user(host_st->st_rdev, &target_st->st_rdev);
4381 /* XXX: better use of kernel struct */
4382 __put_user(host_st->st_size, &target_st->st_size);
4383 __put_user(host_st->st_blksize, &target_st->st_blksize);
4384 __put_user(host_st->st_blocks, &target_st->st_blocks);
4385 __put_user(host_st->st_atime, &target_st->target_st_atime);
4386 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4387 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4388 unlock_user_struct(target_st, target_addr, 1);
4391 return 0;
4393 #endif
4395 #if defined(CONFIG_USE_NPTL)
4396 /* ??? Using host futex calls even when target atomic operations
4397 are not really atomic probably breaks things. However, implementing
4398 futexes locally would make futexes shared between multiple processes
4399 tricky. Then again, they're probably useless because guest atomic
4400 operations won't work either. */
4401 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4402 target_ulong uaddr2, int val3)
4404 struct timespec ts, *pts;
4405 int base_op;
4407 /* ??? We assume FUTEX_* constants are the same on both host
4408 and target. */
4409 #ifdef FUTEX_CMD_MASK
4410 base_op = op & FUTEX_CMD_MASK;
4411 #else
4412 base_op = op;
4413 #endif
4414 switch (base_op) {
4415 case FUTEX_WAIT:
4416 if (timeout) {
4417 pts = &ts;
4418 target_to_host_timespec(pts, timeout);
4419 } else {
4420 pts = NULL;
4422 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4423 pts, NULL, 0));
4424 case FUTEX_WAKE:
4425 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4426 case FUTEX_FD:
4427 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4428 case FUTEX_REQUEUE:
4429 case FUTEX_CMP_REQUEUE:
4430 case FUTEX_WAKE_OP:
4431 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4432 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4433 But the prototype takes a `struct timespec *'; insert casts
4434 to satisfy the compiler. We do not need to tswap TIMEOUT
4435 since it's not compared to guest memory. */
4436 pts = (struct timespec *)(uintptr_t) timeout;
4437 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4438 g2h(uaddr2),
4439 (base_op == FUTEX_CMP_REQUEUE
4440 ? tswap32(val3)
4441 : val3)));
4442 default:
4443 return -TARGET_ENOSYS;
4446 #endif
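/* Note on do_futex(): the u32 value checked by FUTEX_WAIT lives in guest
 * memory (accessed via g2h()), so VAL is byte-swapped with tswap32()
 * before being handed to the host kernel, and likewise VAL3 for
 * FUTEX_CMP_REQUEUE. For the requeue/wake-op commands the "timeout"
 * argument is really a plain count, which is why it is passed through
 * without conversion, as the comment above explains. */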
4448 /* Map host to target signal numbers for the wait family of syscalls.
4449 Assume all other status bits are the same. */
4450 static int host_to_target_waitstatus(int status)
4452 if (WIFSIGNALED(status)) {
4453 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4455 if (WIFSTOPPED(status)) {
4456 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4457 | (status & 0xff);
4459 return status;
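/* Note: host_to_target_waitstatus() only rewrites the signal number
 * embedded in the wait status: the low 7 bits for a child killed by a
 * signal, bits 8-15 for a stopped one. A normal exit (WIFEXITED), where
 * the status is just the exit code shifted left by 8, passes through
 * unchanged. */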
4462 int get_osversion(void)
4464 static int osversion;
4465 struct new_utsname buf;
4466 const char *s;
4467 int i, n, tmp;
4468 if (osversion)
4469 return osversion;
4470 if (qemu_uname_release && *qemu_uname_release) {
4471 s = qemu_uname_release;
4472 } else {
4473 if (sys_uname(&buf))
4474 return 0;
4475 s = buf.release;
4477 tmp = 0;
4478 for (i = 0; i < 3; i++) {
4479 n = 0;
4480 while (*s >= '0' && *s <= '9') {
4481 n *= 10;
4482 n += *s - '0';
4483 s++;
4485 tmp = (tmp << 8) + n;
4486 if (*s == '.')
4487 s++;
4489 osversion = tmp;
4490 return osversion;
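/* Note: get_osversion() packs the first three dot-separated components of
 * the (possibly overridden) uname release string into one integer, one
 * byte per component; e.g. "2.6.32-foo" becomes 0x020620. The result is
 * cached in the static osversion variable after the first call. */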
4493 /* do_syscall() should always have a single exit point at the end so
4494 that actions, such as logging of syscall results, can be performed.
4495 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4496 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4497 abi_long arg2, abi_long arg3, abi_long arg4,
4498 abi_long arg5, abi_long arg6)
4500 abi_long ret;
4501 struct stat st;
4502 struct statfs stfs;
4503 void *p;
4505 #ifdef DEBUG
4506 gemu_log("syscall %d", num);
4507 #endif
4508 if(do_strace)
4509 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4511 switch(num) {
4512 case TARGET_NR_exit:
4513 #ifdef CONFIG_USE_NPTL
4514 /* In old applications this may be used to implement _exit(2).
4515 However in threaded applications it is used for thread termination,
4516 and _exit_group is used for application termination.
4517 Do thread termination if we have more than one thread. */
4518 /* FIXME: This probably breaks if a signal arrives. We should probably
4519 be disabling signals. */
4520 if (first_cpu->next_cpu) {
4521 TaskState *ts;
4522 CPUState **lastp;
4523 CPUState *p;
4525 cpu_list_lock();
4526 lastp = &first_cpu;
4527 p = first_cpu;
4528 while (p && p != (CPUState *)cpu_env) {
4529 lastp = &p->next_cpu;
4530 p = p->next_cpu;
4532 /* If we didn't find the CPU for this thread then something is
4533 horribly wrong. */
4534 if (!p)
4535 abort();
4536 /* Remove the CPU from the list. */
4537 *lastp = p->next_cpu;
4538 cpu_list_unlock();
4539 ts = ((CPUState *)cpu_env)->opaque;
4540 if (ts->child_tidptr) {
4541 put_user_u32(0, ts->child_tidptr);
4542 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4543 NULL, NULL, 0);
4545 thread_env = NULL;
4546 qemu_free(cpu_env);
4547 qemu_free(ts);
4548 pthread_exit(NULL);
4550 #endif
4551 #ifdef TARGET_GPROF
4552 _mcleanup();
4553 #endif
4554 gdb_exit(cpu_env, arg1);
4555 _exit(arg1);
4556 ret = 0; /* avoid warning */
4557 break;
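/* Note: in the NPTL case above, a secondary thread unlinks its CPUState
 * from the global CPU list, honours CLONE_CHILD_CLEARTID by zeroing
 * *child_tidptr and FUTEX_WAKE-ing any waiters, and then leaves via
 * pthread_exit(). Only the last (or only) thread falls through to
 * gdb_exit()/_exit() and terminates the whole process. */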
4558 case TARGET_NR_read:
4559 if (arg3 == 0)
4560 ret = 0;
4561 else {
4562 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4563 goto efault;
4564 ret = get_errno(read(arg1, p, arg3));
4565 unlock_user(p, arg2, ret);
4567 break;
4568 case TARGET_NR_write:
4569 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4570 goto efault;
4571 ret = get_errno(write(arg1, p, arg3));
4572 unlock_user(p, arg2, 0);
4573 break;
4574 case TARGET_NR_open:
4575 if (!(p = lock_user_string(arg1)))
4576 goto efault;
4577 ret = get_errno(open(path(p),
4578 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4579 arg3));
4580 unlock_user(p, arg1, 0);
4581 break;
4582 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4583 case TARGET_NR_openat:
4584 if (!(p = lock_user_string(arg2)))
4585 goto efault;
4586 ret = get_errno(sys_openat(arg1,
4587 path(p),
4588 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4589 arg4));
4590 unlock_user(p, arg2, 0);
4591 break;
4592 #endif
4593 case TARGET_NR_close:
4594 ret = get_errno(close(arg1));
4595 break;
4596 case TARGET_NR_brk:
4597 ret = do_brk(arg1);
4598 break;
4599 case TARGET_NR_fork:
4600 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4601 break;
4602 #ifdef TARGET_NR_waitpid
4603 case TARGET_NR_waitpid:
4605 int status;
4606 ret = get_errno(waitpid(arg1, &status, arg3));
4607 if (!is_error(ret) && arg2
4608 && put_user_s32(host_to_target_waitstatus(status), arg2))
4609 goto efault;
4611 break;
4612 #endif
4613 #ifdef TARGET_NR_waitid
4614 case TARGET_NR_waitid:
4616 siginfo_t info;
4617 info.si_pid = 0;
4618 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4619 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4620 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4621 goto efault;
4622 host_to_target_siginfo(p, &info);
4623 unlock_user(p, arg3, sizeof(target_siginfo_t));
4626 break;
4627 #endif
4628 #ifdef TARGET_NR_creat /* not on alpha */
4629 case TARGET_NR_creat:
4630 if (!(p = lock_user_string(arg1)))
4631 goto efault;
4632 ret = get_errno(creat(p, arg2));
4633 unlock_user(p, arg1, 0);
4634 break;
4635 #endif
4636 case TARGET_NR_link:
4638 void * p2;
4639 p = lock_user_string(arg1);
4640 p2 = lock_user_string(arg2);
4641 if (!p || !p2)
4642 ret = -TARGET_EFAULT;
4643 else
4644 ret = get_errno(link(p, p2));
4645 unlock_user(p2, arg2, 0);
4646 unlock_user(p, arg1, 0);
4648 break;
4649 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4650 case TARGET_NR_linkat:
4652 void * p2 = NULL;
4653 if (!arg2 || !arg4)
4654 goto efault;
4655 p = lock_user_string(arg2);
4656 p2 = lock_user_string(arg4);
4657 if (!p || !p2)
4658 ret = -TARGET_EFAULT;
4659 else
4660 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4661 unlock_user(p, arg2, 0);
4662 unlock_user(p2, arg4, 0);
4664 break;
4665 #endif
4666 case TARGET_NR_unlink:
4667 if (!(p = lock_user_string(arg1)))
4668 goto efault;
4669 ret = get_errno(unlink(p));
4670 unlock_user(p, arg1, 0);
4671 break;
4672 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4673 case TARGET_NR_unlinkat:
4674 if (!(p = lock_user_string(arg2)))
4675 goto efault;
4676 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4677 unlock_user(p, arg2, 0);
4678 break;
4679 #endif
4680 case TARGET_NR_execve:
4682 char **argp, **envp;
4683 int argc, envc;
4684 abi_ulong gp;
4685 abi_ulong guest_argp;
4686 abi_ulong guest_envp;
4687 abi_ulong addr;
4688 char **q;
4690 argc = 0;
4691 guest_argp = arg2;
4692 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4693 if (get_user_ual(addr, gp))
4694 goto efault;
4695 if (!addr)
4696 break;
4697 argc++;
4699 envc = 0;
4700 guest_envp = arg3;
4701 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4702 if (get_user_ual(addr, gp))
4703 goto efault;
4704 if (!addr)
4705 break;
4706 envc++;
4709 argp = alloca((argc + 1) * sizeof(void *));
4710 envp = alloca((envc + 1) * sizeof(void *));
4712 for (gp = guest_argp, q = argp; gp;
4713 gp += sizeof(abi_ulong), q++) {
4714 if (get_user_ual(addr, gp))
4715 goto execve_efault;
4716 if (!addr)
4717 break;
4718 if (!(*q = lock_user_string(addr)))
4719 goto execve_efault;
4721 *q = NULL;
4723 for (gp = guest_envp, q = envp; gp;
4724 gp += sizeof(abi_ulong), q++) {
4725 if (get_user_ual(addr, gp))
4726 goto execve_efault;
4727 if (!addr)
4728 break;
4729 if (!(*q = lock_user_string(addr)))
4730 goto execve_efault;
4732 *q = NULL;
4734 if (!(p = lock_user_string(arg1)))
4735 goto execve_efault;
4736 ret = get_errno(execve(p, argp, envp));
4737 unlock_user(p, arg1, 0);
4739 goto execve_end;
4741 execve_efault:
4742 ret = -TARGET_EFAULT;
4744 execve_end:
4745 for (gp = guest_argp, q = argp; *q;
4746 gp += sizeof(abi_ulong), q++) {
4747 if (get_user_ual(addr, gp)
4748 || !addr)
4749 break;
4750 unlock_user(*q, addr, 0);
4752 for (gp = guest_envp, q = envp; *q;
4753 gp += sizeof(abi_ulong), q++) {
4754 if (get_user_ual(addr, gp)
4755 || !addr)
4756 break;
4757 unlock_user(*q, addr, 0);
4760 break;
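/* Note: the execve handling above first counts the guest argv/envp
 * vectors, then locks every string into host memory before calling the
 * host execve(). The execve_end unwind walks the same vectors again so
 * that all strings locked so far are released whether the call
 * succeeded, failed, or faulted partway through building the arrays. */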
4761 case TARGET_NR_chdir:
4762 if (!(p = lock_user_string(arg1)))
4763 goto efault;
4764 ret = get_errno(chdir(p));
4765 unlock_user(p, arg1, 0);
4766 break;
4767 #ifdef TARGET_NR_time
4768 case TARGET_NR_time:
4770 time_t host_time;
4771 ret = get_errno(time(&host_time));
4772 if (!is_error(ret)
4773 && arg1
4774 && put_user_sal(host_time, arg1))
4775 goto efault;
4777 break;
4778 #endif
4779 case TARGET_NR_mknod:
4780 if (!(p = lock_user_string(arg1)))
4781 goto efault;
4782 ret = get_errno(mknod(p, arg2, arg3));
4783 unlock_user(p, arg1, 0);
4784 break;
4785 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4786 case TARGET_NR_mknodat:
4787 if (!(p = lock_user_string(arg2)))
4788 goto efault;
4789 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4790 unlock_user(p, arg2, 0);
4791 break;
4792 #endif
4793 case TARGET_NR_chmod:
4794 if (!(p = lock_user_string(arg1)))
4795 goto efault;
4796 ret = get_errno(chmod(p, arg2));
4797 unlock_user(p, arg1, 0);
4798 break;
4799 #ifdef TARGET_NR_break
4800 case TARGET_NR_break:
4801 goto unimplemented;
4802 #endif
4803 #ifdef TARGET_NR_oldstat
4804 case TARGET_NR_oldstat:
4805 goto unimplemented;
4806 #endif
4807 case TARGET_NR_lseek:
4808 ret = get_errno(lseek(arg1, arg2, arg3));
4809 break;
4810 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4811 /* Alpha specific */
4812 case TARGET_NR_getxpid:
4813 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
4814 ret = get_errno(getpid());
4815 break;
4816 #endif
4817 #ifdef TARGET_NR_getpid
4818 case TARGET_NR_getpid:
4819 ret = get_errno(getpid());
4820 break;
4821 #endif
4822 case TARGET_NR_mount:
4824 /* need to look at the data field */
4825 void *p2, *p3;
4826 p = lock_user_string(arg1);
4827 p2 = lock_user_string(arg2);
4828 p3 = lock_user_string(arg3);
4829 if (!p || !p2 || !p3)
4830 ret = -TARGET_EFAULT;
4831 else {
4832 /* FIXME - arg5 should be locked, but it isn't clear how to
4833 * do that since it's not guaranteed to be a NULL-terminated
4834 * string.
4836 if (!arg5)
4837 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
4838 else
4839 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4841 unlock_user(p, arg1, 0);
4842 unlock_user(p2, arg2, 0);
4843 unlock_user(p3, arg3, 0);
4844 break;
4846 #ifdef TARGET_NR_umount
4847 case TARGET_NR_umount:
4848 if (!(p = lock_user_string(arg1)))
4849 goto efault;
4850 ret = get_errno(umount(p));
4851 unlock_user(p, arg1, 0);
4852 break;
4853 #endif
4854 #ifdef TARGET_NR_stime /* not on alpha */
4855 case TARGET_NR_stime:
4857 time_t host_time;
4858 if (get_user_sal(host_time, arg1))
4859 goto efault;
4860 ret = get_errno(stime(&host_time));
4862 break;
4863 #endif
4864 case TARGET_NR_ptrace:
4865 goto unimplemented;
4866 #ifdef TARGET_NR_alarm /* not on alpha */
4867 case TARGET_NR_alarm:
4868 ret = alarm(arg1);
4869 break;
4870 #endif
4871 #ifdef TARGET_NR_oldfstat
4872 case TARGET_NR_oldfstat:
4873 goto unimplemented;
4874 #endif
4875 #ifdef TARGET_NR_pause /* not on alpha */
4876 case TARGET_NR_pause:
4877 ret = get_errno(pause());
4878 break;
4879 #endif
4880 #ifdef TARGET_NR_utime
4881 case TARGET_NR_utime:
4883 struct utimbuf tbuf, *host_tbuf;
4884 struct target_utimbuf *target_tbuf;
4885 if (arg2) {
4886 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4887 goto efault;
4888 tbuf.actime = tswapl(target_tbuf->actime);
4889 tbuf.modtime = tswapl(target_tbuf->modtime);
4890 unlock_user_struct(target_tbuf, arg2, 0);
4891 host_tbuf = &tbuf;
4892 } else {
4893 host_tbuf = NULL;
4895 if (!(p = lock_user_string(arg1)))
4896 goto efault;
4897 ret = get_errno(utime(p, host_tbuf));
4898 unlock_user(p, arg1, 0);
4900 break;
4901 #endif
4902 case TARGET_NR_utimes:
4904 struct timeval *tvp, tv[2];
4905 if (arg2) {
4906 if (copy_from_user_timeval(&tv[0], arg2)
4907 || copy_from_user_timeval(&tv[1],
4908 arg2 + sizeof(struct target_timeval)))
4909 goto efault;
4910 tvp = tv;
4911 } else {
4912 tvp = NULL;
4914 if (!(p = lock_user_string(arg1)))
4915 goto efault;
4916 ret = get_errno(utimes(p, tvp));
4917 unlock_user(p, arg1, 0);
4919 break;
4920 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4921 case TARGET_NR_futimesat:
4923 struct timeval *tvp, tv[2];
4924 if (arg3) {
4925 if (copy_from_user_timeval(&tv[0], arg3)
4926 || copy_from_user_timeval(&tv[1],
4927 arg3 + sizeof(struct target_timeval)))
4928 goto efault;
4929 tvp = tv;
4930 } else {
4931 tvp = NULL;
4933 if (!(p = lock_user_string(arg2)))
4934 goto efault;
4935 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4936 unlock_user(p, arg2, 0);
4938 break;
4939 #endif
4940 #ifdef TARGET_NR_stty
4941 case TARGET_NR_stty:
4942 goto unimplemented;
4943 #endif
4944 #ifdef TARGET_NR_gtty
4945 case TARGET_NR_gtty:
4946 goto unimplemented;
4947 #endif
4948 case TARGET_NR_access:
4949 if (!(p = lock_user_string(arg1)))
4950 goto efault;
4951 ret = get_errno(access(path(p), arg2));
4952 unlock_user(p, arg1, 0);
4953 break;
4954 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4955 case TARGET_NR_faccessat:
4956 if (!(p = lock_user_string(arg2)))
4957 goto efault;
4958 ret = get_errno(sys_faccessat(arg1, p, arg3));
4959 unlock_user(p, arg2, 0);
4960 break;
4961 #endif
4962 #ifdef TARGET_NR_nice /* not on alpha */
4963 case TARGET_NR_nice:
4964 ret = get_errno(nice(arg1));
4965 break;
4966 #endif
4967 #ifdef TARGET_NR_ftime
4968 case TARGET_NR_ftime:
4969 goto unimplemented;
4970 #endif
4971 case TARGET_NR_sync:
4972 sync();
4973 ret = 0;
4974 break;
4975 case TARGET_NR_kill:
4976 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
4977 break;
4978 case TARGET_NR_rename:
4980 void *p2;
4981 p = lock_user_string(arg1);
4982 p2 = lock_user_string(arg2);
4983 if (!p || !p2)
4984 ret = -TARGET_EFAULT;
4985 else
4986 ret = get_errno(rename(p, p2));
4987 unlock_user(p2, arg2, 0);
4988 unlock_user(p, arg1, 0);
4990 break;
4991 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4992 case TARGET_NR_renameat:
4994 void *p2;
4995 p = lock_user_string(arg2);
4996 p2 = lock_user_string(arg4);
4997 if (!p || !p2)
4998 ret = -TARGET_EFAULT;
4999 else
5000 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5001 unlock_user(p2, arg4, 0);
5002 unlock_user(p, arg2, 0);
5004 break;
5005 #endif
5006 case TARGET_NR_mkdir:
5007 if (!(p = lock_user_string(arg1)))
5008 goto efault;
5009 ret = get_errno(mkdir(p, arg2));
5010 unlock_user(p, arg1, 0);
5011 break;
5012 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5013 case TARGET_NR_mkdirat:
5014 if (!(p = lock_user_string(arg2)))
5015 goto efault;
5016 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5017 unlock_user(p, arg2, 0);
5018 break;
5019 #endif
5020 case TARGET_NR_rmdir:
5021 if (!(p = lock_user_string(arg1)))
5022 goto efault;
5023 ret = get_errno(rmdir(p));
5024 unlock_user(p, arg1, 0);
5025 break;
5026 case TARGET_NR_dup:
5027 ret = get_errno(dup(arg1));
5028 break;
5029 case TARGET_NR_pipe:
5030 ret = do_pipe(cpu_env, arg1, 0, 0);
5031 break;
5032 #ifdef TARGET_NR_pipe2
5033 case TARGET_NR_pipe2:
5034 ret = do_pipe(cpu_env, arg1, arg2, 1);
5035 break;
5036 #endif
5037 case TARGET_NR_times:
5039 struct target_tms *tmsp;
5040 struct tms tms;
5041 ret = get_errno(times(&tms));
5042 if (arg1) {
5043 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5044 if (!tmsp)
5045 goto efault;
5046 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
5047 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
5048 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
5049 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
5051 if (!is_error(ret))
5052 ret = host_to_target_clock_t(ret);
5054 break;
5055 #ifdef TARGET_NR_prof
5056 case TARGET_NR_prof:
5057 goto unimplemented;
5058 #endif
5059 #ifdef TARGET_NR_signal
5060 case TARGET_NR_signal:
5061 goto unimplemented;
5062 #endif
5063 case TARGET_NR_acct:
5064 if (arg1 == 0) {
5065 ret = get_errno(acct(NULL));
5066 } else {
5067 if (!(p = lock_user_string(arg1)))
5068 goto efault;
5069 ret = get_errno(acct(path(p)));
5070 unlock_user(p, arg1, 0);
5072 break;
5073 #ifdef TARGET_NR_umount2 /* not on alpha */
5074 case TARGET_NR_umount2:
5075 if (!(p = lock_user_string(arg1)))
5076 goto efault;
5077 ret = get_errno(umount2(p, arg2));
5078 unlock_user(p, arg1, 0);
5079 break;
5080 #endif
5081 #ifdef TARGET_NR_lock
5082 case TARGET_NR_lock:
5083 goto unimplemented;
5084 #endif
5085 case TARGET_NR_ioctl:
5086 ret = do_ioctl(arg1, arg2, arg3);
5087 break;
5088 case TARGET_NR_fcntl:
5089 ret = do_fcntl(arg1, arg2, arg3);
5090 break;
5091 #ifdef TARGET_NR_mpx
5092 case TARGET_NR_mpx:
5093 goto unimplemented;
5094 #endif
5095 case TARGET_NR_setpgid:
5096 ret = get_errno(setpgid(arg1, arg2));
5097 break;
5098 #ifdef TARGET_NR_ulimit
5099 case TARGET_NR_ulimit:
5100 goto unimplemented;
5101 #endif
5102 #ifdef TARGET_NR_oldolduname
5103 case TARGET_NR_oldolduname:
5104 goto unimplemented;
5105 #endif
5106 case TARGET_NR_umask:
5107 ret = get_errno(umask(arg1));
5108 break;
5109 case TARGET_NR_chroot:
5110 if (!(p = lock_user_string(arg1)))
5111 goto efault;
5112 ret = get_errno(chroot(p));
5113 unlock_user(p, arg1, 0);
5114 break;
5115 case TARGET_NR_ustat:
5116 goto unimplemented;
5117 case TARGET_NR_dup2:
5118 ret = get_errno(dup2(arg1, arg2));
5119 break;
5120 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5121 case TARGET_NR_dup3:
5122 ret = get_errno(dup3(arg1, arg2, arg3));
5123 break;
5124 #endif
5125 #ifdef TARGET_NR_getppid /* not on alpha */
5126 case TARGET_NR_getppid:
5127 ret = get_errno(getppid());
5128 break;
5129 #endif
5130 case TARGET_NR_getpgrp:
5131 ret = get_errno(getpgrp());
5132 break;
5133 case TARGET_NR_setsid:
5134 ret = get_errno(setsid());
5135 break;
5136 #ifdef TARGET_NR_sigaction
5137 case TARGET_NR_sigaction:
5139 #if defined(TARGET_ALPHA)
5140 struct target_sigaction act, oact, *pact = 0;
5141 struct target_old_sigaction *old_act;
5142 if (arg2) {
5143 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5144 goto efault;
5145 act._sa_handler = old_act->_sa_handler;
5146 target_siginitset(&act.sa_mask, old_act->sa_mask);
5147 act.sa_flags = old_act->sa_flags;
5148 act.sa_restorer = 0;
5149 unlock_user_struct(old_act, arg2, 0);
5150 pact = &act;
5152 ret = get_errno(do_sigaction(arg1, pact, &oact));
5153 if (!is_error(ret) && arg3) {
5154 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5155 goto efault;
5156 old_act->_sa_handler = oact._sa_handler;
5157 old_act->sa_mask = oact.sa_mask.sig[0];
5158 old_act->sa_flags = oact.sa_flags;
5159 unlock_user_struct(old_act, arg3, 1);
5161 #elif defined(TARGET_MIPS)
5162 struct target_sigaction act, oact, *pact, *old_act;
5164 if (arg2) {
5165 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5166 goto efault;
5167 act._sa_handler = old_act->_sa_handler;
5168 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5169 act.sa_flags = old_act->sa_flags;
5170 unlock_user_struct(old_act, arg2, 0);
5171 pact = &act;
5172 } else {
5173 pact = NULL;
5176 ret = get_errno(do_sigaction(arg1, pact, &oact));
5178 if (!is_error(ret) && arg3) {
5179 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5180 goto efault;
5181 old_act->_sa_handler = oact._sa_handler;
5182 old_act->sa_flags = oact.sa_flags;
5183 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5184 old_act->sa_mask.sig[1] = 0;
5185 old_act->sa_mask.sig[2] = 0;
5186 old_act->sa_mask.sig[3] = 0;
5187 unlock_user_struct(old_act, arg3, 1);
5189 #else
5190 struct target_old_sigaction *old_act;
5191 struct target_sigaction act, oact, *pact;
5192 if (arg2) {
5193 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5194 goto efault;
5195 act._sa_handler = old_act->_sa_handler;
5196 target_siginitset(&act.sa_mask, old_act->sa_mask);
5197 act.sa_flags = old_act->sa_flags;
5198 act.sa_restorer = old_act->sa_restorer;
5199 unlock_user_struct(old_act, arg2, 0);
5200 pact = &act;
5201 } else {
5202 pact = NULL;
5204 ret = get_errno(do_sigaction(arg1, pact, &oact));
5205 if (!is_error(ret) && arg3) {
5206 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5207 goto efault;
5208 old_act->_sa_handler = oact._sa_handler;
5209 old_act->sa_mask = oact.sa_mask.sig[0];
5210 old_act->sa_flags = oact.sa_flags;
5211 old_act->sa_restorer = oact.sa_restorer;
5212 unlock_user_struct(old_act, arg3, 1);
5214 #endif
5216 break;
5217 #endif
5218 case TARGET_NR_rt_sigaction:
5220 #if defined(TARGET_ALPHA)
5221 struct target_sigaction act, oact, *pact = 0;
5222 struct target_rt_sigaction *rt_act;
5223 /* ??? arg4 == sizeof(sigset_t). */
5224 if (arg2) {
5225 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5226 goto efault;
5227 act._sa_handler = rt_act->_sa_handler;
5228 act.sa_mask = rt_act->sa_mask;
5229 act.sa_flags = rt_act->sa_flags;
5230 act.sa_restorer = arg5;
5231 unlock_user_struct(rt_act, arg2, 0);
5232 pact = &act;
5234 ret = get_errno(do_sigaction(arg1, pact, &oact));
5235 if (!is_error(ret) && arg3) {
5236 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5237 goto efault;
5238 rt_act->_sa_handler = oact._sa_handler;
5239 rt_act->sa_mask = oact.sa_mask;
5240 rt_act->sa_flags = oact.sa_flags;
5241 unlock_user_struct(rt_act, arg3, 1);
5243 #else
5244 struct target_sigaction *act;
5245 struct target_sigaction *oact;
5247 if (arg2) {
5248 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5249 goto efault;
5250 } else
5251 act = NULL;
5252 if (arg3) {
5253 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5254 ret = -TARGET_EFAULT;
5255 goto rt_sigaction_fail;
5257 } else
5258 oact = NULL;
5259 ret = get_errno(do_sigaction(arg1, act, oact));
5260 rt_sigaction_fail:
5261 if (act)
5262 unlock_user_struct(act, arg2, 0);
5263 if (oact)
5264 unlock_user_struct(oact, arg3, 1);
5265 #endif
5267 break;
5268 #ifdef TARGET_NR_sgetmask /* not on alpha */
5269 case TARGET_NR_sgetmask:
5271 sigset_t cur_set;
5272 abi_ulong target_set;
5273 sigprocmask(0, NULL, &cur_set);
5274 host_to_target_old_sigset(&target_set, &cur_set);
5275 ret = target_set;
5277 break;
5278 #endif
5279 #ifdef TARGET_NR_ssetmask /* not on alpha */
5280 case TARGET_NR_ssetmask:
5282 sigset_t set, oset, cur_set;
5283 abi_ulong target_set = arg1;
5284 sigprocmask(0, NULL, &cur_set);
5285 target_to_host_old_sigset(&set, &target_set);
5286 sigorset(&set, &set, &cur_set);
5287 sigprocmask(SIG_SETMASK, &set, &oset);
5288 host_to_target_old_sigset(&target_set, &oset);
5289 ret = target_set;
5291 break;
5292 #endif
5293 #ifdef TARGET_NR_sigprocmask
5294 case TARGET_NR_sigprocmask:
5296 #if defined(TARGET_ALPHA)
5297 sigset_t set, oldset;
5298 abi_ulong mask;
5299 int how;
5301 switch (arg1) {
5302 case TARGET_SIG_BLOCK:
5303 how = SIG_BLOCK;
5304 break;
5305 case TARGET_SIG_UNBLOCK:
5306 how = SIG_UNBLOCK;
5307 break;
5308 case TARGET_SIG_SETMASK:
5309 how = SIG_SETMASK;
5310 break;
5311 default:
5312 ret = -TARGET_EINVAL;
5313 goto fail;
5315 mask = arg2;
5316 target_to_host_old_sigset(&set, &mask);
5318 ret = get_errno(sigprocmask(how, &set, &oldset));
5320 if (!is_error(ret)) {
5321 host_to_target_old_sigset(&mask, &oldset);
5322 ret = mask;
5323 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5325 #else
5326 sigset_t set, oldset, *set_ptr;
5327 int how;
5329 if (arg2) {
5330 switch (arg1) {
5331 case TARGET_SIG_BLOCK:
5332 how = SIG_BLOCK;
5333 break;
5334 case TARGET_SIG_UNBLOCK:
5335 how = SIG_UNBLOCK;
5336 break;
5337 case TARGET_SIG_SETMASK:
5338 how = SIG_SETMASK;
5339 break;
5340 default:
5341 ret = -TARGET_EINVAL;
5342 goto fail;
5344 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5345 goto efault;
5346 target_to_host_old_sigset(&set, p);
5347 unlock_user(p, arg2, 0);
5348 set_ptr = &set;
5349 } else {
5350 how = 0;
5351 set_ptr = NULL;
5353 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5354 if (!is_error(ret) && arg3) {
5355 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5356 goto efault;
5357 host_to_target_old_sigset(p, &oldset);
5358 unlock_user(p, arg3, sizeof(target_sigset_t));
5360 #endif
5362 break;
5363 #endif
5364 case TARGET_NR_rt_sigprocmask:
5366 int how = arg1;
5367 sigset_t set, oldset, *set_ptr;
5369 if (arg2) {
5370 switch(how) {
5371 case TARGET_SIG_BLOCK:
5372 how = SIG_BLOCK;
5373 break;
5374 case TARGET_SIG_UNBLOCK:
5375 how = SIG_UNBLOCK;
5376 break;
5377 case TARGET_SIG_SETMASK:
5378 how = SIG_SETMASK;
5379 break;
5380 default:
5381 ret = -TARGET_EINVAL;
5382 goto fail;
5384 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5385 goto efault;
5386 target_to_host_sigset(&set, p);
5387 unlock_user(p, arg2, 0);
5388 set_ptr = &set;
5389 } else {
5390 how = 0;
5391 set_ptr = NULL;
5393 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5394 if (!is_error(ret) && arg3) {
5395 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5396 goto efault;
5397 host_to_target_sigset(p, &oldset);
5398 unlock_user(p, arg3, sizeof(target_sigset_t));
5401 break;
5402 #ifdef TARGET_NR_sigpending
5403 case TARGET_NR_sigpending:
5405 sigset_t set;
5406 ret = get_errno(sigpending(&set));
5407 if (!is_error(ret)) {
5408 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5409 goto efault;
5410 host_to_target_old_sigset(p, &set);
5411 unlock_user(p, arg1, sizeof(target_sigset_t));
5414 break;
5415 #endif
5416 case TARGET_NR_rt_sigpending:
5418 sigset_t set;
5419 ret = get_errno(sigpending(&set));
5420 if (!is_error(ret)) {
5421 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5422 goto efault;
5423 host_to_target_sigset(p, &set);
5424 unlock_user(p, arg1, sizeof(target_sigset_t));
5427 break;
5428 #ifdef TARGET_NR_sigsuspend
5429 case TARGET_NR_sigsuspend:
5431 sigset_t set;
5432 #if defined(TARGET_ALPHA)
5433 abi_ulong mask = arg1;
5434 target_to_host_old_sigset(&set, &mask);
5435 #else
5436 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5437 goto efault;
5438 target_to_host_old_sigset(&set, p);
5439 unlock_user(p, arg1, 0);
5440 #endif
5441 ret = get_errno(sigsuspend(&set));
5443 break;
5444 #endif
5445 case TARGET_NR_rt_sigsuspend:
5447 sigset_t set;
5448 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5449 goto efault;
5450 target_to_host_sigset(&set, p);
5451 unlock_user(p, arg1, 0);
5452 ret = get_errno(sigsuspend(&set));
5454 break;
5455 case TARGET_NR_rt_sigtimedwait:
5457 sigset_t set;
5458 struct timespec uts, *puts;
5459 siginfo_t uinfo;
5461 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5462 goto efault;
5463 target_to_host_sigset(&set, p);
5464 unlock_user(p, arg1, 0);
5465 if (arg3) {
5466 puts = &uts;
5467 target_to_host_timespec(puts, arg3);
5468 } else {
5469 puts = NULL;
5471 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5472 if (!is_error(ret) && arg2) {
5473 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5474 goto efault;
5475 host_to_target_siginfo(p, &uinfo);
5476 unlock_user(p, arg2, sizeof(target_siginfo_t));
5479 break;
5480 case TARGET_NR_rt_sigqueueinfo:
5482 siginfo_t uinfo;
5483 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
5484 goto efault;
5485 target_to_host_siginfo(&uinfo, p);
5486 unlock_user(p, arg3, 0);
5487 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5489 break;
5490 #ifdef TARGET_NR_sigreturn
5491 case TARGET_NR_sigreturn:
5492 /* NOTE: ret is eax, so no transcoding must be done */
5493 ret = do_sigreturn(cpu_env);
5494 break;
5495 #endif
5496 case TARGET_NR_rt_sigreturn:
5497 /* NOTE: ret is eax, so no transcoding must be done */
5498 ret = do_rt_sigreturn(cpu_env);
5499 break;
5500 case TARGET_NR_sethostname:
5501 if (!(p = lock_user_string(arg1)))
5502 goto efault;
5503 ret = get_errno(sethostname(p, arg2));
5504 unlock_user(p, arg1, 0);
5505 break;
5506 case TARGET_NR_setrlimit:
5508 int resource = arg1;
5509 struct target_rlimit *target_rlim;
5510 struct rlimit rlim;
5511 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5512 goto efault;
5513 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5514 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5515 unlock_user_struct(target_rlim, arg2, 0);
5516 ret = get_errno(setrlimit(resource, &rlim));
5518 break;
5519 case TARGET_NR_getrlimit:
5521 int resource = arg1;
5522 struct target_rlimit *target_rlim;
5523 struct rlimit rlim;
5525 ret = get_errno(getrlimit(resource, &rlim));
5526 if (!is_error(ret)) {
5527 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5528 goto efault;
5529 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5530 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5531 unlock_user_struct(target_rlim, arg2, 1);
5534 break;
5535 case TARGET_NR_getrusage:
5537 struct rusage rusage;
5538 ret = get_errno(getrusage(arg1, &rusage));
5539 if (!is_error(ret)) {
5540 host_to_target_rusage(arg2, &rusage);
5543 break;
5544 case TARGET_NR_gettimeofday:
5546 struct timeval tv;
5547 ret = get_errno(gettimeofday(&tv, NULL));
5548 if (!is_error(ret)) {
5549 if (copy_to_user_timeval(arg1, &tv))
5550 goto efault;
5553 break;
5554 case TARGET_NR_settimeofday:
5556 struct timeval tv;
5557 if (copy_from_user_timeval(&tv, arg1))
5558 goto efault;
5559 ret = get_errno(settimeofday(&tv, NULL));
5561 break;
5562 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5563 case TARGET_NR_select:
5565 struct target_sel_arg_struct *sel;
5566 abi_ulong inp, outp, exp, tvp;
5567 long nsel;
5569 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5570 goto efault;
5571 nsel = tswapl(sel->n);
5572 inp = tswapl(sel->inp);
5573 outp = tswapl(sel->outp);
5574 exp = tswapl(sel->exp);
5575 tvp = tswapl(sel->tvp);
5576 unlock_user_struct(sel, arg1, 0);
5577 ret = do_select(nsel, inp, outp, exp, tvp);
5579 break;
5580 #endif
5581 #ifdef TARGET_NR_pselect6
5582 case TARGET_NR_pselect6:
5583 goto unimplemented_nowarn;
5584 #endif
5585 case TARGET_NR_symlink:
5587 void *p2;
5588 p = lock_user_string(arg1);
5589 p2 = lock_user_string(arg2);
5590 if (!p || !p2)
5591 ret = -TARGET_EFAULT;
5592 else
5593 ret = get_errno(symlink(p, p2));
5594 unlock_user(p2, arg2, 0);
5595 unlock_user(p, arg1, 0);
5597 break;
5598 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5599 case TARGET_NR_symlinkat:
5601 void *p2;
5602 p = lock_user_string(arg1);
5603 p2 = lock_user_string(arg3);
5604 if (!p || !p2)
5605 ret = -TARGET_EFAULT;
5606 else
5607 ret = get_errno(sys_symlinkat(p, arg2, p2));
5608 unlock_user(p2, arg3, 0);
5609 unlock_user(p, arg1, 0);
5611 break;
5612 #endif
5613 #ifdef TARGET_NR_oldlstat
5614 case TARGET_NR_oldlstat:
5615 goto unimplemented;
5616 #endif
5617 case TARGET_NR_readlink:
5619 void *p2, *temp;
5620 p = lock_user_string(arg1);
5621 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5622 if (!p || !p2)
5623 ret = -TARGET_EFAULT;
5624 else {
5625 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5626 char real[PATH_MAX];
5627 temp = realpath(exec_path,real);
5628 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
5629 snprintf((char *)p2, arg3, "%s", real);
5631 else
5632 ret = get_errno(readlink(path(p), p2, arg3));
5634 unlock_user(p2, arg2, ret);
5635 unlock_user(p, arg1, 0);
5637 break;
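/* Note: the /proc/self/exe special case above makes the guest see the
 * emulated program rather than the QEMU binary: the link is resolved via
 * realpath() on exec_path and copied into the user buffer, while every
 * other path goes straight to the host readlink(). */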
5638 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5639 case TARGET_NR_readlinkat:
5641 void *p2;
5642 p = lock_user_string(arg2);
5643 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5644 if (!p || !p2)
5645 ret = -TARGET_EFAULT;
5646 else
5647 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5648 unlock_user(p2, arg3, ret);
5649 unlock_user(p, arg2, 0);
5651 break;
5652 #endif
5653 #ifdef TARGET_NR_uselib
5654 case TARGET_NR_uselib:
5655 goto unimplemented;
5656 #endif
5657 #ifdef TARGET_NR_swapon
5658 case TARGET_NR_swapon:
5659 if (!(p = lock_user_string(arg1)))
5660 goto efault;
5661 ret = get_errno(swapon(p, arg2));
5662 unlock_user(p, arg1, 0);
5663 break;
5664 #endif
5665 case TARGET_NR_reboot:
5666 goto unimplemented;
5667 #ifdef TARGET_NR_readdir
5668 case TARGET_NR_readdir:
5669 goto unimplemented;
5670 #endif
5671 #ifdef TARGET_NR_mmap
5672 case TARGET_NR_mmap:
5673 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5674 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5675 || defined(TARGET_S390X)
5677 abi_ulong *v;
5678 abi_ulong v1, v2, v3, v4, v5, v6;
5679 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5680 goto efault;
5681 v1 = tswapl(v[0]);
5682 v2 = tswapl(v[1]);
5683 v3 = tswapl(v[2]);
5684 v4 = tswapl(v[3]);
5685 v5 = tswapl(v[4]);
5686 v6 = tswapl(v[5]);
5687 unlock_user(v, arg1, 0);
5688 ret = get_errno(target_mmap(v1, v2, v3,
5689 target_to_host_bitmask(v4, mmap_flags_tbl),
5690 v5, v6));
5692 #else
5693 ret = get_errno(target_mmap(arg1, arg2, arg3,
5694 target_to_host_bitmask(arg4, mmap_flags_tbl),
5695 arg5,
5696 arg6));
5697 #endif
5698 break;
5699 #endif
5700 #ifdef TARGET_NR_mmap2
5701 case TARGET_NR_mmap2:
5702 #ifndef MMAP_SHIFT
5703 #define MMAP_SHIFT 12
5704 #endif
5705 ret = get_errno(target_mmap(arg1, arg2, arg3,
5706 target_to_host_bitmask(arg4, mmap_flags_tbl),
5707 arg5,
5708 arg6 << MMAP_SHIFT));
5709 break;
5710 #endif
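/* Note: the first #if branch above handles targets whose legacy mmap
 * syscall passes a single pointer to an array of six arguments, which is
 * unpacked with tswapl() before calling target_mmap(). For mmap2 the
 * last argument is a page-unit offset, so with the default MMAP_SHIFT of
 * 12 an arg6 of 3 means a byte offset of 3 * 4096 = 12288. */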
5711 case TARGET_NR_munmap:
5712 ret = get_errno(target_munmap(arg1, arg2));
5713 break;
5714 case TARGET_NR_mprotect:
5716 TaskState *ts = ((CPUState *)cpu_env)->opaque;
5717 /* Special hack to detect libc making the stack executable. */
5718 if ((arg3 & PROT_GROWSDOWN)
5719 && arg1 >= ts->info->stack_limit
5720 && arg1 <= ts->info->start_stack) {
5721 arg3 &= ~PROT_GROWSDOWN;
5722 arg2 = arg2 + arg1 - ts->info->stack_limit;
5723 arg1 = ts->info->stack_limit;
5726 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5727 break;
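/* Note: the mprotect "stack hack" above widens a PROT_GROWSDOWN request
 * that falls inside the guest stack so that it starts at
 * ts->info->stack_limit, and strips the GROWSDOWN flag before calling
 * target_mprotect(); presumably the flag cannot be applied meaningfully
 * to the emulated stack mapping itself. */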
5728 #ifdef TARGET_NR_mremap
5729 case TARGET_NR_mremap:
5730 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5731 break;
5732 #endif
5733 /* ??? msync/mlock/munlock are broken for softmmu. */
5734 #ifdef TARGET_NR_msync
5735 case TARGET_NR_msync:
5736 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5737 break;
5738 #endif
5739 #ifdef TARGET_NR_mlock
5740 case TARGET_NR_mlock:
5741 ret = get_errno(mlock(g2h(arg1), arg2));
5742 break;
5743 #endif
5744 #ifdef TARGET_NR_munlock
5745 case TARGET_NR_munlock:
5746 ret = get_errno(munlock(g2h(arg1), arg2));
5747 break;
5748 #endif
5749 #ifdef TARGET_NR_mlockall
5750 case TARGET_NR_mlockall:
5751 ret = get_errno(mlockall(arg1));
5752 break;
5753 #endif
5754 #ifdef TARGET_NR_munlockall
5755 case TARGET_NR_munlockall:
5756 ret = get_errno(munlockall());
5757 break;
5758 #endif
5759 case TARGET_NR_truncate:
5760 if (!(p = lock_user_string(arg1)))
5761 goto efault;
5762 ret = get_errno(truncate(p, arg2));
5763 unlock_user(p, arg1, 0);
5764 break;
5765 case TARGET_NR_ftruncate:
5766 ret = get_errno(ftruncate(arg1, arg2));
5767 break;
5768 case TARGET_NR_fchmod:
5769 ret = get_errno(fchmod(arg1, arg2));
5770 break;
5771 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5772 case TARGET_NR_fchmodat:
5773 if (!(p = lock_user_string(arg2)))
5774 goto efault;
5775 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5776 unlock_user(p, arg2, 0);
5777 break;
5778 #endif
5779 case TARGET_NR_getpriority:
5780 /* libc does special remapping of the return value of
5781 * sys_getpriority() so it's just easiest to call
5782 * sys_getpriority() directly rather than through libc. */
5783 ret = get_errno(sys_getpriority(arg1, arg2));
5784 break;
5785 case TARGET_NR_setpriority:
5786 ret = get_errno(setpriority(arg1, arg2, arg3));
5787 break;
5788 #ifdef TARGET_NR_profil
5789 case TARGET_NR_profil:
5790 goto unimplemented;
5791 #endif
5792 case TARGET_NR_statfs:
5793 if (!(p = lock_user_string(arg1)))
5794 goto efault;
5795 ret = get_errno(statfs(path(p), &stfs));
5796 unlock_user(p, arg1, 0);
5797 convert_statfs:
5798 if (!is_error(ret)) {
5799 struct target_statfs *target_stfs;
5801 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5802 goto efault;
5803 __put_user(stfs.f_type, &target_stfs->f_type);
5804 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5805 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5806 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5807 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5808 __put_user(stfs.f_files, &target_stfs->f_files);
5809 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5810 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5811 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5812 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5813 unlock_user_struct(target_stfs, arg2, 1);
5815 break;
5816 case TARGET_NR_fstatfs:
5817 ret = get_errno(fstatfs(arg1, &stfs));
5818 goto convert_statfs;
5819 #ifdef TARGET_NR_statfs64
5820 case TARGET_NR_statfs64:
5821 if (!(p = lock_user_string(arg1)))
5822 goto efault;
5823 ret = get_errno(statfs(path(p), &stfs));
5824 unlock_user(p, arg1, 0);
5825 convert_statfs64:
5826 if (!is_error(ret)) {
5827 struct target_statfs64 *target_stfs;
5829 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5830 goto efault;
5831 __put_user(stfs.f_type, &target_stfs->f_type);
5832 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5833 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5834 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5835 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5836 __put_user(stfs.f_files, &target_stfs->f_files);
5837 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5838 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5839 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5840 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5841 unlock_user_struct(target_stfs, arg3, 1);
5843 break;
5844 case TARGET_NR_fstatfs64:
5845 ret = get_errno(fstatfs(arg1, &stfs));
5846 goto convert_statfs64;
5847 #endif
5848 #ifdef TARGET_NR_ioperm
5849 case TARGET_NR_ioperm:
5850 goto unimplemented;
5851 #endif
5852 #ifdef TARGET_NR_socketcall
5853 case TARGET_NR_socketcall:
5854 ret = do_socketcall(arg1, arg2);
5855 break;
5856 #endif
5857 #ifdef TARGET_NR_accept
5858 case TARGET_NR_accept:
5859 ret = do_accept(arg1, arg2, arg3);
5860 break;
5861 #endif
5862 #ifdef TARGET_NR_bind
5863 case TARGET_NR_bind:
5864 ret = do_bind(arg1, arg2, arg3);
5865 break;
5866 #endif
5867 #ifdef TARGET_NR_connect
5868 case TARGET_NR_connect:
5869 ret = do_connect(arg1, arg2, arg3);
5870 break;
5871 #endif
5872 #ifdef TARGET_NR_getpeername
5873 case TARGET_NR_getpeername:
5874 ret = do_getpeername(arg1, arg2, arg3);
5875 break;
5876 #endif
5877 #ifdef TARGET_NR_getsockname
5878 case TARGET_NR_getsockname:
5879 ret = do_getsockname(arg1, arg2, arg3);
5880 break;
5881 #endif
5882 #ifdef TARGET_NR_getsockopt
5883 case TARGET_NR_getsockopt:
5884 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
5885 break;
5886 #endif
5887 #ifdef TARGET_NR_listen
5888 case TARGET_NR_listen:
5889 ret = get_errno(listen(arg1, arg2));
5890 break;
5891 #endif
5892 #ifdef TARGET_NR_recv
5893 case TARGET_NR_recv:
5894 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
5895 break;
5896 #endif
5897 #ifdef TARGET_NR_recvfrom
5898 case TARGET_NR_recvfrom:
5899 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
5900 break;
5901 #endif
5902 #ifdef TARGET_NR_recvmsg
5903 case TARGET_NR_recvmsg:
5904 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
5905 break;
5906 #endif
5907 #ifdef TARGET_NR_send
5908 case TARGET_NR_send:
5909 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
5910 break;
5911 #endif
5912 #ifdef TARGET_NR_sendmsg
5913 case TARGET_NR_sendmsg:
5914 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
5915 break;
5916 #endif
5917 #ifdef TARGET_NR_sendto
5918 case TARGET_NR_sendto:
5919 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
5920 break;
5921 #endif
5922 #ifdef TARGET_NR_shutdown
5923 case TARGET_NR_shutdown:
5924 ret = get_errno(shutdown(arg1, arg2));
5925 break;
5926 #endif
5927 #ifdef TARGET_NR_socket
5928 case TARGET_NR_socket:
5929 ret = do_socket(arg1, arg2, arg3);
5930 break;
5931 #endif
5932 #ifdef TARGET_NR_socketpair
5933 case TARGET_NR_socketpair:
5934 ret = do_socketpair(arg1, arg2, arg3, arg4);
5935 break;
5936 #endif
5937 #ifdef TARGET_NR_setsockopt
5938 case TARGET_NR_setsockopt:
5939 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
5940 break;
5941 #endif
5943 case TARGET_NR_syslog:
5944 if (!(p = lock_user_string(arg2)))
5945 goto efault;
5946 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
5947 unlock_user(p, arg2, 0);
5948 break;
5950 case TARGET_NR_setitimer:
5952 struct itimerval value, ovalue, *pvalue;
5954 if (arg2) {
5955 pvalue = &value;
5956 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
5957 || copy_from_user_timeval(&pvalue->it_value,
5958 arg2 + sizeof(struct target_timeval)))
5959 goto efault;
5960 } else {
5961 pvalue = NULL;
5963 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
5964 if (!is_error(ret) && arg3) {
5965 if (copy_to_user_timeval(arg3,
5966 &ovalue.it_interval)
5967 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
5968 &ovalue.it_value))
5969 goto efault;
5972 break;
5973 case TARGET_NR_getitimer:
5975 struct itimerval value;
5977 ret = get_errno(getitimer(arg1, &value));
5978 if (!is_error(ret) && arg2) {
5979 if (copy_to_user_timeval(arg2,
5980 &value.it_interval)
5981 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
5982 &value.it_value))
5983 goto efault;
5986 break;
5987 case TARGET_NR_stat:
5988 if (!(p = lock_user_string(arg1)))
5989 goto efault;
5990 ret = get_errno(stat(path(p), &st));
5991 unlock_user(p, arg1, 0);
5992 goto do_stat;
5993 case TARGET_NR_lstat:
5994 if (!(p = lock_user_string(arg1)))
5995 goto efault;
5996 ret = get_errno(lstat(path(p), &st));
5997 unlock_user(p, arg1, 0);
5998 goto do_stat;
5999 case TARGET_NR_fstat:
6001 ret = get_errno(fstat(arg1, &st));
6002 do_stat:
6003 if (!is_error(ret)) {
6004 struct target_stat *target_st;
6006 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6007 goto efault;
6008 memset(target_st, 0, sizeof(*target_st));
6009 __put_user(st.st_dev, &target_st->st_dev);
6010 __put_user(st.st_ino, &target_st->st_ino);
6011 __put_user(st.st_mode, &target_st->st_mode);
6012 __put_user(st.st_uid, &target_st->st_uid);
6013 __put_user(st.st_gid, &target_st->st_gid);
6014 __put_user(st.st_nlink, &target_st->st_nlink);
6015 __put_user(st.st_rdev, &target_st->st_rdev);
6016 __put_user(st.st_size, &target_st->st_size);
6017 __put_user(st.st_blksize, &target_st->st_blksize);
6018 __put_user(st.st_blocks, &target_st->st_blocks);
6019 __put_user(st.st_atime, &target_st->target_st_atime);
6020 __put_user(st.st_mtime, &target_st->target_st_mtime);
6021 __put_user(st.st_ctime, &target_st->target_st_ctime);
6022 unlock_user_struct(target_st, arg2, 1);
6025 break;
6026 #ifdef TARGET_NR_olduname
6027 case TARGET_NR_olduname:
6028 goto unimplemented;
6029 #endif
6030 #ifdef TARGET_NR_iopl
6031 case TARGET_NR_iopl:
6032 goto unimplemented;
6033 #endif
6034 case TARGET_NR_vhangup:
6035 ret = get_errno(vhangup());
6036 break;
6037 #ifdef TARGET_NR_idle
6038 case TARGET_NR_idle:
6039 goto unimplemented;
6040 #endif
6041 #ifdef TARGET_NR_syscall
6042 case TARGET_NR_syscall:
6043 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
6044 break;
6045 #endif
6046 case TARGET_NR_wait4:
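/* The host wait status and struct rusage use host layouts; both are
   converted back into the guest representation before being stored
   through arg2 / arg4 below. */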
6048 int status;
6049 abi_long status_ptr = arg2;
6050 struct rusage rusage, *rusage_ptr;
6051 abi_ulong target_rusage = arg4;
6052 if (target_rusage)
6053 rusage_ptr = &rusage;
6054 else
6055 rusage_ptr = NULL;
6056 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6057 if (!is_error(ret)) {
6058 if (status_ptr) {
6059 status = host_to_target_waitstatus(status);
6060 if (put_user_s32(status, status_ptr))
6061 goto efault;
6063 if (target_rusage)
6064 host_to_target_rusage(target_rusage, &rusage);
6067 break;
6068 #ifdef TARGET_NR_swapoff
6069 case TARGET_NR_swapoff:
6070 if (!(p = lock_user_string(arg1)))
6071 goto efault;
6072 ret = get_errno(swapoff(p));
6073 unlock_user(p, arg1, 0);
6074 break;
6075 #endif
6076 case TARGET_NR_sysinfo:
6078 struct target_sysinfo *target_value;
6079 struct sysinfo value;
6080 ret = get_errno(sysinfo(&value));
6081 if (!is_error(ret) && arg1)
6083 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6084 goto efault;
6085 __put_user(value.uptime, &target_value->uptime);
6086 __put_user(value.loads[0], &target_value->loads[0]);
6087 __put_user(value.loads[1], &target_value->loads[1]);
6088 __put_user(value.loads[2], &target_value->loads[2]);
6089 __put_user(value.totalram, &target_value->totalram);
6090 __put_user(value.freeram, &target_value->freeram);
6091 __put_user(value.sharedram, &target_value->sharedram);
6092 __put_user(value.bufferram, &target_value->bufferram);
6093 __put_user(value.totalswap, &target_value->totalswap);
6094 __put_user(value.freeswap, &target_value->freeswap);
6095 __put_user(value.procs, &target_value->procs);
6096 __put_user(value.totalhigh, &target_value->totalhigh);
6097 __put_user(value.freehigh, &target_value->freehigh);
6098 __put_user(value.mem_unit, &target_value->mem_unit);
6099 unlock_user_struct(target_value, arg1, 1);
6102 break;
6103 #ifdef TARGET_NR_ipc
6104 case TARGET_NR_ipc:
6105 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6106 break;
6107 #endif
6108 #ifdef TARGET_NR_semget
6109 case TARGET_NR_semget:
6110 ret = get_errno(semget(arg1, arg2, arg3));
6111 break;
6112 #endif
6113 #ifdef TARGET_NR_semop
6114 case TARGET_NR_semop:
6115 ret = get_errno(do_semop(arg1, arg2, arg3));
6116 break;
6117 #endif
6118 #ifdef TARGET_NR_semctl
6119 case TARGET_NR_semctl:
6120 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6121 break;
6122 #endif
6123 #ifdef TARGET_NR_msgctl
6124 case TARGET_NR_msgctl:
6125 ret = do_msgctl(arg1, arg2, arg3);
6126 break;
6127 #endif
6128 #ifdef TARGET_NR_msgget
6129 case TARGET_NR_msgget:
6130 ret = get_errno(msgget(arg1, arg2));
6131 break;
6132 #endif
6133 #ifdef TARGET_NR_msgrcv
6134 case TARGET_NR_msgrcv:
6135 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6136 break;
6137 #endif
6138 #ifdef TARGET_NR_msgsnd
6139 case TARGET_NR_msgsnd:
6140 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6141 break;
6142 #endif
6143 #ifdef TARGET_NR_shmget
6144 case TARGET_NR_shmget:
6145 ret = get_errno(shmget(arg1, arg2, arg3));
6146 break;
6147 #endif
6148 #ifdef TARGET_NR_shmctl
6149 case TARGET_NR_shmctl:
6150 ret = do_shmctl(arg1, arg2, arg3);
6151 break;
6152 #endif
6153 #ifdef TARGET_NR_shmat
6154 case TARGET_NR_shmat:
6155 ret = do_shmat(arg1, arg2, arg3);
6156 break;
6157 #endif
6158 #ifdef TARGET_NR_shmdt
6159 case TARGET_NR_shmdt:
6160 ret = do_shmdt(arg1);
6161 break;
6162 #endif
6163 case TARGET_NR_fsync:
6164 ret = get_errno(fsync(arg1));
6165 break;
6166 case TARGET_NR_clone:
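/* The order in which the clone() arguments (flags, child stack, parent
   tid, TLS, child tid) arrive differs between target ABIs, so they are
   reordered per target before being handed to do_fork(). */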
6167 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6168 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6169 #elif defined(TARGET_CRIS)
6170 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6171 #elif defined(TARGET_S390X)
6172 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6173 #else
6174 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6175 #endif
6176 break;
6177 #ifdef __NR_exit_group
6178 /* new thread calls */
6179 case TARGET_NR_exit_group:
6180 #ifdef TARGET_GPROF
6181 _mcleanup();
6182 #endif
6183 gdb_exit(cpu_env, arg1);
6184 ret = get_errno(exit_group(arg1));
6185 break;
6186 #endif
6187 case TARGET_NR_setdomainname:
6188 if (!(p = lock_user_string(arg1)))
6189 goto efault;
6190 ret = get_errno(setdomainname(p, arg2));
6191 unlock_user(p, arg1, 0);
6192 break;
6193 case TARGET_NR_uname:
6194 /* no need to transcode because we use the linux syscall */
6196 struct new_utsname * buf;
6198 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6199 goto efault;
6200 ret = get_errno(sys_uname(buf));
6201 if (!is_error(ret)) {
6202 /* Overwrite the native machine name with whatever is being
6203 emulated. */

6204 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6205 /* Allow the user to override the reported release. */
6206 if (qemu_uname_release && *qemu_uname_release)
6207 strcpy (buf->release, qemu_uname_release);
6209 unlock_user_struct(buf, arg1, 1);
6211 break;
6212 #ifdef TARGET_I386
6213 case TARGET_NR_modify_ldt:
6214 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6215 break;
6216 #if !defined(TARGET_X86_64)
6217 case TARGET_NR_vm86old:
6218 goto unimplemented;
6219 case TARGET_NR_vm86:
6220 ret = do_vm86(cpu_env, arg1, arg2);
6221 break;
6222 #endif
6223 #endif
6224 case TARGET_NR_adjtimex:
6225 goto unimplemented;
6226 #ifdef TARGET_NR_create_module
6227 case TARGET_NR_create_module:
6228 #endif
6229 case TARGET_NR_init_module:
6230 case TARGET_NR_delete_module:
6231 #ifdef TARGET_NR_get_kernel_syms
6232 case TARGET_NR_get_kernel_syms:
6233 #endif
6234 goto unimplemented;
6235 case TARGET_NR_quotactl:
6236 goto unimplemented;
6237 case TARGET_NR_getpgid:
6238 ret = get_errno(getpgid(arg1));
6239 break;
6240 case TARGET_NR_fchdir:
6241 ret = get_errno(fchdir(arg1));
6242 break;
6243 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6244 case TARGET_NR_bdflush:
6245 goto unimplemented;
6246 #endif
6247 #ifdef TARGET_NR_sysfs
6248 case TARGET_NR_sysfs:
6249 goto unimplemented;
6250 #endif
6251 case TARGET_NR_personality:
6252 ret = get_errno(personality(arg1));
6253 break;
6254 #ifdef TARGET_NR_afs_syscall
6255 case TARGET_NR_afs_syscall:
6256 goto unimplemented;
6257 #endif
6258 #ifdef TARGET_NR__llseek /* Not on alpha */
6259 case TARGET_NR__llseek:
6261 int64_t res;
6262 #if !defined(__NR_llseek)
6263 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6264 if (res == -1) {
6265 ret = get_errno(res);
6266 } else {
6267 ret = 0;
6269 #else
6270 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6271 #endif
6272 if ((ret == 0) && put_user_s64(res, arg4)) {
6273 goto efault;
6276 break;
6277 #endif
6278 case TARGET_NR_getdents:
6279 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
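/* With a 32-bit target on a 64-bit host, struct linux_dirent has a
   different layout on each side, so the host result is read into a
   temporary buffer and each record is repacked into the guest's
   struct target_dirent. */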
6281 struct target_dirent *target_dirp;
6282 struct linux_dirent *dirp;
6283 abi_long count = arg3;
6285 dirp = malloc(count);
6286 if (!dirp) {
6287 ret = -TARGET_ENOMEM;
6288 goto fail;
6291 ret = get_errno(sys_getdents(arg1, dirp, count));
6292 if (!is_error(ret)) {
6293 struct linux_dirent *de;
6294 struct target_dirent *tde;
6295 int len = ret;
6296 int reclen, treclen;
6297 int count1, tnamelen;
6299 count1 = 0;
6300 de = dirp;
6301 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6302 goto efault;
6303 tde = target_dirp;
6304 while (len > 0) {
6305 reclen = de->d_reclen;
6306 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6307 tde->d_reclen = tswap16(treclen);
6308 tde->d_ino = tswapl(de->d_ino);
6309 tde->d_off = tswapl(de->d_off);
6310 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6311 if (tnamelen > 256)
6312 tnamelen = 256;
6313 /* XXX: may not be correct */
6314 pstrcpy(tde->d_name, tnamelen, de->d_name);
6315 de = (struct linux_dirent *)((char *)de + reclen);
6316 len -= reclen;
6317 tde = (struct target_dirent *)((char *)tde + treclen);
6318 count1 += treclen;
6320 ret = count1;
6321 unlock_user(target_dirp, arg2, ret);
6323 free(dirp);
6325 #else
6327 struct linux_dirent *dirp;
6328 abi_long count = arg3;
6330 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6331 goto efault;
6332 ret = get_errno(sys_getdents(arg1, dirp, count));
6333 if (!is_error(ret)) {
6334 struct linux_dirent *de;
6335 int len = ret;
6336 int reclen;
6337 de = dirp;
6338 while (len > 0) {
6339 reclen = de->d_reclen;
6340 if (reclen > len)
6341 break;
6342 de->d_reclen = tswap16(reclen);
6343 tswapls(&de->d_ino);
6344 tswapls(&de->d_off);
6345 de = (struct linux_dirent *)((char *)de + reclen);
6346 len -= reclen;
6349 unlock_user(dirp, arg2, ret);
6351 #endif
6352 break;
6353 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6354 case TARGET_NR_getdents64:
6356 struct linux_dirent64 *dirp;
6357 abi_long count = arg3;
6358 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6359 goto efault;
6360 ret = get_errno(sys_getdents64(arg1, dirp, count));
6361 if (!is_error(ret)) {
6362 struct linux_dirent64 *de;
6363 int len = ret;
6364 int reclen;
6365 de = dirp;
6366 while (len > 0) {
6367 reclen = de->d_reclen;
6368 if (reclen > len)
6369 break;
6370 de->d_reclen = tswap16(reclen);
6371 tswap64s((uint64_t *)&de->d_ino);
6372 tswap64s((uint64_t *)&de->d_off);
6373 de = (struct linux_dirent64 *)((char *)de + reclen);
6374 len -= reclen;
6377 unlock_user(dirp, arg2, ret);
6379 break;
6380 #endif /* TARGET_NR_getdents64 */
6381 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6382 #ifdef TARGET_S390X
6383 case TARGET_NR_select:
6384 #else
6385 case TARGET_NR__newselect:
6386 #endif
6387 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6388 break;
6389 #endif
6390 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6391 # ifdef TARGET_NR_poll
6392 case TARGET_NR_poll:
6393 # endif
6394 # ifdef TARGET_NR_ppoll
6395 case TARGET_NR_ppoll:
6396 # endif
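/* poll and ppoll share this path: the guest pollfd array is converted
   into host struct pollfd, the host syscall is issued, and the revents
   fields are swapped back into guest memory. */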
6398 struct target_pollfd *target_pfd;
6399 unsigned int nfds = arg2;
6400 int timeout = arg3;
6401 struct pollfd *pfd;
6402 unsigned int i;
6404 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6405 if (!target_pfd)
6406 goto efault;
6408 pfd = alloca(sizeof(struct pollfd) * nfds);
6409 for(i = 0; i < nfds; i++) {
6410 pfd[i].fd = tswap32(target_pfd[i].fd);
6411 pfd[i].events = tswap16(target_pfd[i].events);
6414 # ifdef TARGET_NR_ppoll
6415 if (num == TARGET_NR_ppoll) {
6416 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6417 target_sigset_t *target_set;
6418 sigset_t _set, *set = &_set;
6420 if (arg3) {
6421 if (target_to_host_timespec(timeout_ts, arg3)) {
6422 unlock_user(target_pfd, arg1, 0);
6423 goto efault;
6425 } else {
6426 timeout_ts = NULL;
6429 if (arg4) {
6430 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6431 if (!target_set) {
6432 unlock_user(target_pfd, arg1, 0);
6433 goto efault;
6435 target_to_host_sigset(set, target_set);
6436 } else {
6437 set = NULL;
6440 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6442 if (!is_error(ret) && arg3) {
6443 host_to_target_timespec(arg3, timeout_ts);
6445 if (arg4) {
6446 unlock_user(target_set, arg4, 0);
6448 } else
6449 # endif
6450 ret = get_errno(poll(pfd, nfds, timeout));
6452 if (!is_error(ret)) {
6453 for(i = 0; i < nfds; i++) {
6454 target_pfd[i].revents = tswap16(pfd[i].revents);
6457 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6459 break;
6460 #endif
6461 case TARGET_NR_flock:
6462 /* NOTE: the flock constant seems to be the same for every
6463 Linux platform */
6464 ret = get_errno(flock(arg1, arg2));
6465 break;
6466 case TARGET_NR_readv:
6468 int count = arg3;
6469 struct iovec *vec;
6471 vec = alloca(count * sizeof(struct iovec));
6472 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6473 goto efault;
6474 ret = get_errno(readv(arg1, vec, count));
6475 unlock_iovec(vec, arg2, count, 1);
6477 break;
6478 case TARGET_NR_writev:
6480 int count = arg3;
6481 struct iovec *vec;
6483 vec = alloca(count * sizeof(struct iovec));
6484 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6485 goto efault;
6486 ret = get_errno(writev(arg1, vec, count));
6487 unlock_iovec(vec, arg2, count, 0);
6489 break;
6490 case TARGET_NR_getsid:
6491 ret = get_errno(getsid(arg1));
6492 break;
6493 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6494 case TARGET_NR_fdatasync:
6495 ret = get_errno(fdatasync(arg1));
6496 break;
6497 #endif
6498 case TARGET_NR__sysctl:
6499 /* We don't implement this, but ENOTDIR is always a safe
6500 return value. */
6501 ret = -TARGET_ENOTDIR;
6502 break;
6503 case TARGET_NR_sched_getaffinity:
6505 unsigned int mask_size;
6506 unsigned long *mask;
6509 /* sched_getaffinity needs multiples of ulong, so we need to take
6510 * care of mismatches between target ulong and host ulong sizes. */
6512 if (arg2 & (sizeof(abi_ulong) - 1)) {
6513 ret = -TARGET_EINVAL;
6514 break;
6516 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6518 mask = alloca(mask_size);
6519 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6521 if (!is_error(ret)) {
6522 if (copy_to_user(arg3, mask, ret)) {
6523 goto efault;
6527 break;
6528 case TARGET_NR_sched_setaffinity:
6530 unsigned int mask_size;
6531 unsigned long *mask;
6534 /* sched_setaffinity needs multiples of ulong, so we need to take
6535 * care of mismatches between target ulong and host ulong sizes. */
6537 if (arg2 & (sizeof(abi_ulong) - 1)) {
6538 ret = -TARGET_EINVAL;
6539 break;
6541 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6543 mask = alloca(mask_size);
6544 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6545 goto efault;
6547 memcpy(mask, p, arg2);
6548 unlock_user_struct(p, arg3, 0);
6550 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6552 break;
6553 case TARGET_NR_sched_setparam:
6555 struct sched_param *target_schp;
6556 struct sched_param schp;
6558 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6559 goto efault;
6560 schp.sched_priority = tswap32(target_schp->sched_priority);
6561 unlock_user_struct(target_schp, arg2, 0);
6562 ret = get_errno(sched_setparam(arg1, &schp));
6564 break;
6565 case TARGET_NR_sched_getparam:
6567 struct sched_param *target_schp;
6568 struct sched_param schp;
6569 ret = get_errno(sched_getparam(arg1, &schp));
6570 if (!is_error(ret)) {
6571 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6572 goto efault;
6573 target_schp->sched_priority = tswap32(schp.sched_priority);
6574 unlock_user_struct(target_schp, arg2, 1);
6577 break;
6578 case TARGET_NR_sched_setscheduler:
6580 struct sched_param *target_schp;
6581 struct sched_param schp;
6582 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6583 goto efault;
6584 schp.sched_priority = tswap32(target_schp->sched_priority);
6585 unlock_user_struct(target_schp, arg3, 0);
6586 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6588 break;
6589 case TARGET_NR_sched_getscheduler:
6590 ret = get_errno(sched_getscheduler(arg1));
6591 break;
6592 case TARGET_NR_sched_yield:
6593 ret = get_errno(sched_yield());
6594 break;
6595 case TARGET_NR_sched_get_priority_max:
6596 ret = get_errno(sched_get_priority_max(arg1));
6597 break;
6598 case TARGET_NR_sched_get_priority_min:
6599 ret = get_errno(sched_get_priority_min(arg1));
6600 break;
6601 case TARGET_NR_sched_rr_get_interval:
6603 struct timespec ts;
6604 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6605 if (!is_error(ret)) {
6606 host_to_target_timespec(arg2, &ts);
6609 break;
6610 case TARGET_NR_nanosleep:
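/* If the sleep is interrupted, the remaining time is copied back to the
   guest (arg2) so the guest can restart with the right value. */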
6612 struct timespec req, rem;
6613 target_to_host_timespec(&req, arg1);
6614 ret = get_errno(nanosleep(&req, &rem));
6615 if (is_error(ret) && arg2) {
6616 host_to_target_timespec(arg2, &rem);
6619 break;
6620 #ifdef TARGET_NR_query_module
6621 case TARGET_NR_query_module:
6622 goto unimplemented;
6623 #endif
6624 #ifdef TARGET_NR_nfsservctl
6625 case TARGET_NR_nfsservctl:
6626 goto unimplemented;
6627 #endif
6628 case TARGET_NR_prctl:
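/* Only PR_GET_PDEATHSIG needs special handling here, because it writes
   through its second argument; every other option is passed straight
   through to the host prctl(). */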
6629 switch (arg1)
6631 case PR_GET_PDEATHSIG:
6633 int deathsig;
6634 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6635 if (!is_error(ret) && arg2
6636 && put_user_ual(deathsig, arg2))
6637 goto efault;
6639 break;
6640 default:
6641 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6642 break;
6644 break;
6645 #ifdef TARGET_NR_arch_prctl
6646 case TARGET_NR_arch_prctl:
6647 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6648 ret = do_arch_prctl(cpu_env, arg1, arg2);
6649 break;
6650 #else
6651 goto unimplemented;
6652 #endif
6653 #endif
6654 #ifdef TARGET_NR_pread
6655 case TARGET_NR_pread:
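/* On ARM EABI the syscall ABI presumably pads before the 64-bit offset
   (64-bit arguments are aligned to even register pairs), so the offset
   actually arrives in arg5 rather than arg4. */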
6656 #ifdef TARGET_ARM
6657 if (((CPUARMState *)cpu_env)->eabi)
6658 arg4 = arg5;
6659 #endif
6660 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6661 goto efault;
6662 ret = get_errno(pread(arg1, p, arg3, arg4));
6663 unlock_user(p, arg2, ret);
6664 break;
6665 case TARGET_NR_pwrite:
6666 #ifdef TARGET_ARM
6667 if (((CPUARMState *)cpu_env)->eabi)
6668 arg4 = arg5;
6669 #endif
6670 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6671 goto efault;
6672 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6673 unlock_user(p, arg2, 0);
6674 break;
6675 #endif
6676 #ifdef TARGET_NR_pread64
6677 case TARGET_NR_pread64:
6678 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6679 goto efault;
6680 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6681 unlock_user(p, arg2, ret);
6682 break;
6683 case TARGET_NR_pwrite64:
6684 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6685 goto efault;
6686 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6687 unlock_user(p, arg2, 0);
6688 break;
6689 #endif
6690 case TARGET_NR_getcwd:
6691 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6692 goto efault;
6693 ret = get_errno(sys_getcwd1(p, arg2));
6694 unlock_user(p, arg1, ret);
6695 break;
6696 case TARGET_NR_capget:
6697 goto unimplemented;
6698 case TARGET_NR_capset:
6699 goto unimplemented;
6700 case TARGET_NR_sigaltstack:
6701 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6702 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6703 defined(TARGET_M68K) || defined(TARGET_S390X)
6704 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6705 break;
6706 #else
6707 goto unimplemented;
6708 #endif
6709 case TARGET_NR_sendfile:
6710 goto unimplemented;
6711 #ifdef TARGET_NR_getpmsg
6712 case TARGET_NR_getpmsg:
6713 goto unimplemented;
6714 #endif
6715 #ifdef TARGET_NR_putpmsg
6716 case TARGET_NR_putpmsg:
6717 goto unimplemented;
6718 #endif
6719 #ifdef TARGET_NR_vfork
6720 case TARGET_NR_vfork:
6721 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6722 0, 0, 0, 0));
6723 break;
6724 #endif
6725 #ifdef TARGET_NR_ugetrlimit
6726 case TARGET_NR_ugetrlimit:
6728 struct rlimit rlim;
6729 ret = get_errno(getrlimit(arg1, &rlim));
6730 if (!is_error(ret)) {
6731 struct target_rlimit *target_rlim;
6732 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6733 goto efault;
6734 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6735 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6736 unlock_user_struct(target_rlim, arg2, 1);
6738 break;
6740 #endif
6741 #ifdef TARGET_NR_truncate64
6742 case TARGET_NR_truncate64:
6743 if (!(p = lock_user_string(arg1)))
6744 goto efault;
6745 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6746 unlock_user(p, arg1, 0);
6747 break;
6748 #endif
6749 #ifdef TARGET_NR_ftruncate64
6750 case TARGET_NR_ftruncate64:
6751 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6752 break;
6753 #endif
6754 #ifdef TARGET_NR_stat64
6755 case TARGET_NR_stat64:
6756 if (!(p = lock_user_string(arg1)))
6757 goto efault;
6758 ret = get_errno(stat(path(p), &st));
6759 unlock_user(p, arg1, 0);
6760 if (!is_error(ret))
6761 ret = host_to_target_stat64(cpu_env, arg2, &st);
6762 break;
6763 #endif
6764 #ifdef TARGET_NR_lstat64
6765 case TARGET_NR_lstat64:
6766 if (!(p = lock_user_string(arg1)))
6767 goto efault;
6768 ret = get_errno(lstat(path(p), &st));
6769 unlock_user(p, arg1, 0);
6770 if (!is_error(ret))
6771 ret = host_to_target_stat64(cpu_env, arg2, &st);
6772 break;
6773 #endif
6774 #ifdef TARGET_NR_fstat64
6775 case TARGET_NR_fstat64:
6776 ret = get_errno(fstat(arg1, &st));
6777 if (!is_error(ret))
6778 ret = host_to_target_stat64(cpu_env, arg2, &st);
6779 break;
6780 #endif
6781 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6782 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6783 #ifdef TARGET_NR_fstatat64
6784 case TARGET_NR_fstatat64:
6785 #endif
6786 #ifdef TARGET_NR_newfstatat
6787 case TARGET_NR_newfstatat:
6788 #endif
6789 if (!(p = lock_user_string(arg2)))
6790 goto efault;
6791 #ifdef __NR_fstatat64
6792 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6793 #else
6794 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6795 #endif
6796 if (!is_error(ret))
6797 ret = host_to_target_stat64(cpu_env, arg3, &st);
6798 break;
6799 #endif
6800 case TARGET_NR_lchown:
6801 if (!(p = lock_user_string(arg1)))
6802 goto efault;
6803 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6804 unlock_user(p, arg1, 0);
6805 break;
6806 #ifdef TARGET_NR_getuid
6807 case TARGET_NR_getuid:
6808 ret = get_errno(high2lowuid(getuid()));
6809 break;
6810 #endif
6811 #ifdef TARGET_NR_getgid
6812 case TARGET_NR_getgid:
6813 ret = get_errno(high2lowgid(getgid()));
6814 break;
6815 #endif
6816 #ifdef TARGET_NR_geteuid
6817 case TARGET_NR_geteuid:
6818 ret = get_errno(high2lowuid(geteuid()));
6819 break;
6820 #endif
6821 #ifdef TARGET_NR_getegid
6822 case TARGET_NR_getegid:
6823 ret = get_errno(high2lowgid(getegid()));
6824 break;
6825 #endif
6826 case TARGET_NR_setreuid:
6827 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6828 break;
6829 case TARGET_NR_setregid:
6830 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6831 break;
6832 case TARGET_NR_getgroups:
6834 int gidsetsize = arg1;
6835 target_id *target_grouplist;
6836 gid_t *grouplist;
6837 int i;
6839 grouplist = alloca(gidsetsize * sizeof(gid_t));
6840 ret = get_errno(getgroups(gidsetsize, grouplist));
6841 if (gidsetsize == 0)
6842 break;
6843 if (!is_error(ret)) {
6844 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6845 if (!target_grouplist)
6846 goto efault;
6847 for(i = 0;i < ret; i++)
6848 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
6849 unlock_user(target_grouplist, arg2, gidsetsize * 2);
6852 break;
6853 case TARGET_NR_setgroups:
6855 int gidsetsize = arg1;
6856 target_id *target_grouplist;
6857 gid_t *grouplist;
6858 int i;
6860 grouplist = alloca(gidsetsize * sizeof(gid_t));
6861 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
6862 if (!target_grouplist) {
6863 ret = -TARGET_EFAULT;
6864 goto fail;
6866 for(i = 0;i < gidsetsize; i++)
6867 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
6868 unlock_user(target_grouplist, arg2, 0);
6869 ret = get_errno(setgroups(gidsetsize, grouplist));
6871 break;
6872 case TARGET_NR_fchown:
6873 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
6874 break;
6875 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6876 case TARGET_NR_fchownat:
6877 if (!(p = lock_user_string(arg2)))
6878 goto efault;
6879 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
6880 unlock_user(p, arg2, 0);
6881 break;
6882 #endif
6883 #ifdef TARGET_NR_setresuid
6884 case TARGET_NR_setresuid:
6885 ret = get_errno(setresuid(low2highuid(arg1),
6886 low2highuid(arg2),
6887 low2highuid(arg3)));
6888 break;
6889 #endif
6890 #ifdef TARGET_NR_getresuid
6891 case TARGET_NR_getresuid:
6893 uid_t ruid, euid, suid;
6894 ret = get_errno(getresuid(&ruid, &euid, &suid));
6895 if (!is_error(ret)) {
6896 if (put_user_u16(high2lowuid(ruid), arg1)
6897 || put_user_u16(high2lowuid(euid), arg2)
6898 || put_user_u16(high2lowuid(suid), arg3))
6899 goto efault;
6902 break;
6903 #endif
6904 #ifdef TARGET_NR_getresgid
6905 case TARGET_NR_setresgid:
6906 ret = get_errno(setresgid(low2highgid(arg1),
6907 low2highgid(arg2),
6908 low2highgid(arg3)));
6909 break;
6910 #endif
6911 #ifdef TARGET_NR_getresgid
6912 case TARGET_NR_getresgid:
6914 gid_t rgid, egid, sgid;
6915 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6916 if (!is_error(ret)) {
6917 if (put_user_u16(high2lowgid(rgid), arg1)
6918 || put_user_u16(high2lowgid(egid), arg2)
6919 || put_user_u16(high2lowgid(sgid), arg3))
6920 goto efault;
6923 break;
6924 #endif
6925 case TARGET_NR_chown:
6926 if (!(p = lock_user_string(arg1)))
6927 goto efault;
6928 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
6929 unlock_user(p, arg1, 0);
6930 break;
6931 case TARGET_NR_setuid:
6932 ret = get_errno(setuid(low2highuid(arg1)));
6933 break;
6934 case TARGET_NR_setgid:
6935 ret = get_errno(setgid(low2highgid(arg1)));
6936 break;
6937 case TARGET_NR_setfsuid:
6938 ret = get_errno(setfsuid(arg1));
6939 break;
6940 case TARGET_NR_setfsgid:
6941 ret = get_errno(setfsgid(arg1));
6942 break;
6944 #ifdef TARGET_NR_lchown32
6945 case TARGET_NR_lchown32:
6946 if (!(p = lock_user_string(arg1)))
6947 goto efault;
6948 ret = get_errno(lchown(p, arg2, arg3));
6949 unlock_user(p, arg1, 0);
6950 break;
6951 #endif
6952 #ifdef TARGET_NR_getuid32
6953 case TARGET_NR_getuid32:
6954 ret = get_errno(getuid());
6955 break;
6956 #endif
6958 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6959 /* Alpha specific */
6960 case TARGET_NR_getxuid:
6962 uid_t euid;
6963 euid=geteuid();
6964 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
6966 ret = get_errno(getuid());
6967 break;
6968 #endif
6969 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6970 /* Alpha specific */
6971 case TARGET_NR_getxgid:
6973 gid_t egid;
6974 egid=getegid();
6975 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
6977 ret = get_errno(getgid());
6978 break;
6979 #endif
6980 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
6981 /* Alpha specific */
6982 case TARGET_NR_osf_getsysinfo:
6983 ret = -TARGET_EOPNOTSUPP;
6984 switch (arg1) {
6985 case TARGET_GSI_IEEE_FP_CONTROL:
6987 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
6989 /* Copied from linux ieee_fpcr_to_swcr. */
6990 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
6991 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
6992 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
6993 | SWCR_TRAP_ENABLE_DZE
6994 | SWCR_TRAP_ENABLE_OVF);
6995 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
6996 | SWCR_TRAP_ENABLE_INE);
6997 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
6998 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7000 if (put_user_u64 (swcr, arg2))
7001 goto efault;
7002 ret = 0;
7004 break;
7006 /* case GSI_IEEE_STATE_AT_SIGNAL:
7007 -- Not implemented in linux kernel.
7008 case GSI_UACPROC:
7009 -- Retrieves current unaligned access state; not much used.
7010 case GSI_PROC_TYPE:
7011 -- Retrieves implver information; surely not used.
7012 case GSI_GET_HWRPB:
7013 -- Grabs a copy of the HWRPB; surely not used. */
7016 break;
7017 #endif
7018 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7019 /* Alpha specific */
7020 case TARGET_NR_osf_setsysinfo:
7021 ret = -TARGET_EOPNOTSUPP;
7022 switch (arg1) {
7023 case TARGET_SSI_IEEE_FP_CONTROL:
7024 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7026 uint64_t swcr, fpcr, orig_fpcr;
7028 if (get_user_u64 (swcr, arg2))
7029 goto efault;
7030 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7031 fpcr = orig_fpcr & FPCR_DYN_MASK;
7033 /* Copied from linux ieee_swcr_to_fpcr. */
7034 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7035 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7036 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7037 | SWCR_TRAP_ENABLE_DZE
7038 | SWCR_TRAP_ENABLE_OVF)) << 48;
7039 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7040 | SWCR_TRAP_ENABLE_INE)) << 57;
7041 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7042 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7044 cpu_alpha_store_fpcr (cpu_env, fpcr);
7045 ret = 0;
7047 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7048 /* Old exceptions are not signaled. */
7049 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7051 /* If any exceptions were set by this call and are unmasked,
7052 send a signal. */
7053 /* ??? FIXME */
7056 break;
7058 /* case SSI_NVPAIRS:
7059 -- Used with SSIN_UACPROC to enable unaligned accesses.
7060 case SSI_IEEE_STATE_AT_SIGNAL:
7061 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7062 -- Not implemented in linux kernel */
7065 break;
7066 #endif
7067 #ifdef TARGET_NR_osf_sigprocmask
7068 /* Alpha specific. */
7069 case TARGET_NR_osf_sigprocmask:
7071 abi_ulong mask;
7072 int how = arg1;
7073 sigset_t set, oldset;
7075 switch(arg1) {
7076 case TARGET_SIG_BLOCK:
7077 how = SIG_BLOCK;
7078 break;
7079 case TARGET_SIG_UNBLOCK:
7080 how = SIG_UNBLOCK;
7081 break;
7082 case TARGET_SIG_SETMASK:
7083 how = SIG_SETMASK;
7084 break;
7085 default:
7086 ret = -TARGET_EINVAL;
7087 goto fail;
7089 mask = arg2;
7090 target_to_host_old_sigset(&set, &mask);
7091 sigprocmask(how, &set, &oldset);
7092 host_to_target_old_sigset(&mask, &oldset);
7093 ret = mask;
7095 break;
7096 #endif
7098 #ifdef TARGET_NR_getgid32
7099 case TARGET_NR_getgid32:
7100 ret = get_errno(getgid());
7101 break;
7102 #endif
7103 #ifdef TARGET_NR_geteuid32
7104 case TARGET_NR_geteuid32:
7105 ret = get_errno(geteuid());
7106 break;
7107 #endif
7108 #ifdef TARGET_NR_getegid32
7109 case TARGET_NR_getegid32:
7110 ret = get_errno(getegid());
7111 break;
7112 #endif
7113 #ifdef TARGET_NR_setreuid32
7114 case TARGET_NR_setreuid32:
7115 ret = get_errno(setreuid(arg1, arg2));
7116 break;
7117 #endif
7118 #ifdef TARGET_NR_setregid32
7119 case TARGET_NR_setregid32:
7120 ret = get_errno(setregid(arg1, arg2));
7121 break;
7122 #endif
7123 #ifdef TARGET_NR_getgroups32
7124 case TARGET_NR_getgroups32:
7126 int gidsetsize = arg1;
7127 uint32_t *target_grouplist;
7128 gid_t *grouplist;
7129 int i;
7131 grouplist = alloca(gidsetsize * sizeof(gid_t));
7132 ret = get_errno(getgroups(gidsetsize, grouplist));
7133 if (gidsetsize == 0)
7134 break;
7135 if (!is_error(ret)) {
7136 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7137 if (!target_grouplist) {
7138 ret = -TARGET_EFAULT;
7139 goto fail;
7141 for(i = 0;i < ret; i++)
7142 target_grouplist[i] = tswap32(grouplist[i]);
7143 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7146 break;
7147 #endif
7148 #ifdef TARGET_NR_setgroups32
7149 case TARGET_NR_setgroups32:
7151 int gidsetsize = arg1;
7152 uint32_t *target_grouplist;
7153 gid_t *grouplist;
7154 int i;
7156 grouplist = alloca(gidsetsize * sizeof(gid_t));
7157 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7158 if (!target_grouplist) {
7159 ret = -TARGET_EFAULT;
7160 goto fail;
7162 for(i = 0;i < gidsetsize; i++)
7163 grouplist[i] = tswap32(target_grouplist[i]);
7164 unlock_user(target_grouplist, arg2, 0);
7165 ret = get_errno(setgroups(gidsetsize, grouplist));
7167 break;
7168 #endif
7169 #ifdef TARGET_NR_fchown32
7170 case TARGET_NR_fchown32:
7171 ret = get_errno(fchown(arg1, arg2, arg3));
7172 break;
7173 #endif
7174 #ifdef TARGET_NR_setresuid32
7175 case TARGET_NR_setresuid32:
7176 ret = get_errno(setresuid(arg1, arg2, arg3));
7177 break;
7178 #endif
7179 #ifdef TARGET_NR_getresuid32
7180 case TARGET_NR_getresuid32:
7182 uid_t ruid, euid, suid;
7183 ret = get_errno(getresuid(&ruid, &euid, &suid));
7184 if (!is_error(ret)) {
7185 if (put_user_u32(ruid, arg1)
7186 || put_user_u32(euid, arg2)
7187 || put_user_u32(suid, arg3))
7188 goto efault;
7191 break;
7192 #endif
7193 #ifdef TARGET_NR_setresgid32
7194 case TARGET_NR_setresgid32:
7195 ret = get_errno(setresgid(arg1, arg2, arg3));
7196 break;
7197 #endif
7198 #ifdef TARGET_NR_getresgid32
7199 case TARGET_NR_getresgid32:
7201 gid_t rgid, egid, sgid;
7202 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7203 if (!is_error(ret)) {
7204 if (put_user_u32(rgid, arg1)
7205 || put_user_u32(egid, arg2)
7206 || put_user_u32(sgid, arg3))
7207 goto efault;
7210 break;
7211 #endif
7212 #ifdef TARGET_NR_chown32
7213 case TARGET_NR_chown32:
7214 if (!(p = lock_user_string(arg1)))
7215 goto efault;
7216 ret = get_errno(chown(p, arg2, arg3));
7217 unlock_user(p, arg1, 0);
7218 break;
7219 #endif
7220 #ifdef TARGET_NR_setuid32
7221 case TARGET_NR_setuid32:
7222 ret = get_errno(setuid(arg1));
7223 break;
7224 #endif
7225 #ifdef TARGET_NR_setgid32
7226 case TARGET_NR_setgid32:
7227 ret = get_errno(setgid(arg1));
7228 break;
7229 #endif
7230 #ifdef TARGET_NR_setfsuid32
7231 case TARGET_NR_setfsuid32:
7232 ret = get_errno(setfsuid(arg1));
7233 break;
7234 #endif
7235 #ifdef TARGET_NR_setfsgid32
7236 case TARGET_NR_setfsgid32:
7237 ret = get_errno(setfsgid(arg1));
7238 break;
7239 #endif
7241 case TARGET_NR_pivot_root:
7242 goto unimplemented;
7243 #ifdef TARGET_NR_mincore
7244 case TARGET_NR_mincore:
7246 void *a;
7247 ret = -TARGET_EFAULT;
7248 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7249 goto efault;
7250 if (!(p = lock_user_string(arg3)))
7251 goto mincore_fail;
7252 ret = get_errno(mincore(a, arg2, p));
7253 unlock_user(p, arg3, ret);
7254 mincore_fail:
7255 unlock_user(a, arg1, 0);
7257 break;
7258 #endif
7259 #ifdef TARGET_NR_arm_fadvise64_64
7260 case TARGET_NR_arm_fadvise64_64:
7263 /* arm_fadvise64_64 looks like fadvise64_64 but
7264 * with different argument order */
7266 abi_long temp;
7267 temp = arg3;
7268 arg3 = arg4;
7269 arg4 = temp;
7271 #endif
7272 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7273 #ifdef TARGET_NR_fadvise64_64
7274 case TARGET_NR_fadvise64_64:
7275 #endif
7276 #ifdef TARGET_NR_fadvise64
7277 case TARGET_NR_fadvise64:
7278 #endif
7279 #ifdef TARGET_S390X
7280 switch (arg4) {
7281 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7282 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7283 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7284 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7285 default: break;
7287 #endif
7288 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7289 break;
7290 #endif
7291 #ifdef TARGET_NR_madvise
7292 case TARGET_NR_madvise:
7293 /* A straight passthrough may not be safe because qemu sometimes
7294 turns private file-backed mappings into anonymous mappings.
7295 This will break MADV_DONTNEED.
7296 This is a hint, so ignoring and returning success is ok. */
7297 ret = get_errno(0);
7298 break;
7299 #endif
7300 #if TARGET_ABI_BITS == 32
7301 case TARGET_NR_fcntl64:
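/* fcntl64 with F_GETLK64/F_SETLK64/F_SETLKW64 carries a 64-bit struct
   flock64 that must be converted between the target layout (with a
   separate variant for ARM EABI) and the host layout; all other
   commands are forwarded to do_fcntl(). */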
7303 int cmd;
7304 struct flock64 fl;
7305 struct target_flock64 *target_fl;
7306 #ifdef TARGET_ARM
7307 struct target_eabi_flock64 *target_efl;
7308 #endif
7310 cmd = target_to_host_fcntl_cmd(arg2);
7311 if (cmd == -TARGET_EINVAL)
7312 return cmd;
7314 switch(arg2) {
7315 case TARGET_F_GETLK64:
7316 #ifdef TARGET_ARM
7317 if (((CPUARMState *)cpu_env)->eabi) {
7318 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7319 goto efault;
7320 fl.l_type = tswap16(target_efl->l_type);
7321 fl.l_whence = tswap16(target_efl->l_whence);
7322 fl.l_start = tswap64(target_efl->l_start);
7323 fl.l_len = tswap64(target_efl->l_len);
7324 fl.l_pid = tswap32(target_efl->l_pid);
7325 unlock_user_struct(target_efl, arg3, 0);
7326 } else
7327 #endif
7329 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7330 goto efault;
7331 fl.l_type = tswap16(target_fl->l_type);
7332 fl.l_whence = tswap16(target_fl->l_whence);
7333 fl.l_start = tswap64(target_fl->l_start);
7334 fl.l_len = tswap64(target_fl->l_len);
7335 fl.l_pid = tswap32(target_fl->l_pid);
7336 unlock_user_struct(target_fl, arg3, 0);
7338 ret = get_errno(fcntl(arg1, cmd, &fl));
7339 if (ret == 0) {
7340 #ifdef TARGET_ARM
7341 if (((CPUARMState *)cpu_env)->eabi) {
7342 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7343 goto efault;
7344 target_efl->l_type = tswap16(fl.l_type);
7345 target_efl->l_whence = tswap16(fl.l_whence);
7346 target_efl->l_start = tswap64(fl.l_start);
7347 target_efl->l_len = tswap64(fl.l_len);
7348 target_efl->l_pid = tswap32(fl.l_pid);
7349 unlock_user_struct(target_efl, arg3, 1);
7350 } else
7351 #endif
7353 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7354 goto efault;
7355 target_fl->l_type = tswap16(fl.l_type);
7356 target_fl->l_whence = tswap16(fl.l_whence);
7357 target_fl->l_start = tswap64(fl.l_start);
7358 target_fl->l_len = tswap64(fl.l_len);
7359 target_fl->l_pid = tswap32(fl.l_pid);
7360 unlock_user_struct(target_fl, arg3, 1);
7363 break;
7365 case TARGET_F_SETLK64:
7366 case TARGET_F_SETLKW64:
7367 #ifdef TARGET_ARM
7368 if (((CPUARMState *)cpu_env)->eabi) {
7369 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7370 goto efault;
7371 fl.l_type = tswap16(target_efl->l_type);
7372 fl.l_whence = tswap16(target_efl->l_whence);
7373 fl.l_start = tswap64(target_efl->l_start);
7374 fl.l_len = tswap64(target_efl->l_len);
7375 fl.l_pid = tswap32(target_efl->l_pid);
7376 unlock_user_struct(target_efl, arg3, 0);
7377 } else
7378 #endif
7380 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7381 goto efault;
7382 fl.l_type = tswap16(target_fl->l_type);
7383 fl.l_whence = tswap16(target_fl->l_whence);
7384 fl.l_start = tswap64(target_fl->l_start);
7385 fl.l_len = tswap64(target_fl->l_len);
7386 fl.l_pid = tswap32(target_fl->l_pid);
7387 unlock_user_struct(target_fl, arg3, 0);
7389 ret = get_errno(fcntl(arg1, cmd, &fl));
7390 break;
7391 default:
7392 ret = do_fcntl(arg1, arg2, arg3);
7393 break;
7395 break;
7397 #endif
7398 #ifdef TARGET_NR_cacheflush
7399 case TARGET_NR_cacheflush:
7400 /* self-modifying code is handled automatically, so nothing needed */
7401 ret = 0;
7402 break;
7403 #endif
7404 #ifdef TARGET_NR_security
7405 case TARGET_NR_security:
7406 goto unimplemented;
7407 #endif
7408 #ifdef TARGET_NR_getpagesize
7409 case TARGET_NR_getpagesize:
7410 ret = TARGET_PAGE_SIZE;
7411 break;
7412 #endif
7413 case TARGET_NR_gettid:
7414 ret = get_errno(gettid());
7415 break;
7416 #ifdef TARGET_NR_readahead
7417 case TARGET_NR_readahead:
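/* On 32-bit ABIs the 64-bit file offset is split across two registers
   (shifted by one on ARM EABI for register-pair alignment) and is
   reassembled before calling the host readahead(). */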
7418 #if TARGET_ABI_BITS == 32
7419 #ifdef TARGET_ARM
7420 if (((CPUARMState *)cpu_env)->eabi)
7422 arg2 = arg3;
7423 arg3 = arg4;
7424 arg4 = arg5;
7426 #endif
7427 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7428 #else
7429 ret = get_errno(readahead(arg1, arg2, arg3));
7430 #endif
7431 break;
7432 #endif
7433 #ifdef TARGET_NR_setxattr
7434 case TARGET_NR_setxattr:
7435 case TARGET_NR_lsetxattr:
7436 case TARGET_NR_fsetxattr:
7437 case TARGET_NR_getxattr:
7438 case TARGET_NR_lgetxattr:
7439 case TARGET_NR_fgetxattr:
7440 case TARGET_NR_listxattr:
7441 case TARGET_NR_llistxattr:
7442 case TARGET_NR_flistxattr:
7443 case TARGET_NR_removexattr:
7444 case TARGET_NR_lremovexattr:
7445 case TARGET_NR_fremovexattr:
7446 ret = -TARGET_EOPNOTSUPP;
7447 break;
7448 #endif
7449 #ifdef TARGET_NR_set_thread_area
7450 case TARGET_NR_set_thread_area:
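/* set_thread_area is target specific: MIPS and CRIS simply record the
   TLS value in the CPU state, while 32-bit x86 updates the GDT via
   do_set_thread_area(); other targets are unimplemented. */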
7451 #if defined(TARGET_MIPS)
7452 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7453 ret = 0;
7454 break;
7455 #elif defined(TARGET_CRIS)
7456 if (arg1 & 0xff)
7457 ret = -TARGET_EINVAL;
7458 else {
7459 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7460 ret = 0;
7462 break;
7463 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7464 ret = do_set_thread_area(cpu_env, arg1);
7465 break;
7466 #else
7467 goto unimplemented_nowarn;
7468 #endif
7469 #endif
7470 #ifdef TARGET_NR_get_thread_area
7471 case TARGET_NR_get_thread_area:
7472 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7473 ret = do_get_thread_area(cpu_env, arg1);
7474 #else
7475 goto unimplemented_nowarn;
7476 #endif
7477 #endif
7478 #ifdef TARGET_NR_getdomainname
7479 case TARGET_NR_getdomainname:
7480 goto unimplemented_nowarn;
7481 #endif
7483 #ifdef TARGET_NR_clock_gettime
7484 case TARGET_NR_clock_gettime:
7486 struct timespec ts;
7487 ret = get_errno(clock_gettime(arg1, &ts));
7488 if (!is_error(ret)) {
7489 host_to_target_timespec(arg2, &ts);
7491 break;
7493 #endif
7494 #ifdef TARGET_NR_clock_getres
7495 case TARGET_NR_clock_getres:
7497 struct timespec ts;
7498 ret = get_errno(clock_getres(arg1, &ts));
7499 if (!is_error(ret)) {
7500 host_to_target_timespec(arg2, &ts);
7502 break;
7504 #endif
7505 #ifdef TARGET_NR_clock_nanosleep
7506 case TARGET_NR_clock_nanosleep:
7508 struct timespec ts;
7509 target_to_host_timespec(&ts, arg3);
7510 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7511 if (arg4)
7512 host_to_target_timespec(arg4, &ts);
7513 break;
7515 #endif
7517 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7518 case TARGET_NR_set_tid_address:
7519 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7520 break;
7521 #endif
7523 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7524 case TARGET_NR_tkill:
7525 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7526 break;
7527 #endif
7529 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7530 case TARGET_NR_tgkill:
7531 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7532 target_to_host_signal(arg3)));
7533 break;
7534 #endif
7536 #ifdef TARGET_NR_set_robust_list
7537 case TARGET_NR_set_robust_list:
7538 goto unimplemented_nowarn;
7539 #endif
7541 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7542 case TARGET_NR_utimensat:
7544 struct timespec *tsp, ts[2];
7545 if (!arg3) {
7546 tsp = NULL;
7547 } else {
7548 target_to_host_timespec(ts, arg3);
7549 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
7550 tsp = ts;
7552 if (!arg2)
7553 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7554 else {
7555 if (!(p = lock_user_string(arg2))) {
7556 ret = -TARGET_EFAULT;
7557 goto fail;
7559 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7560 unlock_user(p, arg2, 0);
7563 break;
7564 #endif
7565 #if defined(CONFIG_USE_NPTL)
7566 case TARGET_NR_futex:
7567 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7568 break;
7569 #endif
7570 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7571 case TARGET_NR_inotify_init:
7572 ret = get_errno(sys_inotify_init());
7573 break;
7574 #endif
7575 #ifdef CONFIG_INOTIFY1
7576 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7577 case TARGET_NR_inotify_init1:
7578 ret = get_errno(sys_inotify_init1(arg1));
7579 break;
7580 #endif
7581 #endif
7582 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7583 case TARGET_NR_inotify_add_watch:
7584 p = lock_user_string(arg2);
7585 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7586 unlock_user(p, arg2, 0);
7587 break;
7588 #endif
7589 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7590 case TARGET_NR_inotify_rm_watch:
7591 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7592 break;
7593 #endif
7595 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7596 case TARGET_NR_mq_open:
7598 struct mq_attr posix_mq_attr;
7600 p = lock_user_string(arg1 - 1);
7601 if (arg4 != 0)
7602 copy_from_user_mq_attr (&posix_mq_attr, arg4);
7603 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
7604 unlock_user (p, arg1, 0);
7606 break;
7608 case TARGET_NR_mq_unlink:
7609 p = lock_user_string(arg1 - 1);
7610 ret = get_errno(mq_unlink(p));
7611 unlock_user (p, arg1, 0);
7612 break;
7614 case TARGET_NR_mq_timedsend:
7616 struct timespec ts;
7618 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7619 if (arg5 != 0) {
7620 target_to_host_timespec(&ts, arg5);
7621 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7622 host_to_target_timespec(arg5, &ts);
7624 else
7625 ret = get_errno(mq_send(arg1, p, arg3, arg4));
7626 unlock_user (p, arg2, arg3);
7628 break;
7630 case TARGET_NR_mq_timedreceive:
7632 struct timespec ts;
7633 unsigned int prio;
7635 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7636 if (arg5 != 0) {
7637 target_to_host_timespec(&ts, arg5);
7638 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
7639 host_to_target_timespec(arg5, &ts);
7641 else
7642 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
7643 unlock_user (p, arg2, arg3);
7644 if (arg4 != 0)
7645 put_user_u32(prio, arg4);
7647 break;
7649 /* Not implemented for now... */
7650 /* case TARGET_NR_mq_notify: */
7651 /* break; */
7653 case TARGET_NR_mq_getsetattr:
7655 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
7656 ret = 0;
7657 if (arg3 != 0) {
7658 ret = mq_getattr(arg1, &posix_mq_attr_out);
7659 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
7661 if (arg2 != 0) {
7662 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
7663 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
7667 break;
7668 #endif
7670 #ifdef CONFIG_SPLICE
7671 #ifdef TARGET_NR_tee
7672 case TARGET_NR_tee:
7674 ret = get_errno(tee(arg1,arg2,arg3,arg4));
7676 break;
7677 #endif
7678 #ifdef TARGET_NR_splice
7679 case TARGET_NR_splice:
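/* splice: the optional input/output offsets are 64-bit values in guest
   memory, so they are fetched with get_user_u64() before the host
   splice() call. */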
7681 loff_t loff_in, loff_out;
7682 loff_t *ploff_in = NULL, *ploff_out = NULL;
7683 if(arg2) {
7684 get_user_u64(loff_in, arg2);
7685 ploff_in = &loff_in;
7687 if(arg4) {
7688 get_user_u64(loff_out, arg4);
7689 ploff_out = &loff_out;
7691 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
7693 break;
7694 #endif
7695 #ifdef TARGET_NR_vmsplice
7696 case TARGET_NR_vmsplice:
7698 int count = arg3;
7699 struct iovec *vec;
7701 vec = alloca(count * sizeof(struct iovec));
7702 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7703 goto efault;
7704 ret = get_errno(vmsplice(arg1, vec, count, arg4));
7705 unlock_iovec(vec, arg2, count, 0);
7707 break;
7708 #endif
7709 #endif /* CONFIG_SPLICE */
7710 #ifdef CONFIG_EVENTFD
7711 #if defined(TARGET_NR_eventfd)
7712 case TARGET_NR_eventfd:
7713 ret = get_errno(eventfd(arg1, 0));
7714 break;
7715 #endif
7716 #if defined(TARGET_NR_eventfd2)
7717 case TARGET_NR_eventfd2:
7718 ret = get_errno(eventfd(arg1, arg2));
7719 break;
7720 #endif
7721 #endif /* CONFIG_EVENTFD */
7722 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7723 case TARGET_NR_fallocate:
7724 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
7725 break;
7726 #endif
7727 #if defined(CONFIG_SYNC_FILE_RANGE)
7728 #if defined(TARGET_NR_sync_file_range)
7729 case TARGET_NR_sync_file_range:
7730 #if TARGET_ABI_BITS == 32
7731 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
7732 target_offset64(arg4, arg5), arg6));
7733 #else
7734 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
7735 #endif
7736 break;
7737 #endif
7738 #if defined(TARGET_NR_sync_file_range2)
7739 case TARGET_NR_sync_file_range2:
7740 /* This is like sync_file_range but the arguments are reordered */
7741 #if TARGET_ABI_BITS == 32
7742 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7743 target_offset64(arg5, arg6), arg2));
7744 #else
7745 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
7746 #endif
7747 break;
7748 #endif
7749 #endif
7750 #if defined(CONFIG_EPOLL)
7751 #if defined(TARGET_NR_epoll_create)
7752 case TARGET_NR_epoll_create:
7753 ret = get_errno(epoll_create(arg1));
7754 break;
7755 #endif
7756 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7757 case TARGET_NR_epoll_create1:
7758 ret = get_errno(epoll_create1(arg1));
7759 break;
7760 #endif
7761 #if defined(TARGET_NR_epoll_ctl)
7762 case TARGET_NR_epoll_ctl:
7764 struct epoll_event ep;
7765 struct epoll_event *epp = 0;
7766 if (arg4) {
7767 struct target_epoll_event *target_ep;
7768 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
7769 goto efault;
7771 ep.events = tswap32(target_ep->events);
7772 /* The epoll_data_t union is just opaque data to the kernel,
7773 * so we transfer all 64 bits across and need not worry what
7774 * actual data type it is. */
7776 ep.data.u64 = tswap64(target_ep->data.u64);
7777 unlock_user_struct(target_ep, arg4, 0);
7778 epp = &ep;
7780 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
7781 break;
7783 #endif
7785 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7786 #define IMPLEMENT_EPOLL_PWAIT
7787 #endif
7788 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7789 #if defined(TARGET_NR_epoll_wait)
7790 case TARGET_NR_epoll_wait:
7791 #endif
7792 #if defined(IMPLEMENT_EPOLL_PWAIT)
7793 case TARGET_NR_epoll_pwait:
7794 #endif
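/* epoll_wait and epoll_pwait share this path: lock the guest event
   array, call the host syscall into a temporary host array, then
   byte-swap the returned events back into guest memory. */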
7796 struct target_epoll_event *target_ep;
7797 struct epoll_event *ep;
7798 int epfd = arg1;
7799 int maxevents = arg3;
7800 int timeout = arg4;
7802 target_ep = lock_user(VERIFY_WRITE, arg2,
7803 maxevents * sizeof(struct target_epoll_event), 1);
7804 if (!target_ep) {
7805 goto efault;
7808 ep = alloca(maxevents * sizeof(struct epoll_event));
7810 switch (num) {
7811 #if defined(IMPLEMENT_EPOLL_PWAIT)
7812 case TARGET_NR_epoll_pwait:
7814 target_sigset_t *target_set;
7815 sigset_t _set, *set = &_set;
7817 if (arg5) {
7818 target_set = lock_user(VERIFY_READ, arg5,
7819 sizeof(target_sigset_t), 1);
7820 if (!target_set) {
7821 unlock_user(target_ep, arg2, 0);
7822 goto efault;
7824 target_to_host_sigset(set, target_set);
7825 unlock_user(target_set, arg5, 0);
7826 } else {
7827 set = NULL;
7830 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
7831 break;
7833 #endif
7834 #if defined(TARGET_NR_epoll_wait)
7835 case TARGET_NR_epoll_wait:
7836 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
7837 break;
7838 #endif
7839 default:
7840 ret = -TARGET_ENOSYS;
7842 if (!is_error(ret)) {
7843 int i;
7844 for (i = 0; i < ret; i++) {
7845 target_ep[i].events = tswap32(ep[i].events);
7846 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
7849 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
7850 break;
7852 #endif
7853 #endif
7854 default:
7855 unimplemented:
7856 gemu_log("qemu: Unsupported syscall: %d\n", num);
7857 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
7858 unimplemented_nowarn:
7859 #endif
7860 ret = -TARGET_ENOSYS;
7861 break;
7863 fail:
7864 #ifdef DEBUG
7865 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
7866 #endif
7867 if(do_strace)
7868 print_syscall_ret(num, ret);
7869 return ret;
7870 efault:
7871 ret = -TARGET_EFAULT;
7872 goto fail;