[qemu.git] / linux-user / syscall.c
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include <qemu-common.h>
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/utsname.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #include <linux/vt.h>
95 #include "linux_loop.h"
96 #include "cpu-uname.h"
98 #include "qemu.h"
99 #include "qemu-common.h"
101 #if defined(CONFIG_USE_NPTL)
102 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
103 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
104 #else
105 /* XXX: Hardcode the above values. */
106 #define CLONE_NPTL_FLAGS2 0
107 #endif
109 //#define DEBUG
111 //#include <linux/msdos_fs.h>
112 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
113 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
116 #undef _syscall0
117 #undef _syscall1
118 #undef _syscall2
119 #undef _syscall3
120 #undef _syscall4
121 #undef _syscall5
122 #undef _syscall6
124 #define _syscall0(type,name) \
125 static type name (void) \
127 return syscall(__NR_##name); \
130 #define _syscall1(type,name,type1,arg1) \
131 static type name (type1 arg1) \
133 return syscall(__NR_##name, arg1); \
136 #define _syscall2(type,name,type1,arg1,type2,arg2) \
137 static type name (type1 arg1,type2 arg2) \
139 return syscall(__NR_##name, arg1, arg2); \
142 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
143 static type name (type1 arg1,type2 arg2,type3 arg3) \
145 return syscall(__NR_##name, arg1, arg2, arg3); \
148 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
149 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
151 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
154 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
155 type5,arg5) \
156 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
158 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
162 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5,type6,arg6) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
165 type6 arg6) \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
171 #define __NR_sys_uname __NR_uname
172 #define __NR_sys_faccessat __NR_faccessat
173 #define __NR_sys_fchmodat __NR_fchmodat
174 #define __NR_sys_fchownat __NR_fchownat
175 #define __NR_sys_fstatat64 __NR_fstatat64
176 #define __NR_sys_futimesat __NR_futimesat
177 #define __NR_sys_getcwd1 __NR_getcwd
178 #define __NR_sys_getdents __NR_getdents
179 #define __NR_sys_getdents64 __NR_getdents64
180 #define __NR_sys_getpriority __NR_getpriority
181 #define __NR_sys_linkat __NR_linkat
182 #define __NR_sys_mkdirat __NR_mkdirat
183 #define __NR_sys_mknodat __NR_mknodat
184 #define __NR_sys_newfstatat __NR_newfstatat
185 #define __NR_sys_openat __NR_openat
186 #define __NR_sys_readlinkat __NR_readlinkat
187 #define __NR_sys_renameat __NR_renameat
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_symlinkat __NR_symlinkat
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_unlinkat __NR_unlinkat
194 #define __NR_sys_utimensat __NR_utimensat
195 #define __NR_sys_futex __NR_futex
196 #define __NR_sys_inotify_init __NR_inotify_init
197 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
198 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
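/*
 * The _syscallN() macros above generate static wrappers around the raw
 * syscall(2) interface, and the __NR_sys_xxx aliases let those wrappers
 * coexist with the libc functions of the same name.  For example, the
 * later invocation
 *
 *     _syscall3(int, sys_getdents, uint, fd,
 *               struct linux_dirent *, dirp, uint, count);
 *
 * expands, roughly, to
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * where __NR_sys_getdents resolves to the host's __NR_getdents.
 */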
200 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
201 defined(__s390x__)
202 #define __NR__llseek __NR_lseek
203 #endif
205 #ifdef __NR_gettid
206 _syscall0(int, gettid)
207 #else
208 /* This is a replacement for the host gettid() and must return a host
209 errno. */
210 static int gettid(void) {
211 return -ENOSYS;
213 #endif
214 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
215 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
216 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
217 #endif
218 _syscall2(int, sys_getpriority, int, which, int, who);
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
221 loff_t *, res, uint, wh);
222 #endif
223 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
224 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
225 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
226 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
227 #endif
228 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
229 _syscall2(int,sys_tkill,int,tid,int,sig)
230 #endif
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
233 #endif
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
236 #endif
237 #if defined(CONFIG_USE_NPTL)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
240 const struct timespec *,timeout,int *,uaddr2,int,val3)
241 #endif
242 #endif
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
250 static bitmask_transtbl fcntl_flags_tbl[] = {
251 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
252 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
253 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
254 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
255 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
256 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
257 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
258 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
259 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
260 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
261 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
262 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
263 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
264 #if defined(O_DIRECT)
265 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
266 #endif
267 { 0, 0, 0, 0 }
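/*
 * Each bitmask_transtbl row pairs a target mask/bits with the matching
 * host mask/bits; the all-zero entry terminates the table.  It is
 * consumed by the generic bitmask translation helpers (declared in the
 * shared linux-user headers, not in this file), e.g. later on open
 * flags are converted with
 *
 *     mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
 */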
270 #define COPY_UTSNAME_FIELD(dest, src) \
271 do { \
272 /* __NEW_UTS_LEN doesn't include terminating null */ \
273 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
274 (dest)[__NEW_UTS_LEN] = '\0'; \
275 } while (0)
277 static int sys_uname(struct new_utsname *buf)
279 struct utsname uts_buf;
281 if (uname(&uts_buf) < 0)
282 return (-1);
285  * Just in case these have some differences, we
286  * translate utsname to new_utsname (which is the
287  * struct the Linux kernel uses).
290 memset(buf, 0, sizeof(*buf));
291 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
292 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
293 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
294 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
295 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
296 #ifdef _GNU_SOURCE
297 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
298 #endif
299 return (0);
301 #undef COPY_UTSNAME_FIELD
304 static int sys_getcwd1(char *buf, size_t size)
306 if (getcwd(buf, size) == NULL) {
307 /* getcwd() sets errno */
308 return (-1);
310 return strlen(buf)+1;
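/*
 * Note that, unlike libc getcwd() which returns a pointer, sys_getcwd1()
 * follows the kernel getcwd syscall convention and returns the path
 * length including the terminating NUL (strlen(buf) + 1), or -1 with
 * errno set on failure.
 */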
313 #ifdef CONFIG_ATFILE
315  * The host system seems to have the atfile syscall stubs available.  We
316  * now enable them one by one as specified by the target's syscall_nr.h.
319 #ifdef TARGET_NR_faccessat
320 static int sys_faccessat(int dirfd, const char *pathname, int mode)
322 return (faccessat(dirfd, pathname, mode, 0));
324 #endif
325 #ifdef TARGET_NR_fchmodat
326 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
328 return (fchmodat(dirfd, pathname, mode, 0));
330 #endif
331 #if defined(TARGET_NR_fchownat)
332 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
333 gid_t group, int flags)
335 return (fchownat(dirfd, pathname, owner, group, flags));
337 #endif
338 #ifdef __NR_fstatat64
339 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
340 int flags)
342 return (fstatat(dirfd, pathname, buf, flags));
344 #endif
345 #ifdef __NR_newfstatat
346 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
347 int flags)
349 return (fstatat(dirfd, pathname, buf, flags));
351 #endif
352 #ifdef TARGET_NR_futimesat
353 static int sys_futimesat(int dirfd, const char *pathname,
354 const struct timeval times[2])
356 return (futimesat(dirfd, pathname, times));
358 #endif
359 #ifdef TARGET_NR_linkat
360 static int sys_linkat(int olddirfd, const char *oldpath,
361 int newdirfd, const char *newpath, int flags)
363 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
365 #endif
366 #ifdef TARGET_NR_mkdirat
367 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
369 return (mkdirat(dirfd, pathname, mode));
371 #endif
372 #ifdef TARGET_NR_mknodat
373 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
374 dev_t dev)
376 return (mknodat(dirfd, pathname, mode, dev));
378 #endif
379 #ifdef TARGET_NR_openat
380 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
383  * open(2) has an extra 'mode' parameter when called with
384  * the O_CREAT flag.
386 if ((flags & O_CREAT) != 0) {
387 va_list ap;
388 mode_t mode;
391 * Get the 'mode' parameter and translate it to
392 * host bits.
394 va_start(ap, flags);
395 mode = va_arg(ap, mode_t);
396 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
397 va_end(ap);
399 return (openat(dirfd, pathname, flags, mode));
401 return (openat(dirfd, pathname, flags));
403 #endif
404 #ifdef TARGET_NR_readlinkat
405 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
407 return (readlinkat(dirfd, pathname, buf, bufsiz));
409 #endif
410 #ifdef TARGET_NR_renameat
411 static int sys_renameat(int olddirfd, const char *oldpath,
412 int newdirfd, const char *newpath)
414 return (renameat(olddirfd, oldpath, newdirfd, newpath));
416 #endif
417 #ifdef TARGET_NR_symlinkat
418 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
420 return (symlinkat(oldpath, newdirfd, newpath));
422 #endif
423 #ifdef TARGET_NR_unlinkat
424 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
426 return (unlinkat(dirfd, pathname, flags));
428 #endif
429 #else /* !CONFIG_ATFILE */
432 * Try direct syscalls instead
434 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
435 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
436 #endif
437 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
438 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
439 #endif
440 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
441 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
442 uid_t,owner,gid_t,group,int,flags)
443 #endif
444 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
445 defined(__NR_fstatat64)
446 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
447 struct stat *,buf,int,flags)
448 #endif
449 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
450 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
451 const struct timeval *,times)
452 #endif
453 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
454 defined(__NR_newfstatat)
455 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
456 struct stat *,buf,int,flags)
457 #endif
458 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
459 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
460 int,newdirfd,const char *,newpath,int,flags)
461 #endif
462 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
463 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
464 #endif
465 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
466 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
467 mode_t,mode,dev_t,dev)
468 #endif
469 #if defined(TARGET_NR_openat) && defined(__NR_openat)
470 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
471 #endif
472 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
473 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
474 char *,buf,size_t,bufsize)
475 #endif
476 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
477 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
478 int,newdirfd,const char *,newpath)
479 #endif
480 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
481 _syscall3(int,sys_symlinkat,const char *,oldpath,
482 int,newdirfd,const char *,newpath)
483 #endif
484 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
485 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
486 #endif
488 #endif /* CONFIG_ATFILE */
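/*
 * Both branches above end up providing the same sys_*at() helpers: with
 * CONFIG_ATFILE the host libc wrappers (openat(), fchownat(), ...) are
 * used, otherwise the helpers are generated straight from the syscall
 * numbers via the _syscallN() macros.  Either way the syscall dispatcher
 * further down in this file can call e.g. sys_openat() with host
 * semantics (the call sites are outside the portion shown here).
 */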
490 #ifdef CONFIG_UTIMENSAT
491 static int sys_utimensat(int dirfd, const char *pathname,
492 const struct timespec times[2], int flags)
494 if (pathname == NULL)
495 return futimens(dirfd, times);
496 else
497 return utimensat(dirfd, pathname, times, flags);
499 #else
500 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
501 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
502 const struct timespec *,tsp,int,flags)
503 #endif
504 #endif /* CONFIG_UTIMENSAT */
506 #ifdef CONFIG_INOTIFY
507 #include <sys/inotify.h>
509 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
510 static int sys_inotify_init(void)
512 return (inotify_init());
514 #endif
515 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
516 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
518 return (inotify_add_watch(fd, pathname, mask));
520 #endif
521 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
522 static int sys_inotify_rm_watch(int fd, int32_t wd)
524 return (inotify_rm_watch(fd, wd));
526 #endif
527 #ifdef CONFIG_INOTIFY1
528 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
529 static int sys_inotify_init1(int flags)
531 return (inotify_init1(flags));
533 #endif
534 #endif
535 #else
536 /* Userspace can usually survive runtime without inotify */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY */
543 #if defined(TARGET_NR_ppoll)
544 #ifndef __NR_ppoll
545 # define __NR_ppoll -1
546 #endif
547 #define __NR_sys_ppoll __NR_ppoll
548 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
549 struct timespec *, timeout, const __sigset_t *, sigmask,
550 size_t, sigsetsize)
551 #endif
553 extern int personality(int);
554 extern int flock(int, int);
555 extern int setfsuid(int);
556 extern int setfsgid(int);
557 extern int setgroups(int, gid_t *);
559 #define ERRNO_TABLE_SIZE 1200
561 /* target_to_host_errno_table[] is initialized from
562 * host_to_target_errno_table[] in syscall_init(). */
563 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
567 * This list is the union of errno values overridden in asm-<arch>/errno.h
568 * minus the errnos that are not actually generic to all archs.
570 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
571 [EIDRM] = TARGET_EIDRM,
572 [ECHRNG] = TARGET_ECHRNG,
573 [EL2NSYNC] = TARGET_EL2NSYNC,
574 [EL3HLT] = TARGET_EL3HLT,
575 [EL3RST] = TARGET_EL3RST,
576 [ELNRNG] = TARGET_ELNRNG,
577 [EUNATCH] = TARGET_EUNATCH,
578 [ENOCSI] = TARGET_ENOCSI,
579 [EL2HLT] = TARGET_EL2HLT,
580 [EDEADLK] = TARGET_EDEADLK,
581 [ENOLCK] = TARGET_ENOLCK,
582 [EBADE] = TARGET_EBADE,
583 [EBADR] = TARGET_EBADR,
584 [EXFULL] = TARGET_EXFULL,
585 [ENOANO] = TARGET_ENOANO,
586 [EBADRQC] = TARGET_EBADRQC,
587 [EBADSLT] = TARGET_EBADSLT,
588 [EBFONT] = TARGET_EBFONT,
589 [ENOSTR] = TARGET_ENOSTR,
590 [ENODATA] = TARGET_ENODATA,
591 [ETIME] = TARGET_ETIME,
592 [ENOSR] = TARGET_ENOSR,
593 [ENONET] = TARGET_ENONET,
594 [ENOPKG] = TARGET_ENOPKG,
595 [EREMOTE] = TARGET_EREMOTE,
596 [ENOLINK] = TARGET_ENOLINK,
597 [EADV] = TARGET_EADV,
598 [ESRMNT] = TARGET_ESRMNT,
599 [ECOMM] = TARGET_ECOMM,
600 [EPROTO] = TARGET_EPROTO,
601 [EDOTDOT] = TARGET_EDOTDOT,
602 [EMULTIHOP] = TARGET_EMULTIHOP,
603 [EBADMSG] = TARGET_EBADMSG,
604 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
605 [EOVERFLOW] = TARGET_EOVERFLOW,
606 [ENOTUNIQ] = TARGET_ENOTUNIQ,
607 [EBADFD] = TARGET_EBADFD,
608 [EREMCHG] = TARGET_EREMCHG,
609 [ELIBACC] = TARGET_ELIBACC,
610 [ELIBBAD] = TARGET_ELIBBAD,
611 [ELIBSCN] = TARGET_ELIBSCN,
612 [ELIBMAX] = TARGET_ELIBMAX,
613 [ELIBEXEC] = TARGET_ELIBEXEC,
614 [EILSEQ] = TARGET_EILSEQ,
615 [ENOSYS] = TARGET_ENOSYS,
616 [ELOOP] = TARGET_ELOOP,
617 [ERESTART] = TARGET_ERESTART,
618 [ESTRPIPE] = TARGET_ESTRPIPE,
619 [ENOTEMPTY] = TARGET_ENOTEMPTY,
620 [EUSERS] = TARGET_EUSERS,
621 [ENOTSOCK] = TARGET_ENOTSOCK,
622 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
623 [EMSGSIZE] = TARGET_EMSGSIZE,
624 [EPROTOTYPE] = TARGET_EPROTOTYPE,
625 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
626 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
627 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
628 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
629 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
630 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
631 [EADDRINUSE] = TARGET_EADDRINUSE,
632 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
633 [ENETDOWN] = TARGET_ENETDOWN,
634 [ENETUNREACH] = TARGET_ENETUNREACH,
635 [ENETRESET] = TARGET_ENETRESET,
636 [ECONNABORTED] = TARGET_ECONNABORTED,
637 [ECONNRESET] = TARGET_ECONNRESET,
638 [ENOBUFS] = TARGET_ENOBUFS,
639 [EISCONN] = TARGET_EISCONN,
640 [ENOTCONN] = TARGET_ENOTCONN,
641 [EUCLEAN] = TARGET_EUCLEAN,
642 [ENOTNAM] = TARGET_ENOTNAM,
643 [ENAVAIL] = TARGET_ENAVAIL,
644 [EISNAM] = TARGET_EISNAM,
645 [EREMOTEIO] = TARGET_EREMOTEIO,
646 [ESHUTDOWN] = TARGET_ESHUTDOWN,
647 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
648 [ETIMEDOUT] = TARGET_ETIMEDOUT,
649 [ECONNREFUSED] = TARGET_ECONNREFUSED,
650 [EHOSTDOWN] = TARGET_EHOSTDOWN,
651 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
652 [EALREADY] = TARGET_EALREADY,
653 [EINPROGRESS] = TARGET_EINPROGRESS,
654 [ESTALE] = TARGET_ESTALE,
655 [ECANCELED] = TARGET_ECANCELED,
656 [ENOMEDIUM] = TARGET_ENOMEDIUM,
657 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
658 #ifdef ENOKEY
659 [ENOKEY] = TARGET_ENOKEY,
660 #endif
661 #ifdef EKEYEXPIRED
662 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
663 #endif
664 #ifdef EKEYREVOKED
665 [EKEYREVOKED] = TARGET_EKEYREVOKED,
666 #endif
667 #ifdef EKEYREJECTED
668 [EKEYREJECTED] = TARGET_EKEYREJECTED,
669 #endif
670 #ifdef EOWNERDEAD
671 [EOWNERDEAD] = TARGET_EOWNERDEAD,
672 #endif
673 #ifdef ENOTRECOVERABLE
674 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
675 #endif
678 static inline int host_to_target_errno(int err)
680 if(host_to_target_errno_table[err])
681 return host_to_target_errno_table[err];
682 return err;
685 static inline int target_to_host_errno(int err)
687 if (target_to_host_errno_table[err])
688 return target_to_host_errno_table[err];
689 return err;
692 static inline abi_long get_errno(abi_long ret)
694 if (ret == -1)
695 return -host_to_target_errno(errno);
696 else
697 return ret;
700 static inline int is_error(abi_long ret)
702 return (abi_ulong)ret >= (abi_ulong)(-4096);
705 char *target_strerror(int err)
707 return strerror(target_to_host_errno(err));
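/*
 * Illustrative sketch (not from the upstream source; the example_*
 * names are hypothetical): host failures are folded into a negated
 * *target* errno by get_errno(), which is_error() then recognises via
 * the -4095..-1 window.
 */
#if 0
static abi_long example_read(int example_fd, void *buf, size_t count)
{
    abi_long ret = get_errno(read(example_fd, buf, count));
    if (is_error(ret)) {
        /* ret is -TARGET_Exxx, already translated by get_errno() */
        gemu_log("read failed: %s\n", target_strerror(-ret));
    }
    return ret;
}
#endif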
710 static abi_ulong target_brk;
711 static abi_ulong target_original_brk;
713 void target_set_brk(abi_ulong new_brk)
715 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
718 /* do_brk() must return target values and target errnos. */
719 abi_long do_brk(abi_ulong new_brk)
721 abi_ulong brk_page;
722 abi_long mapped_addr;
723 int new_alloc_size;
725 if (!new_brk)
726 return target_brk;
727 if (new_brk < target_original_brk)
728 return target_brk;
730 brk_page = HOST_PAGE_ALIGN(target_brk);
732 /* If the new brk is less than this, set it and we're done... */
733 if (new_brk < brk_page) {
734 target_brk = new_brk;
735 return target_brk;
738 /* We need to allocate more memory after the brk... */
739 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
740 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
741 PROT_READ|PROT_WRITE,
742 MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
744 #if defined(TARGET_ALPHA)
745 /* We (partially) emulate OSF/1 on Alpha, which requires we
746 return a proper errno, not an unchanged brk value. */
747 if (is_error(mapped_addr)) {
748 return -TARGET_ENOMEM;
750 #endif
752 if (!is_error(mapped_addr)) {
753 target_brk = new_brk;
755 return target_brk;
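/*
 * Worked example (assuming a 4 KiB host page size): with target_brk at
 * 0x20000, do_brk(0x20800) finds brk_page == 0x20000, so this is not a
 * shrinking request and target_mmap() is asked for
 * HOST_PAGE_ALIGN(0x20800 - 0x20000 + 1) == 0x1000 bytes at brk_page
 * with MAP_FIXED before target_brk is advanced.  A new_brk of 0, or one
 * below target_original_brk, just reports the current break, matching
 * kernel brk() behaviour.
 */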
758 static inline abi_long copy_from_user_fdset(fd_set *fds,
759 abi_ulong target_fds_addr,
760 int n)
762 int i, nw, j, k;
763 abi_ulong b, *target_fds;
765 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
766 if (!(target_fds = lock_user(VERIFY_READ,
767 target_fds_addr,
768 sizeof(abi_ulong) * nw,
769 1)))
770 return -TARGET_EFAULT;
772 FD_ZERO(fds);
773 k = 0;
774 for (i = 0; i < nw; i++) {
775 /* grab the abi_ulong */
776 __get_user(b, &target_fds[i]);
777 for (j = 0; j < TARGET_ABI_BITS; j++) {
778 /* check the bit inside the abi_ulong */
779 if ((b >> j) & 1)
780 FD_SET(k, fds);
781 k++;
785 unlock_user(target_fds, target_fds_addr, 0);
787 return 0;
790 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
791 const fd_set *fds,
792 int n)
794 int i, nw, j, k;
795 abi_long v;
796 abi_ulong *target_fds;
798 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
799 if (!(target_fds = lock_user(VERIFY_WRITE,
800 target_fds_addr,
801 sizeof(abi_ulong) * nw,
802 0)))
803 return -TARGET_EFAULT;
805 k = 0;
806 for (i = 0; i < nw; i++) {
807 v = 0;
808 for (j = 0; j < TARGET_ABI_BITS; j++) {
809 v |= ((FD_ISSET(k, fds) != 0) << j);
810 k++;
812 __put_user(v, &target_fds[i]);
815 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
817 return 0;
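/*
 * Worked example: the two fdset helpers above marshal fd_set bits
 * through guest memory one abi_ulong at a time.  With TARGET_ABI_BITS
 * == 32 and n == 70 descriptors, nw = (70 + 31) / 32 == 3 words are
 * locked, and descriptor k is bit (k % 32) of word (k / 32) on the
 * target side regardless of the host fd_set layout; __get_user() /
 * __put_user() take care of the byte order.
 */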
820 #if defined(__alpha__)
821 #define HOST_HZ 1024
822 #else
823 #define HOST_HZ 100
824 #endif
826 static inline abi_long host_to_target_clock_t(long ticks)
828 #if HOST_HZ == TARGET_HZ
829 return ticks;
830 #else
831 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
832 #endif
835 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
836 const struct rusage *rusage)
838 struct target_rusage *target_rusage;
840 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
841 return -TARGET_EFAULT;
842 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
843 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
844 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
845 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
846 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
847 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
848 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
849 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
850 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
851 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
852 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
853 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
854 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
855 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
856 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
857 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
858 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
859 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
860 unlock_user_struct(target_rusage, target_addr, 1);
862 return 0;
865 static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
867 if (target_rlim == TARGET_RLIM_INFINITY)
868 return RLIM_INFINITY;
869 else
870 return tswapl(target_rlim);
873 static inline target_ulong host_to_target_rlim(rlim_t rlim)
875 if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
876 return TARGET_RLIM_INFINITY;
877 else
878 return tswapl(rlim);
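/*
 * rlimit values are passed through almost verbatim; the special cases
 * are the INFINITY sentinels and host values that do not fit in a
 * target_long, which host_to_target_rlim() clamps to
 * TARGET_RLIM_INFINITY rather than silently truncating.
 */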
881 static inline abi_long copy_from_user_timeval(struct timeval *tv,
882 abi_ulong target_tv_addr)
884 struct target_timeval *target_tv;
886 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
887 return -TARGET_EFAULT;
889 __get_user(tv->tv_sec, &target_tv->tv_sec);
890 __get_user(tv->tv_usec, &target_tv->tv_usec);
892 unlock_user_struct(target_tv, target_tv_addr, 0);
894 return 0;
897 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
898 const struct timeval *tv)
900 struct target_timeval *target_tv;
902 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
903 return -TARGET_EFAULT;
905 __put_user(tv->tv_sec, &target_tv->tv_sec);
906 __put_user(tv->tv_usec, &target_tv->tv_usec);
908 unlock_user_struct(target_tv, target_tv_addr, 1);
910 return 0;
913 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
914 #include <mqueue.h>
916 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
917 abi_ulong target_mq_attr_addr)
919 struct target_mq_attr *target_mq_attr;
921 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
922 target_mq_attr_addr, 1))
923 return -TARGET_EFAULT;
925 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
926 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
927 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
928 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
930 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
932 return 0;
935 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
936 const struct mq_attr *attr)
938 struct target_mq_attr *target_mq_attr;
940 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
941 target_mq_attr_addr, 0))
942 return -TARGET_EFAULT;
944 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
945 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
946 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
947 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
949 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
951 return 0;
953 #endif
955 /* do_select() must return target values and target errnos. */
956 static abi_long do_select(int n,
957 abi_ulong rfd_addr, abi_ulong wfd_addr,
958 abi_ulong efd_addr, abi_ulong target_tv_addr)
960 fd_set rfds, wfds, efds;
961 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
962 struct timeval tv, *tv_ptr;
963 abi_long ret;
965 if (rfd_addr) {
966 if (copy_from_user_fdset(&rfds, rfd_addr, n))
967 return -TARGET_EFAULT;
968 rfds_ptr = &rfds;
969 } else {
970 rfds_ptr = NULL;
972 if (wfd_addr) {
973 if (copy_from_user_fdset(&wfds, wfd_addr, n))
974 return -TARGET_EFAULT;
975 wfds_ptr = &wfds;
976 } else {
977 wfds_ptr = NULL;
979 if (efd_addr) {
980 if (copy_from_user_fdset(&efds, efd_addr, n))
981 return -TARGET_EFAULT;
982 efds_ptr = &efds;
983 } else {
984 efds_ptr = NULL;
987 if (target_tv_addr) {
988 if (copy_from_user_timeval(&tv, target_tv_addr))
989 return -TARGET_EFAULT;
990 tv_ptr = &tv;
991 } else {
992 tv_ptr = NULL;
995 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
997 if (!is_error(ret)) {
998 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
999 return -TARGET_EFAULT;
1000 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1001 return -TARGET_EFAULT;
1002 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1003 return -TARGET_EFAULT;
1005 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1006 return -TARGET_EFAULT;
1009 return ret;
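/*
 * do_select() copies every supplied fd set plus the timeval in from
 * guest memory, runs the host select(), and writes the (possibly
 * modified) sets and remaining timeout back only when the host call
 * did not fail; a -TARGET_EFAULT from the copy-back overrides the
 * select() result.
 */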
1012 static abi_long do_pipe2(int host_pipe[], int flags)
1014 #ifdef CONFIG_PIPE2
1015 return pipe2(host_pipe, flags);
1016 #else
1017 return -ENOSYS;
1018 #endif
1021 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1022 int flags, int is_pipe2)
1024 int host_pipe[2];
1025 abi_long ret;
1026 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1028 if (is_error(ret))
1029 return get_errno(ret);
1031 /* Several targets have special calling conventions for the original
1032 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1033 if (!is_pipe2) {
1034 #if defined(TARGET_ALPHA)
1035 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1036 return host_pipe[0];
1037 #elif defined(TARGET_MIPS)
1038 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1039 return host_pipe[0];
1040 #elif defined(TARGET_SH4)
1041 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1042 return host_pipe[0];
1043 #endif
1046 if (put_user_s32(host_pipe[0], pipedes)
1047 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1048 return -TARGET_EFAULT;
1049 return get_errno(ret);
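/*
 * For the plain pipe syscall, Alpha, MIPS and SH4 return the read end
 * as the syscall result and the write end in a second register (IR_A4,
 * gpr[3] and gregs[1] respectively, as set above); the fd array in
 * guest memory is only filled in for the remaining targets and for
 * pipe2.
 */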
1052 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1053 abi_ulong target_addr,
1054 socklen_t len)
1056 struct target_ip_mreqn *target_smreqn;
1058 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1059 if (!target_smreqn)
1060 return -TARGET_EFAULT;
1061 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1062 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1063 if (len == sizeof(struct target_ip_mreqn))
1064 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
1065 unlock_user(target_smreqn, target_addr, 0);
1067 return 0;
1070 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1071 abi_ulong target_addr,
1072 socklen_t len)
1074 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1075 sa_family_t sa_family;
1076 struct target_sockaddr *target_saddr;
1078 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1079 if (!target_saddr)
1080 return -TARGET_EFAULT;
1082 sa_family = tswap16(target_saddr->sa_family);
1084     /* Oops. The caller might send an incomplete sun_path; sun_path
1085 * must be terminated by \0 (see the manual page), but
1086 * unfortunately it is quite common to specify sockaddr_un
1087 * length as "strlen(x->sun_path)" while it should be
1088 * "strlen(...) + 1". We'll fix that here if needed.
1089      * The Linux kernel has a similar feature.
1092 if (sa_family == AF_UNIX) {
1093 if (len < unix_maxlen && len > 0) {
1094 char *cp = (char*)target_saddr;
1096 if ( cp[len-1] && !cp[len] )
1097 len++;
1099 if (len > unix_maxlen)
1100 len = unix_maxlen;
1103 memcpy(addr, target_saddr, len);
1104 addr->sa_family = sa_family;
1105 unlock_user(target_saddr, target_addr, 0);
1107 return 0;
1110 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1111 struct sockaddr *addr,
1112 socklen_t len)
1114 struct target_sockaddr *target_saddr;
1116 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1117 if (!target_saddr)
1118 return -TARGET_EFAULT;
1119 memcpy(target_saddr, addr, len);
1120 target_saddr->sa_family = tswap16(addr->sa_family);
1121 unlock_user(target_saddr, target_addr, len);
1123 return 0;
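/*
 * Worked example: for an AF_UNIX address whose length was passed as
 * strlen(sun_path), i.e. without the trailing NUL,
 * target_to_host_sockaddr() extends len by one byte when the byte just
 * past the supplied length is already zero, mirroring the kernel's own
 * leniency, and then clamps len to sizeof(struct sockaddr_un) before
 * copying.
 */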
1126 /* ??? Should this also swap msgh->name? */
1127 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1128 struct target_msghdr *target_msgh)
1130 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1131 abi_long msg_controllen;
1132 abi_ulong target_cmsg_addr;
1133 struct target_cmsghdr *target_cmsg;
1134 socklen_t space = 0;
1136 msg_controllen = tswapl(target_msgh->msg_controllen);
1137 if (msg_controllen < sizeof (struct target_cmsghdr))
1138 goto the_end;
1139 target_cmsg_addr = tswapl(target_msgh->msg_control);
1140 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1141 if (!target_cmsg)
1142 return -TARGET_EFAULT;
1144 while (cmsg && target_cmsg) {
1145 void *data = CMSG_DATA(cmsg);
1146 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1148 int len = tswapl(target_cmsg->cmsg_len)
1149 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1151 space += CMSG_SPACE(len);
1152 if (space > msgh->msg_controllen) {
1153 space -= CMSG_SPACE(len);
1154 gemu_log("Host cmsg overflow\n");
1155 break;
1158 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1159 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1160 cmsg->cmsg_len = CMSG_LEN(len);
1162 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1163 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1164 memcpy(data, target_data, len);
1165 } else {
1166 int *fd = (int *)data;
1167 int *target_fd = (int *)target_data;
1168 int i, numfds = len / sizeof(int);
1170 for (i = 0; i < numfds; i++)
1171 fd[i] = tswap32(target_fd[i]);
1174 cmsg = CMSG_NXTHDR(msgh, cmsg);
1175 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1177 unlock_user(target_cmsg, target_cmsg_addr, 0);
1178 the_end:
1179 msgh->msg_controllen = space;
1180 return 0;
1183 /* ??? Should this also swap msgh->name? */
1184 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1185 struct msghdr *msgh)
1187 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1188 abi_long msg_controllen;
1189 abi_ulong target_cmsg_addr;
1190 struct target_cmsghdr *target_cmsg;
1191 socklen_t space = 0;
1193 msg_controllen = tswapl(target_msgh->msg_controllen);
1194 if (msg_controllen < sizeof (struct target_cmsghdr))
1195 goto the_end;
1196 target_cmsg_addr = tswapl(target_msgh->msg_control);
1197 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1198 if (!target_cmsg)
1199 return -TARGET_EFAULT;
1201 while (cmsg && target_cmsg) {
1202 void *data = CMSG_DATA(cmsg);
1203 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1205 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1207 space += TARGET_CMSG_SPACE(len);
1208 if (space > msg_controllen) {
1209 space -= TARGET_CMSG_SPACE(len);
1210 gemu_log("Target cmsg overflow\n");
1211 break;
1214 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1215 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1216 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1218 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1219 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1220 memcpy(target_data, data, len);
1221 } else {
1222 int *fd = (int *)data;
1223 int *target_fd = (int *)target_data;
1224 int i, numfds = len / sizeof(int);
1226 for (i = 0; i < numfds; i++)
1227 target_fd[i] = tswap32(fd[i]);
1230 cmsg = CMSG_NXTHDR(msgh, cmsg);
1231 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1233 unlock_user(target_cmsg, target_cmsg_addr, space);
1234 the_end:
1235 target_msgh->msg_controllen = tswapl(space);
1236 return 0;
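/*
 * The two cmsg converters above only understand SCM_RIGHTS at socket
 * level, whose payload is an array of file descriptors that must be
 * byte swapped individually; any other ancillary message is copied
 * verbatim after an "Unsupported ancillary data" warning, and messages
 * that would overflow the destination control buffer are dropped with
 * a "cmsg overflow" log.
 */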
1239 /* do_setsockopt() Must return target values and target errnos. */
1240 static abi_long do_setsockopt(int sockfd, int level, int optname,
1241 abi_ulong optval_addr, socklen_t optlen)
1243 abi_long ret;
1244 int val;
1245 struct ip_mreqn *ip_mreq;
1246 struct ip_mreq_source *ip_mreq_source;
1248 switch(level) {
1249 case SOL_TCP:
1250 /* TCP options all take an 'int' value. */
1251 if (optlen < sizeof(uint32_t))
1252 return -TARGET_EINVAL;
1254 if (get_user_u32(val, optval_addr))
1255 return -TARGET_EFAULT;
1256 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1257 break;
1258 case SOL_IP:
1259 switch(optname) {
1260 case IP_TOS:
1261 case IP_TTL:
1262 case IP_HDRINCL:
1263 case IP_ROUTER_ALERT:
1264 case IP_RECVOPTS:
1265 case IP_RETOPTS:
1266 case IP_PKTINFO:
1267 case IP_MTU_DISCOVER:
1268 case IP_RECVERR:
1269 case IP_RECVTOS:
1270 #ifdef IP_FREEBIND
1271 case IP_FREEBIND:
1272 #endif
1273 case IP_MULTICAST_TTL:
1274 case IP_MULTICAST_LOOP:
1275 val = 0;
1276 if (optlen >= sizeof(uint32_t)) {
1277 if (get_user_u32(val, optval_addr))
1278 return -TARGET_EFAULT;
1279 } else if (optlen >= 1) {
1280 if (get_user_u8(val, optval_addr))
1281 return -TARGET_EFAULT;
1283 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1284 break;
1285 case IP_ADD_MEMBERSHIP:
1286 case IP_DROP_MEMBERSHIP:
1287 if (optlen < sizeof (struct target_ip_mreq) ||
1288 optlen > sizeof (struct target_ip_mreqn))
1289 return -TARGET_EINVAL;
1291 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1292 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1293 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1294 break;
1296 case IP_BLOCK_SOURCE:
1297 case IP_UNBLOCK_SOURCE:
1298 case IP_ADD_SOURCE_MEMBERSHIP:
1299 case IP_DROP_SOURCE_MEMBERSHIP:
1300 if (optlen != sizeof (struct target_ip_mreq_source))
1301 return -TARGET_EINVAL;
1303 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1304 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1305 unlock_user (ip_mreq_source, optval_addr, 0);
1306 break;
1308 default:
1309 goto unimplemented;
1311 break;
1312 case TARGET_SOL_SOCKET:
1313 switch (optname) {
1314 /* Options with 'int' argument. */
1315 case TARGET_SO_DEBUG:
1316 optname = SO_DEBUG;
1317 break;
1318 case TARGET_SO_REUSEADDR:
1319 optname = SO_REUSEADDR;
1320 break;
1321 case TARGET_SO_TYPE:
1322 optname = SO_TYPE;
1323 break;
1324 case TARGET_SO_ERROR:
1325 optname = SO_ERROR;
1326 break;
1327 case TARGET_SO_DONTROUTE:
1328 optname = SO_DONTROUTE;
1329 break;
1330 case TARGET_SO_BROADCAST:
1331 optname = SO_BROADCAST;
1332 break;
1333 case TARGET_SO_SNDBUF:
1334 optname = SO_SNDBUF;
1335 break;
1336 case TARGET_SO_RCVBUF:
1337 optname = SO_RCVBUF;
1338 break;
1339 case TARGET_SO_KEEPALIVE:
1340 optname = SO_KEEPALIVE;
1341 break;
1342 case TARGET_SO_OOBINLINE:
1343 optname = SO_OOBINLINE;
1344 break;
1345 case TARGET_SO_NO_CHECK:
1346 optname = SO_NO_CHECK;
1347 break;
1348 case TARGET_SO_PRIORITY:
1349 optname = SO_PRIORITY;
1350 break;
1351 #ifdef SO_BSDCOMPAT
1352 case TARGET_SO_BSDCOMPAT:
1353 optname = SO_BSDCOMPAT;
1354 break;
1355 #endif
1356 case TARGET_SO_PASSCRED:
1357 optname = SO_PASSCRED;
1358 break;
1359 case TARGET_SO_TIMESTAMP:
1360 optname = SO_TIMESTAMP;
1361 break;
1362 case TARGET_SO_RCVLOWAT:
1363 optname = SO_RCVLOWAT;
1364 break;
1365 case TARGET_SO_RCVTIMEO:
1366 optname = SO_RCVTIMEO;
1367 break;
1368 case TARGET_SO_SNDTIMEO:
1369 optname = SO_SNDTIMEO;
1370 break;
1372 default:
1373 goto unimplemented;
1375 if (optlen < sizeof(uint32_t))
1376 return -TARGET_EINVAL;
1378 if (get_user_u32(val, optval_addr))
1379 return -TARGET_EFAULT;
1380 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1381 break;
1382 default:
1383 unimplemented:
1384 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1385 ret = -TARGET_ENOPROTOOPT;
1387 return ret;
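/*
 * do_setsockopt() handles three shapes of option: SOL_TCP and the
 * TARGET_SO_* options are plain ints, so the value is read with
 * get_user_u32() and the TARGET_SO_* constant is remapped to the host
 * SO_* constant before calling setsockopt(); the IP multicast options
 * instead copy a struct (target_ip_mreq(n) or target_ip_mreq_source)
 * across; anything else reaches the "Unsupported setsockopt" log and
 * returns -TARGET_ENOPROTOOPT.
 */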
1390 /* do_getsockopt() Must return target values and target errnos. */
1391 static abi_long do_getsockopt(int sockfd, int level, int optname,
1392 abi_ulong optval_addr, abi_ulong optlen)
1394 abi_long ret;
1395 int len, val;
1396 socklen_t lv;
1398 switch(level) {
1399 case TARGET_SOL_SOCKET:
1400 level = SOL_SOCKET;
1401 switch (optname) {
1402 /* These don't just return a single integer */
1403 case TARGET_SO_LINGER:
1404 case TARGET_SO_RCVTIMEO:
1405 case TARGET_SO_SNDTIMEO:
1406 case TARGET_SO_PEERCRED:
1407 case TARGET_SO_PEERNAME:
1408 goto unimplemented;
1409 /* Options with 'int' argument. */
1410 case TARGET_SO_DEBUG:
1411 optname = SO_DEBUG;
1412 goto int_case;
1413 case TARGET_SO_REUSEADDR:
1414 optname = SO_REUSEADDR;
1415 goto int_case;
1416 case TARGET_SO_TYPE:
1417 optname = SO_TYPE;
1418 goto int_case;
1419 case TARGET_SO_ERROR:
1420 optname = SO_ERROR;
1421 goto int_case;
1422 case TARGET_SO_DONTROUTE:
1423 optname = SO_DONTROUTE;
1424 goto int_case;
1425 case TARGET_SO_BROADCAST:
1426 optname = SO_BROADCAST;
1427 goto int_case;
1428 case TARGET_SO_SNDBUF:
1429 optname = SO_SNDBUF;
1430 goto int_case;
1431 case TARGET_SO_RCVBUF:
1432 optname = SO_RCVBUF;
1433 goto int_case;
1434 case TARGET_SO_KEEPALIVE:
1435 optname = SO_KEEPALIVE;
1436 goto int_case;
1437 case TARGET_SO_OOBINLINE:
1438 optname = SO_OOBINLINE;
1439 goto int_case;
1440 case TARGET_SO_NO_CHECK:
1441 optname = SO_NO_CHECK;
1442 goto int_case;
1443 case TARGET_SO_PRIORITY:
1444 optname = SO_PRIORITY;
1445 goto int_case;
1446 #ifdef SO_BSDCOMPAT
1447 case TARGET_SO_BSDCOMPAT:
1448 optname = SO_BSDCOMPAT;
1449 goto int_case;
1450 #endif
1451 case TARGET_SO_PASSCRED:
1452 optname = SO_PASSCRED;
1453 goto int_case;
1454 case TARGET_SO_TIMESTAMP:
1455 optname = SO_TIMESTAMP;
1456 goto int_case;
1457 case TARGET_SO_RCVLOWAT:
1458 optname = SO_RCVLOWAT;
1459 goto int_case;
1460 default:
1461 goto int_case;
1463 break;
1464 case SOL_TCP:
1465 /* TCP options all take an 'int' value. */
1466 int_case:
1467 if (get_user_u32(len, optlen))
1468 return -TARGET_EFAULT;
1469 if (len < 0)
1470 return -TARGET_EINVAL;
1471 lv = sizeof(lv);
1472 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1473 if (ret < 0)
1474 return ret;
1475 if (len > lv)
1476 len = lv;
1477 if (len == 4) {
1478 if (put_user_u32(val, optval_addr))
1479 return -TARGET_EFAULT;
1480 } else {
1481 if (put_user_u8(val, optval_addr))
1482 return -TARGET_EFAULT;
1484 if (put_user_u32(len, optlen))
1485 return -TARGET_EFAULT;
1486 break;
1487 case SOL_IP:
1488 switch(optname) {
1489 case IP_TOS:
1490 case IP_TTL:
1491 case IP_HDRINCL:
1492 case IP_ROUTER_ALERT:
1493 case IP_RECVOPTS:
1494 case IP_RETOPTS:
1495 case IP_PKTINFO:
1496 case IP_MTU_DISCOVER:
1497 case IP_RECVERR:
1498 case IP_RECVTOS:
1499 #ifdef IP_FREEBIND
1500 case IP_FREEBIND:
1501 #endif
1502 case IP_MULTICAST_TTL:
1503 case IP_MULTICAST_LOOP:
1504 if (get_user_u32(len, optlen))
1505 return -TARGET_EFAULT;
1506 if (len < 0)
1507 return -TARGET_EINVAL;
1508 lv = sizeof(lv);
1509 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1510 if (ret < 0)
1511 return ret;
1512 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1513 len = 1;
1514 if (put_user_u32(len, optlen)
1515 || put_user_u8(val, optval_addr))
1516 return -TARGET_EFAULT;
1517 } else {
1518 if (len > sizeof(int))
1519 len = sizeof(int);
1520 if (put_user_u32(len, optlen)
1521 || put_user_u32(val, optval_addr))
1522 return -TARGET_EFAULT;
1524 break;
1525 default:
1526 ret = -TARGET_ENOPROTOOPT;
1527 break;
1529 break;
1530 default:
1531 unimplemented:
1532 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1533 level, optname);
1534 ret = -TARGET_EOPNOTSUPP;
1535 break;
1537 return ret;
1540 /* FIXME
1541  * lock_iovec()/unlock_iovec() have a return code of 0 for success, whereas
1542  * the other lock functions have a return code of 0 for failure.
1544 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1545 int count, int copy)
1547 struct target_iovec *target_vec;
1548 abi_ulong base;
1549 int i;
1551 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1552 if (!target_vec)
1553 return -TARGET_EFAULT;
1554 for(i = 0;i < count; i++) {
1555 base = tswapl(target_vec[i].iov_base);
1556 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1557 if (vec[i].iov_len != 0) {
1558 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1559             /* Don't check the lock_user return value. We must call writev even
1560                if an element has an invalid base address. */
1561 } else {
1562 /* zero length pointer is ignored */
1563 vec[i].iov_base = NULL;
1566 unlock_user (target_vec, target_addr, 0);
1567 return 0;
1570 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1571 int count, int copy)
1573 struct target_iovec *target_vec;
1574 abi_ulong base;
1575 int i;
1577 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1578 if (!target_vec)
1579 return -TARGET_EFAULT;
1580 for(i = 0;i < count; i++) {
1581 if (target_vec[i].iov_base) {
1582 base = tswapl(target_vec[i].iov_base);
1583 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1586 unlock_user (target_vec, target_addr, 0);
1588 return 0;
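/*
 * lock_iovec() turns a target_iovec array into a host struct iovec
 * array, locking each non-empty buffer with the requested access type;
 * unlock_iovec() releases the buffers again and copies data back to
 * the guest only when 'copy' is set.  As the FIXME above notes, these
 * return 0 on success and a negative target errno on failure, unlike
 * the lock_user helpers they are built on.
 */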
1591 /* do_socket() Must return target values and target errnos. */
1592 static abi_long do_socket(int domain, int type, int protocol)
1594 #if defined(TARGET_MIPS)
1595 switch(type) {
1596 case TARGET_SOCK_DGRAM:
1597 type = SOCK_DGRAM;
1598 break;
1599 case TARGET_SOCK_STREAM:
1600 type = SOCK_STREAM;
1601 break;
1602 case TARGET_SOCK_RAW:
1603 type = SOCK_RAW;
1604 break;
1605 case TARGET_SOCK_RDM:
1606 type = SOCK_RDM;
1607 break;
1608 case TARGET_SOCK_SEQPACKET:
1609 type = SOCK_SEQPACKET;
1610 break;
1611 case TARGET_SOCK_PACKET:
1612 type = SOCK_PACKET;
1613 break;
1615 #endif
1616 if (domain == PF_NETLINK)
1617         return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1618 return get_errno(socket(domain, type, protocol));
1621 /* do_bind() Must return target values and target errnos. */
1622 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1623 socklen_t addrlen)
1625 void *addr;
1626 abi_long ret;
1628 if ((int)addrlen < 0) {
1629 return -TARGET_EINVAL;
1632 addr = alloca(addrlen+1);
1634 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1635 if (ret)
1636 return ret;
1638 return get_errno(bind(sockfd, addr, addrlen));
1641 /* do_connect() Must return target values and target errnos. */
1642 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1643 socklen_t addrlen)
1645 void *addr;
1646 abi_long ret;
1648 if ((int)addrlen < 0) {
1649 return -TARGET_EINVAL;
1652 addr = alloca(addrlen);
1654 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1655 if (ret)
1656 return ret;
1658 return get_errno(connect(sockfd, addr, addrlen));
1661 /* do_sendrecvmsg() Must return target values and target errnos. */
1662 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1663 int flags, int send)
1665 abi_long ret, len;
1666 struct target_msghdr *msgp;
1667 struct msghdr msg;
1668 int count;
1669 struct iovec *vec;
1670 abi_ulong target_vec;
1672 /* FIXME */
1673 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1674 msgp,
1675 target_msg,
1676 send ? 1 : 0))
1677 return -TARGET_EFAULT;
1678 if (msgp->msg_name) {
1679 msg.msg_namelen = tswap32(msgp->msg_namelen);
1680 msg.msg_name = alloca(msg.msg_namelen);
1681 ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1682 msg.msg_namelen);
1683 if (ret) {
1684 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1685 return ret;
1687 } else {
1688 msg.msg_name = NULL;
1689 msg.msg_namelen = 0;
1691 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1692 msg.msg_control = alloca(msg.msg_controllen);
1693 msg.msg_flags = tswap32(msgp->msg_flags);
1695 count = tswapl(msgp->msg_iovlen);
1696 vec = alloca(count * sizeof(struct iovec));
1697 target_vec = tswapl(msgp->msg_iov);
1698 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1699 msg.msg_iovlen = count;
1700 msg.msg_iov = vec;
1702 if (send) {
1703 ret = target_to_host_cmsg(&msg, msgp);
1704 if (ret == 0)
1705 ret = get_errno(sendmsg(fd, &msg, flags));
1706 } else {
1707 ret = get_errno(recvmsg(fd, &msg, flags));
1708 if (!is_error(ret)) {
1709 len = ret;
1710 ret = host_to_target_cmsg(msgp, &msg);
1711 if (!is_error(ret))
1712 ret = len;
1715 unlock_iovec(vec, target_vec, count, !send);
1716 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1717 return ret;
1720 /* do_accept() Must return target values and target errnos. */
1721 static abi_long do_accept(int fd, abi_ulong target_addr,
1722 abi_ulong target_addrlen_addr)
1724 socklen_t addrlen;
1725 void *addr;
1726 abi_long ret;
1728 if (target_addr == 0)
1729 return get_errno(accept(fd, NULL, NULL));
1731     /* Linux returns EINVAL if the addrlen pointer is invalid */
1732 if (get_user_u32(addrlen, target_addrlen_addr))
1733 return -TARGET_EINVAL;
1735 if ((int)addrlen < 0) {
1736 return -TARGET_EINVAL;
1739 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1740 return -TARGET_EINVAL;
1742 addr = alloca(addrlen);
1744 ret = get_errno(accept(fd, addr, &addrlen));
1745 if (!is_error(ret)) {
1746 host_to_target_sockaddr(target_addr, addr, addrlen);
1747 if (put_user_u32(addrlen, target_addrlen_addr))
1748 ret = -TARGET_EFAULT;
1750 return ret;
1753 /* do_getpeername() Must return target values and target errnos. */
1754 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1755 abi_ulong target_addrlen_addr)
1757 socklen_t addrlen;
1758 void *addr;
1759 abi_long ret;
1761 if (get_user_u32(addrlen, target_addrlen_addr))
1762 return -TARGET_EFAULT;
1764 if ((int)addrlen < 0) {
1765 return -TARGET_EINVAL;
1768 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1769 return -TARGET_EFAULT;
1771 addr = alloca(addrlen);
1773 ret = get_errno(getpeername(fd, addr, &addrlen));
1774 if (!is_error(ret)) {
1775 host_to_target_sockaddr(target_addr, addr, addrlen);
1776 if (put_user_u32(addrlen, target_addrlen_addr))
1777 ret = -TARGET_EFAULT;
1779 return ret;
1782 /* do_getsockname() Must return target values and target errnos. */
1783 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1784 abi_ulong target_addrlen_addr)
1786 socklen_t addrlen;
1787 void *addr;
1788 abi_long ret;
1790 if (get_user_u32(addrlen, target_addrlen_addr))
1791 return -TARGET_EFAULT;
1793 if ((int)addrlen < 0) {
1794 return -TARGET_EINVAL;
1797 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1798 return -TARGET_EFAULT;
1800 addr = alloca(addrlen);
1802 ret = get_errno(getsockname(fd, addr, &addrlen));
1803 if (!is_error(ret)) {
1804 host_to_target_sockaddr(target_addr, addr, addrlen);
1805 if (put_user_u32(addrlen, target_addrlen_addr))
1806 ret = -TARGET_EFAULT;
1808 return ret;
1811 /* do_socketpair() Must return target values and target errnos. */
1812 static abi_long do_socketpair(int domain, int type, int protocol,
1813 abi_ulong target_tab_addr)
1815 int tab[2];
1816 abi_long ret;
1818 ret = get_errno(socketpair(domain, type, protocol, tab));
1819 if (!is_error(ret)) {
1820 if (put_user_s32(tab[0], target_tab_addr)
1821 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1822 ret = -TARGET_EFAULT;
1824 return ret;
1827 /* do_sendto() Must return target values and target errnos. */
1828 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1829 abi_ulong target_addr, socklen_t addrlen)
1831 void *addr;
1832 void *host_msg;
1833 abi_long ret;
1835 if ((int)addrlen < 0) {
1836 return -TARGET_EINVAL;
1839 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1840 if (!host_msg)
1841 return -TARGET_EFAULT;
1842 if (target_addr) {
1843 addr = alloca(addrlen);
1844 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1845 if (ret) {
1846 unlock_user(host_msg, msg, 0);
1847 return ret;
1849 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1850 } else {
1851 ret = get_errno(send(fd, host_msg, len, flags));
1853 unlock_user(host_msg, msg, 0);
1854 return ret;
1857 /* do_recvfrom() Must return target values and target errnos. */
1858 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1859 abi_ulong target_addr,
1860 abi_ulong target_addrlen)
1862 socklen_t addrlen;
1863 void *addr;
1864 void *host_msg;
1865 abi_long ret;
1867 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1868 if (!host_msg)
1869 return -TARGET_EFAULT;
1870 if (target_addr) {
1871 if (get_user_u32(addrlen, target_addrlen)) {
1872 ret = -TARGET_EFAULT;
1873 goto fail;
1875 if ((int)addrlen < 0) {
1876 ret = -TARGET_EINVAL;
1877 goto fail;
1879 addr = alloca(addrlen);
1880 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1881 } else {
1882 addr = NULL; /* To keep compiler quiet. */
1883 ret = get_errno(recv(fd, host_msg, len, flags));
1885 if (!is_error(ret)) {
1886 if (target_addr) {
1887 host_to_target_sockaddr(target_addr, addr, addrlen);
1888 if (put_user_u32(addrlen, target_addrlen)) {
1889 ret = -TARGET_EFAULT;
1890 goto fail;
1893 unlock_user(host_msg, msg, len);
1894 } else {
1895 fail:
1896 unlock_user(host_msg, msg, 0);
1898 return ret;
1901 #ifdef TARGET_NR_socketcall
1902 /* do_socketcall() Must return target values and target errnos. */
1903 static abi_long do_socketcall(int num, abi_ulong vptr)
1905 abi_long ret;
1906 const int n = sizeof(abi_ulong);
1908 switch(num) {
1909 case SOCKOP_socket:
1911 abi_ulong domain, type, protocol;
1913 if (get_user_ual(domain, vptr)
1914 || get_user_ual(type, vptr + n)
1915 || get_user_ual(protocol, vptr + 2 * n))
1916 return -TARGET_EFAULT;
1918 ret = do_socket(domain, type, protocol);
1920 break;
1921 case SOCKOP_bind:
1923 abi_ulong sockfd;
1924 abi_ulong target_addr;
1925 socklen_t addrlen;
1927 if (get_user_ual(sockfd, vptr)
1928 || get_user_ual(target_addr, vptr + n)
1929 || get_user_ual(addrlen, vptr + 2 * n))
1930 return -TARGET_EFAULT;
1932 ret = do_bind(sockfd, target_addr, addrlen);
1934 break;
1935 case SOCKOP_connect:
1937 abi_ulong sockfd;
1938 abi_ulong target_addr;
1939 socklen_t addrlen;
1941 if (get_user_ual(sockfd, vptr)
1942 || get_user_ual(target_addr, vptr + n)
1943 || get_user_ual(addrlen, vptr + 2 * n))
1944 return -TARGET_EFAULT;
1946 ret = do_connect(sockfd, target_addr, addrlen);
1948 break;
1949 case SOCKOP_listen:
1951 abi_ulong sockfd, backlog;
1953 if (get_user_ual(sockfd, vptr)
1954 || get_user_ual(backlog, vptr + n))
1955 return -TARGET_EFAULT;
1957 ret = get_errno(listen(sockfd, backlog));
1959 break;
1960 case SOCKOP_accept:
1962 abi_ulong sockfd;
1963 abi_ulong target_addr, target_addrlen;
1965 if (get_user_ual(sockfd, vptr)
1966 || get_user_ual(target_addr, vptr + n)
1967 || get_user_ual(target_addrlen, vptr + 2 * n))
1968 return -TARGET_EFAULT;
1970 ret = do_accept(sockfd, target_addr, target_addrlen);
1972 break;
1973 case SOCKOP_getsockname:
1975 abi_ulong sockfd;
1976 abi_ulong target_addr, target_addrlen;
1978 if (get_user_ual(sockfd, vptr)
1979 || get_user_ual(target_addr, vptr + n)
1980 || get_user_ual(target_addrlen, vptr + 2 * n))
1981 return -TARGET_EFAULT;
1983 ret = do_getsockname(sockfd, target_addr, target_addrlen);
1985 break;
1986 case SOCKOP_getpeername:
1988 abi_ulong sockfd;
1989 abi_ulong target_addr, target_addrlen;
1991 if (get_user_ual(sockfd, vptr)
1992 || get_user_ual(target_addr, vptr + n)
1993 || get_user_ual(target_addrlen, vptr + 2 * n))
1994 return -TARGET_EFAULT;
1996 ret = do_getpeername(sockfd, target_addr, target_addrlen);
1998 break;
1999 case SOCKOP_socketpair:
2001 abi_ulong domain, type, protocol;
2002 abi_ulong tab;
2004 if (get_user_ual(domain, vptr)
2005 || get_user_ual(type, vptr + n)
2006 || get_user_ual(protocol, vptr + 2 * n)
2007 || get_user_ual(tab, vptr + 3 * n))
2008 return -TARGET_EFAULT;
2010 ret = do_socketpair(domain, type, protocol, tab);
2012 break;
2013 case SOCKOP_send:
2015 abi_ulong sockfd;
2016 abi_ulong msg;
2017 size_t len;
2018 abi_ulong flags;
2020 if (get_user_ual(sockfd, vptr)
2021 || get_user_ual(msg, vptr + n)
2022 || get_user_ual(len, vptr + 2 * n)
2023 || get_user_ual(flags, vptr + 3 * n))
2024 return -TARGET_EFAULT;
2026 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2028 break;
2029 case SOCKOP_recv:
2031 abi_ulong sockfd;
2032 abi_ulong msg;
2033 size_t len;
2034 abi_ulong flags;
2036 if (get_user_ual(sockfd, vptr)
2037 || get_user_ual(msg, vptr + n)
2038 || get_user_ual(len, vptr + 2 * n)
2039 || get_user_ual(flags, vptr + 3 * n))
2040 return -TARGET_EFAULT;
2042 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2044 break;
2045 case SOCKOP_sendto:
2047 abi_ulong sockfd;
2048 abi_ulong msg;
2049 size_t len;
2050 abi_ulong flags;
2051 abi_ulong addr;
2052 socklen_t addrlen;
2054 if (get_user_ual(sockfd, vptr)
2055 || get_user_ual(msg, vptr + n)
2056 || get_user_ual(len, vptr + 2 * n)
2057 || get_user_ual(flags, vptr + 3 * n)
2058 || get_user_ual(addr, vptr + 4 * n)
2059 || get_user_ual(addrlen, vptr + 5 * n))
2060 return -TARGET_EFAULT;
2062 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2064 break;
2065 case SOCKOP_recvfrom:
2067 abi_ulong sockfd;
2068 abi_ulong msg;
2069 size_t len;
2070 abi_ulong flags;
2071 abi_ulong addr;
2072 socklen_t addrlen;
2074 if (get_user_ual(sockfd, vptr)
2075 || get_user_ual(msg, vptr + n)
2076 || get_user_ual(len, vptr + 2 * n)
2077 || get_user_ual(flags, vptr + 3 * n)
2078 || get_user_ual(addr, vptr + 4 * n)
2079 || get_user_ual(addrlen, vptr + 5 * n))
2080 return -TARGET_EFAULT;
2082 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2084 break;
2085 case SOCKOP_shutdown:
2087 abi_ulong sockfd, how;
2089 if (get_user_ual(sockfd, vptr)
2090 || get_user_ual(how, vptr + n))
2091 return -TARGET_EFAULT;
2093 ret = get_errno(shutdown(sockfd, how));
2095 break;
2096 case SOCKOP_sendmsg:
2097 case SOCKOP_recvmsg:
2099 abi_ulong fd;
2100 abi_ulong target_msg;
2101 abi_ulong flags;
2103 if (get_user_ual(fd, vptr)
2104 || get_user_ual(target_msg, vptr + n)
2105 || get_user_ual(flags, vptr + 2 * n))
2106 return -TARGET_EFAULT;
2108 ret = do_sendrecvmsg(fd, target_msg, flags,
2109 (num == SOCKOP_sendmsg));
2111 break;
2112 case SOCKOP_setsockopt:
2114 abi_ulong sockfd;
2115 abi_ulong level;
2116 abi_ulong optname;
2117 abi_ulong optval;
2118 socklen_t optlen;
2120 if (get_user_ual(sockfd, vptr)
2121 || get_user_ual(level, vptr + n)
2122 || get_user_ual(optname, vptr + 2 * n)
2123 || get_user_ual(optval, vptr + 3 * n)
2124 || get_user_ual(optlen, vptr + 4 * n))
2125 return -TARGET_EFAULT;
2127 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2129 break;
2130 case SOCKOP_getsockopt:
2132 abi_ulong sockfd;
2133 abi_ulong level;
2134 abi_ulong optname;
2135 abi_ulong optval;
2136 socklen_t optlen;
2138 if (get_user_ual(sockfd, vptr)
2139 || get_user_ual(level, vptr + n)
2140 || get_user_ual(optname, vptr + 2 * n)
2141 || get_user_ual(optval, vptr + 3 * n)
2142 || get_user_ual(optlen, vptr + 4 * n))
2143 return -TARGET_EFAULT;
2145 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2147 break;
2148 default:
2149 gemu_log("Unsupported socketcall: %d\n", num);
2150 ret = -TARGET_ENOSYS;
2151 break;
2153 return ret;
2155 #endif
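/* Illustrative guest-side sketch (not part of this file): how a 32-bit
 * guest packs socket arguments for socketcall(2), which do_socketcall()
 * above unpacks with get_user_ual(vptr + i * n).  Assumes an i386-style
 * guest libc where __NR_socketcall and SYS_SOCKET (from <linux/net.h>)
 * exist.
 */
#if 0
#include <unistd.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <linux/net.h>

static long socket_via_socketcall(void)
{
    unsigned long args[3] = { AF_INET, SOCK_DGRAM, 0 };

    /* Equivalent of socket(AF_INET, SOCK_DGRAM, 0): one multiplexed
     * syscall whose real arguments live in guest memory at 'args'. */
    return syscall(__NR_socketcall, SYS_SOCKET, args);
}
#endif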
2157 #define N_SHM_REGIONS 32
2159 static struct shm_region {
2160 abi_ulong start;
2161 abi_ulong size;
2162 } shm_regions[N_SHM_REGIONS];
2164 struct target_ipc_perm
2166 abi_long __key;
2167 abi_ulong uid;
2168 abi_ulong gid;
2169 abi_ulong cuid;
2170 abi_ulong cgid;
2171 unsigned short int mode;
2172 unsigned short int __pad1;
2173 unsigned short int __seq;
2174 unsigned short int __pad2;
2175 abi_ulong __unused1;
2176 abi_ulong __unused2;
2179 struct target_semid_ds
2181 struct target_ipc_perm sem_perm;
2182 abi_ulong sem_otime;
2183 abi_ulong __unused1;
2184 abi_ulong sem_ctime;
2185 abi_ulong __unused2;
2186 abi_ulong sem_nsems;
2187 abi_ulong __unused3;
2188 abi_ulong __unused4;
2191 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2192 abi_ulong target_addr)
2194 struct target_ipc_perm *target_ip;
2195 struct target_semid_ds *target_sd;
2197 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2198 return -TARGET_EFAULT;
2199 target_ip = &(target_sd->sem_perm);
2200 host_ip->__key = tswapl(target_ip->__key);
2201 host_ip->uid = tswapl(target_ip->uid);
2202 host_ip->gid = tswapl(target_ip->gid);
2203 host_ip->cuid = tswapl(target_ip->cuid);
2204 host_ip->cgid = tswapl(target_ip->cgid);
2205 host_ip->mode = tswapl(target_ip->mode);
2206 unlock_user_struct(target_sd, target_addr, 0);
2207 return 0;
2210 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2211 struct ipc_perm *host_ip)
2213 struct target_ipc_perm *target_ip;
2214 struct target_semid_ds *target_sd;
2216 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2217 return -TARGET_EFAULT;
2218 target_ip = &(target_sd->sem_perm);
2219 target_ip->__key = tswapl(host_ip->__key);
2220 target_ip->uid = tswapl(host_ip->uid);
2221 target_ip->gid = tswapl(host_ip->gid);
2222 target_ip->cuid = tswapl(host_ip->cuid);
2223 target_ip->cgid = tswapl(host_ip->cgid);
2224 target_ip->mode = tswapl(host_ip->mode);
2225 unlock_user_struct(target_sd, target_addr, 1);
2226 return 0;
2229 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2230 abi_ulong target_addr)
2232 struct target_semid_ds *target_sd;
2234 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2235 return -TARGET_EFAULT;
2236 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2237 return -TARGET_EFAULT;
2238 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2239 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2240 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2241 unlock_user_struct(target_sd, target_addr, 0);
2242 return 0;
2245 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2246 struct semid_ds *host_sd)
2248 struct target_semid_ds *target_sd;
2250 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2251 return -TARGET_EFAULT;
2252 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2254 return -TARGET_EFAULT;
2254 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2255 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2256 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2257 unlock_user_struct(target_sd, target_addr, 1);
2258 return 0;
2261 struct target_seminfo {
2262 int semmap;
2263 int semmni;
2264 int semmns;
2265 int semmnu;
2266 int semmsl;
2267 int semopm;
2268 int semume;
2269 int semusz;
2270 int semvmx;
2271 int semaem;
2274 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2275 struct seminfo *host_seminfo)
2277 struct target_seminfo *target_seminfo;
2278 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2279 return -TARGET_EFAULT;
2280 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2281 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2282 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2283 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2284 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2285 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2286 __put_user(host_seminfo->semume, &target_seminfo->semume);
2287 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2288 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2289 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2290 unlock_user_struct(target_seminfo, target_addr, 1);
2291 return 0;
2294 union semun {
2295 int val;
2296 struct semid_ds *buf;
2297 unsigned short *array;
2298 struct seminfo *__buf;
2301 union target_semun {
2302 int val;
2303 abi_ulong buf;
2304 abi_ulong array;
2305 abi_ulong __buf;
2308 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2309 abi_ulong target_addr)
2311 int nsems;
2312 unsigned short *array;
2313 union semun semun;
2314 struct semid_ds semid_ds;
2315 int i, ret;
2317 semun.buf = &semid_ds;
2319 ret = semctl(semid, 0, IPC_STAT, semun);
2320 if (ret == -1)
2321 return get_errno(ret);
2323 nsems = semid_ds.sem_nsems;
2325 *host_array = malloc(nsems*sizeof(unsigned short));
2326 array = lock_user(VERIFY_READ, target_addr,
2327 nsems*sizeof(unsigned short), 1);
2328 if (!array)
2329 return -TARGET_EFAULT;
2331 for(i=0; i<nsems; i++) {
2332 __get_user((*host_array)[i], &array[i]);
2334 unlock_user(array, target_addr, 0);
2336 return 0;
2339 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2340 unsigned short **host_array)
2342 int nsems;
2343 unsigned short *array;
2344 union semun semun;
2345 struct semid_ds semid_ds;
2346 int i, ret;
2348 semun.buf = &semid_ds;
2350 ret = semctl(semid, 0, IPC_STAT, semun);
2351 if (ret == -1)
2352 return get_errno(ret);
2354 nsems = semid_ds.sem_nsems;
2356 array = lock_user(VERIFY_WRITE, target_addr,
2357 nsems*sizeof(unsigned short), 0);
2358 if (!array)
2359 return -TARGET_EFAULT;
2361 for(i=0; i<nsems; i++) {
2362 __put_user((*host_array)[i], &array[i]);
2364 free(*host_array);
2365 unlock_user(array, target_addr, 1);
2367 return 0;
2370 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2371 union target_semun target_su)
2373 union semun arg;
2374 struct semid_ds dsarg;
2375 unsigned short *array = NULL;
2376 struct seminfo seminfo;
2377 abi_long ret = -TARGET_EINVAL;
2378 abi_long err;
2379 cmd &= 0xff;
2381 switch( cmd ) {
2382 case GETVAL:
2383 case SETVAL:
2384 arg.val = tswapl(target_su.val);
2385 ret = get_errno(semctl(semid, semnum, cmd, arg));
2386 target_su.val = tswapl(arg.val);
2387 break;
2388 case GETALL:
2389 case SETALL:
2390 err = target_to_host_semarray(semid, &array, target_su.array);
2391 if (err)
2392 return err;
2393 arg.array = array;
2394 ret = get_errno(semctl(semid, semnum, cmd, arg));
2395 err = host_to_target_semarray(semid, target_su.array, &array);
2396 if (err)
2397 return err;
2398 break;
2399 case IPC_STAT:
2400 case IPC_SET:
2401 case SEM_STAT:
2402 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2403 if (err)
2404 return err;
2405 arg.buf = &dsarg;
2406 ret = get_errno(semctl(semid, semnum, cmd, arg));
2407 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2408 if (err)
2409 return err;
2410 break;
2411 case IPC_INFO:
2412 case SEM_INFO:
2413 arg.__buf = &seminfo;
2414 ret = get_errno(semctl(semid, semnum, cmd, arg));
2415 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2416 if (err)
2417 return err;
2418 break;
2419 case IPC_RMID:
2420 case GETPID:
2421 case GETNCNT:
2422 case GETZCNT:
2423 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2424 break;
2427 return ret;
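/* Illustrative guest-side sketch (not part of this file): a SETALL/GETALL
 * round trip of the kind do_semctl() above services.  The unsigned short
 * array is exactly what target_to_host_semarray() and
 * host_to_target_semarray() shuttle between guest and host memory.
 * Assumes the SUSv3 convention that the caller defines union semun.
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

static int sem_roundtrip(int semid)
{
    unsigned short vals[2] = { 1, 0 };
    union semun arg;

    arg.array = vals;
    if (semctl(semid, 0, SETALL, arg) == -1)
        return -1;
    return semctl(semid, 0, GETALL, arg);   /* refills vals[] */
}
#endif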
2430 struct target_sembuf {
2431 unsigned short sem_num;
2432 short sem_op;
2433 short sem_flg;
2436 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2437 abi_ulong target_addr,
2438 unsigned nsops)
2440 struct target_sembuf *target_sembuf;
2441 int i;
2443 target_sembuf = lock_user(VERIFY_READ, target_addr,
2444 nsops*sizeof(struct target_sembuf), 1);
2445 if (!target_sembuf)
2446 return -TARGET_EFAULT;
2448 for(i=0; i<nsops; i++) {
2449 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2450 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2451 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2454 unlock_user(target_sembuf, target_addr, 0);
2456 return 0;
2459 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2461 struct sembuf sops[nsops];
2463 if (target_to_host_sembuf(sops, ptr, nsops))
2464 return -TARGET_EFAULT;
2466 return semop(semid, sops, nsops);
2469 struct target_msqid_ds
2471 struct target_ipc_perm msg_perm;
2472 abi_ulong msg_stime;
2473 #if TARGET_ABI_BITS == 32
2474 abi_ulong __unused1;
2475 #endif
2476 abi_ulong msg_rtime;
2477 #if TARGET_ABI_BITS == 32
2478 abi_ulong __unused2;
2479 #endif
2480 abi_ulong msg_ctime;
2481 #if TARGET_ABI_BITS == 32
2482 abi_ulong __unused3;
2483 #endif
2484 abi_ulong __msg_cbytes;
2485 abi_ulong msg_qnum;
2486 abi_ulong msg_qbytes;
2487 abi_ulong msg_lspid;
2488 abi_ulong msg_lrpid;
2489 abi_ulong __unused4;
2490 abi_ulong __unused5;
2493 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2494 abi_ulong target_addr)
2496 struct target_msqid_ds *target_md;
2498 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2499 return -TARGET_EFAULT;
2500 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2501 return -TARGET_EFAULT;
2502 host_md->msg_stime = tswapl(target_md->msg_stime);
2503 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2504 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2505 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2506 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2507 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2508 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2509 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2510 unlock_user_struct(target_md, target_addr, 0);
2511 return 0;
2514 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2515 struct msqid_ds *host_md)
2517 struct target_msqid_ds *target_md;
2519 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2520 return -TARGET_EFAULT;
2521 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2522 return -TARGET_EFAULT;
2523 target_md->msg_stime = tswapl(host_md->msg_stime);
2524 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2525 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2526 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2527 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2528 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2529 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2530 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2531 unlock_user_struct(target_md, target_addr, 1);
2532 return 0;
2535 struct target_msginfo {
2536 int msgpool;
2537 int msgmap;
2538 int msgmax;
2539 int msgmnb;
2540 int msgmni;
2541 int msgssz;
2542 int msgtql;
2543 unsigned short int msgseg;
2546 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2547 struct msginfo *host_msginfo)
2549 struct target_msginfo *target_msginfo;
2550 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2551 return -TARGET_EFAULT;
2552 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2553 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2554 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2555 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2556 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2557 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2558 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2559 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2560 unlock_user_struct(target_msginfo, target_addr, 1);
2561 return 0;
2564 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2566 struct msqid_ds dsarg;
2567 struct msginfo msginfo;
2568 abi_long ret = -TARGET_EINVAL;
2570 cmd &= 0xff;
2572 switch (cmd) {
2573 case IPC_STAT:
2574 case IPC_SET:
2575 case MSG_STAT:
2576 if (target_to_host_msqid_ds(&dsarg,ptr))
2577 return -TARGET_EFAULT;
2578 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2579 if (host_to_target_msqid_ds(ptr,&dsarg))
2580 return -TARGET_EFAULT;
2581 break;
2582 case IPC_RMID:
2583 ret = get_errno(msgctl(msgid, cmd, NULL));
2584 break;
2585 case IPC_INFO:
2586 case MSG_INFO:
2587 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2588 if (host_to_target_msginfo(ptr, &msginfo))
2589 return -TARGET_EFAULT;
2590 break;
2593 return ret;
2596 struct target_msgbuf {
2597 abi_long mtype;
2598 char mtext[1];
2601 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2602 unsigned int msgsz, int msgflg)
2604 struct target_msgbuf *target_mb;
2605 struct msgbuf *host_mb;
2606 abi_long ret = 0;
2608 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2609 return -TARGET_EFAULT;
2610 host_mb = malloc(msgsz+sizeof(long));
2611 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2612 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2613 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2614 free(host_mb);
2615 unlock_user_struct(target_mb, msgp, 0);
2617 return ret;
2620 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2621 unsigned int msgsz, abi_long msgtyp,
2622 int msgflg)
2624 struct target_msgbuf *target_mb;
2625 char *target_mtext;
2626 struct msgbuf *host_mb;
2627 abi_long ret = 0;
2629 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2630 return -TARGET_EFAULT;
2632 host_mb = malloc(msgsz+sizeof(long));
2633 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2635 if (ret > 0) {
2636 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2637 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2638 if (!target_mtext) {
2639 ret = -TARGET_EFAULT;
2640 goto end;
2642 memcpy(target_mb->mtext, host_mb->mtext, ret);
2643 unlock_user(target_mtext, target_mtext_addr, ret);
2646 target_mb->mtype = tswapl(host_mb->mtype);
2647 free(host_mb);
2649 end:
2650 if (target_mb)
2651 unlock_user_struct(target_mb, msgp, 1);
2652 return ret;
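/* Illustrative guest-side sketch (not part of this file): the msgbuf
 * layout behind do_msgsnd()/do_msgrcv() above.  mtype and mtext are
 * copied separately because the guest's abi_long mtype may differ in
 * size and byte order from the host's long.
 */
#if 0
#include <string.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct example_msg {
    long mtype;                 /* corresponds to target_msgbuf.mtype */
    char mtext[64];
};

static ssize_t msg_roundtrip(int msqid)
{
    struct example_msg m = { .mtype = 1 };

    strcpy(m.mtext, "hello");
    if (msgsnd(msqid, &m, sizeof(m.mtext), 0) == -1)
        return -1;
    /* msgrcv() returns the number of mtext bytes received; do_msgrcv()
     * copies exactly that many bytes back into the guest buffer. */
    return msgrcv(msqid, &m, sizeof(m.mtext), 1, 0);
}
#endif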
2655 struct target_shmid_ds
2657 struct target_ipc_perm shm_perm;
2658 abi_ulong shm_segsz;
2659 abi_ulong shm_atime;
2660 #if TARGET_ABI_BITS == 32
2661 abi_ulong __unused1;
2662 #endif
2663 abi_ulong shm_dtime;
2664 #if TARGET_ABI_BITS == 32
2665 abi_ulong __unused2;
2666 #endif
2667 abi_ulong shm_ctime;
2668 #if TARGET_ABI_BITS == 32
2669 abi_ulong __unused3;
2670 #endif
2671 int shm_cpid;
2672 int shm_lpid;
2673 abi_ulong shm_nattch;
2674 unsigned long int __unused4;
2675 unsigned long int __unused5;
2678 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2679 abi_ulong target_addr)
2681 struct target_shmid_ds *target_sd;
2683 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2684 return -TARGET_EFAULT;
2685 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2686 return -TARGET_EFAULT;
2687 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2688 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2689 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2690 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2691 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2692 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2693 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2694 unlock_user_struct(target_sd, target_addr, 0);
2695 return 0;
2698 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2699 struct shmid_ds *host_sd)
2701 struct target_shmid_ds *target_sd;
2703 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2704 return -TARGET_EFAULT;
2705 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2706 return -TARGET_EFAULT;
2707 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2708 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2709 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2710 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2711 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2712 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2713 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2714 unlock_user_struct(target_sd, target_addr, 1);
2715 return 0;
2718 struct target_shminfo {
2719 abi_ulong shmmax;
2720 abi_ulong shmmin;
2721 abi_ulong shmmni;
2722 abi_ulong shmseg;
2723 abi_ulong shmall;
2726 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2727 struct shminfo *host_shminfo)
2729 struct target_shminfo *target_shminfo;
2730 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2731 return -TARGET_EFAULT;
2732 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2733 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2734 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2735 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2736 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2737 unlock_user_struct(target_shminfo, target_addr, 1);
2738 return 0;
2741 struct target_shm_info {
2742 int used_ids;
2743 abi_ulong shm_tot;
2744 abi_ulong shm_rss;
2745 abi_ulong shm_swp;
2746 abi_ulong swap_attempts;
2747 abi_ulong swap_successes;
2750 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2751 struct shm_info *host_shm_info)
2753 struct target_shm_info *target_shm_info;
2754 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2755 return -TARGET_EFAULT;
2756 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2757 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2758 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2759 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2760 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2761 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2762 unlock_user_struct(target_shm_info, target_addr, 1);
2763 return 0;
2766 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2768 struct shmid_ds dsarg;
2769 struct shminfo shminfo;
2770 struct shm_info shm_info;
2771 abi_long ret = -TARGET_EINVAL;
2773 cmd &= 0xff;
2775 switch(cmd) {
2776 case IPC_STAT:
2777 case IPC_SET:
2778 case SHM_STAT:
2779 if (target_to_host_shmid_ds(&dsarg, buf))
2780 return -TARGET_EFAULT;
2781 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2782 if (host_to_target_shmid_ds(buf, &dsarg))
2783 return -TARGET_EFAULT;
2784 break;
2785 case IPC_INFO:
2786 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2787 if (host_to_target_shminfo(buf, &shminfo))
2788 return -TARGET_EFAULT;
2789 break;
2790 case SHM_INFO:
2791 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2792 if (host_to_target_shm_info(buf, &shm_info))
2793 return -TARGET_EFAULT;
2794 break;
2795 case IPC_RMID:
2796 case SHM_LOCK:
2797 case SHM_UNLOCK:
2798 ret = get_errno(shmctl(shmid, cmd, NULL));
2799 break;
2802 return ret;
2805 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2807 abi_long raddr;
2808 void *host_raddr;
2809 struct shmid_ds shm_info;
2810 int i,ret;
2812 /* find out the length of the shared memory segment */
2813 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2814 if (is_error(ret)) {
2815 /* can't get length, bail out */
2816 return ret;
2819 mmap_lock();
2821 if (shmaddr)
2822 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2823 else {
2824 abi_ulong mmap_start;
2826 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2828 if (mmap_start == -1) {
2829 errno = ENOMEM;
2830 host_raddr = (void *)-1;
2831 } else
2832 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2835 if (host_raddr == (void *)-1) {
2836 mmap_unlock();
2837 return get_errno((long)host_raddr);
2839 raddr=h2g((unsigned long)host_raddr);
2841 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2842 PAGE_VALID | PAGE_READ |
2843 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2845 for (i = 0; i < N_SHM_REGIONS; i++) {
2846 if (shm_regions[i].start == 0) {
2847 shm_regions[i].start = raddr;
2848 shm_regions[i].size = shm_info.shm_segsz;
2849 break;
2853 mmap_unlock();
2854 return raddr;
2858 static inline abi_long do_shmdt(abi_ulong shmaddr)
2860 int i;
2862 for (i = 0; i < N_SHM_REGIONS; ++i) {
2863 if (shm_regions[i].start == shmaddr) {
2864 shm_regions[i].start = 0;
2865 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
2866 break;
2870 return get_errno(shmdt(g2h(shmaddr)));
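/* Illustrative guest-side sketch (not part of this file): a shmget/shmat/
 * shmdt round trip.  do_shmat() above records every attach in
 * shm_regions[] so that do_shmdt() can clear page flags for the right
 * length when the segment is detached by address.
 */
#if 0
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

static int shm_roundtrip(void)
{
    int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
    void *p;

    if (id == -1)
        return -1;
    /* shmaddr == NULL: do_shmat() picks a guest address itself via
     * mmap_find_vma() and remembers it in shm_regions[]. */
    p = shmat(id, NULL, 0);
    if (p == (void *)-1)
        return -1;
    memset(p, 0, 4096);
    shmdt(p);                   /* looked up by address in shm_regions[] */
    return shmctl(id, IPC_RMID, NULL);
}
#endif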
2873 #ifdef TARGET_NR_ipc
2874 /* ??? This only works with linear mappings. */
2875 /* do_ipc() must return target values and target errnos. */
2876 static abi_long do_ipc(unsigned int call, int first,
2877 int second, int third,
2878 abi_long ptr, abi_long fifth)
2880 int version;
2881 abi_long ret = 0;
2883 version = call >> 16;
2884 call &= 0xffff;
2886 switch (call) {
2887 case IPCOP_semop:
2888 ret = do_semop(first, ptr, second);
2889 break;
2891 case IPCOP_semget:
2892 ret = get_errno(semget(first, second, third));
2893 break;
2895 case IPCOP_semctl:
2896 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2897 break;
2899 case IPCOP_msgget:
2900 ret = get_errno(msgget(first, second));
2901 break;
2903 case IPCOP_msgsnd:
2904 ret = do_msgsnd(first, ptr, second, third);
2905 break;
2907 case IPCOP_msgctl:
2908 ret = do_msgctl(first, second, ptr);
2909 break;
2911 case IPCOP_msgrcv:
2912 switch (version) {
2913 case 0:
2915 struct target_ipc_kludge {
2916 abi_long msgp;
2917 abi_long msgtyp;
2918 } *tmp;
2920 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2921 ret = -TARGET_EFAULT;
2922 break;
2925 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2927 unlock_user_struct(tmp, ptr, 0);
2928 break;
2930 default:
2931 ret = do_msgrcv(first, ptr, second, fifth, third);
2933 break;
2935 case IPCOP_shmat:
2936 switch (version) {
2937 default:
2939 abi_ulong raddr;
2940 raddr = do_shmat(first, ptr, second);
2941 if (is_error(raddr))
2942 return get_errno(raddr);
2943 if (put_user_ual(raddr, third))
2944 return -TARGET_EFAULT;
2945 break;
2947 case 1:
2948 ret = -TARGET_EINVAL;
2949 break;
2951 break;
2952 case IPCOP_shmdt:
2953 ret = do_shmdt(ptr);
2954 break;
2956 case IPCOP_shmget:
2957 /* IPC_* flag values are the same on all linux platforms */
2958 ret = get_errno(shmget(first, second, third));
2959 break;
2961 /* IPC_* and SHM_* command values are the same on all linux platforms */
2962 case IPCOP_shmctl:
2963 ret = do_shmctl(first, second, third);
2964 break;
2965 default:
2966 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2967 ret = -TARGET_ENOSYS;
2968 break;
2970 return ret;
2972 #endif
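/* Illustrative sketch (not part of this file): the call-word layout that
 * do_ipc() above decodes.  The low 16 bits select the operation
 * (IPCOP_semop, IPCOP_shmat, ...); the high 16 bits carry an interface
 * version that only a few operations, such as msgrcv and shmat, look at.
 */
#if 0
static void split_ipc_call(unsigned int call,
                           unsigned int *op, unsigned int *version)
{
    *version = call >> 16;
    *op = call & 0xffff;
}
#endif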
2974 /* kernel structure types definitions */
2976 #define STRUCT(name, ...) STRUCT_ ## name,
2977 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2978 enum {
2979 #include "syscall_types.h"
2981 #undef STRUCT
2982 #undef STRUCT_SPECIAL
2984 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
2985 #define STRUCT_SPECIAL(name)
2986 #include "syscall_types.h"
2987 #undef STRUCT
2988 #undef STRUCT_SPECIAL
2990 typedef struct IOCTLEntry IOCTLEntry;
2992 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
2993 int fd, abi_long cmd, abi_long arg);
2995 struct IOCTLEntry {
2996 unsigned int target_cmd;
2997 unsigned int host_cmd;
2998 const char *name;
2999 int access;
3000 do_ioctl_fn *do_ioctl;
3001 const argtype arg_type[5];
3004 #define IOC_R 0x0001
3005 #define IOC_W 0x0002
3006 #define IOC_RW (IOC_R | IOC_W)
3008 #define MAX_STRUCT_SIZE 4096
3010 #ifdef CONFIG_FIEMAP
3011 /* So fiemap access checks don't overflow on 32 bit systems.
3012 * This is very slightly smaller than the limit imposed by
3013 * the underlying kernel.
3015 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3016 / sizeof(struct fiemap_extent))
3018 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3019 int fd, abi_long cmd, abi_long arg)
3021 /* The parameter for this ioctl is a struct fiemap followed
3022 * by an array of struct fiemap_extent whose size is set
3023 * in fiemap->fm_extent_count. The array is filled in by the
3024 * ioctl.
3026 int target_size_in, target_size_out;
3027 struct fiemap *fm;
3028 const argtype *arg_type = ie->arg_type;
3029 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3030 void *argptr, *p;
3031 abi_long ret;
3032 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3033 uint32_t outbufsz;
3034 int free_fm = 0;
3036 assert(arg_type[0] == TYPE_PTR);
3037 assert(ie->access == IOC_RW);
3038 arg_type++;
3039 target_size_in = thunk_type_size(arg_type, 0);
3040 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3041 if (!argptr) {
3042 return -TARGET_EFAULT;
3044 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3045 unlock_user(argptr, arg, 0);
3046 fm = (struct fiemap *)buf_temp;
3047 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3048 return -TARGET_EINVAL;
3051 outbufsz = sizeof (*fm) +
3052 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3054 if (outbufsz > MAX_STRUCT_SIZE) {
3055 /* We can't fit all the extents into the fixed size buffer.
3056 * Allocate one that is large enough and use it instead.
3058 fm = malloc(outbufsz);
3059 if (!fm) {
3060 return -TARGET_ENOMEM;
3062 memcpy(fm, buf_temp, sizeof(struct fiemap));
3063 free_fm = 1;
3065 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3066 if (!is_error(ret)) {
3067 target_size_out = target_size_in;
3068 /* An extent_count of 0 means we were only counting the extents
3069 * so there are no structs to copy
3071 if (fm->fm_extent_count != 0) {
3072 target_size_out += fm->fm_mapped_extents * extent_size;
3074 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3075 if (!argptr) {
3076 ret = -TARGET_EFAULT;
3077 } else {
3078 /* Convert the struct fiemap */
3079 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3080 if (fm->fm_extent_count != 0) {
3081 p = argptr + target_size_in;
3082 /* ...and then all the struct fiemap_extents */
3083 for (i = 0; i < fm->fm_mapped_extents; i++) {
3084 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3085 THUNK_TARGET);
3086 p += extent_size;
3089 unlock_user(argptr, arg, target_size_out);
3092 if (free_fm) {
3093 free(fm);
3095 return ret;
3097 #endif
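/* Illustrative guest-side sketch (not part of this file): the usual
 * two-pass FS_IOC_FIEMAP call that produces the variable-sized reply
 * do_ioctl_fs_ioc_fiemap() above has to convert.  Assumes
 * <linux/fiemap.h> and FS_IOC_FIEMAP from <linux/fs.h>.
 */
#if 0
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

static struct fiemap *map_extents(int fd)
{
    struct fiemap probe;
    struct fiemap *fm;
    size_t sz;

    memset(&probe, 0, sizeof(probe));
    probe.fm_length = ~0ULL;            /* whole file */
    probe.fm_extent_count = 0;          /* pass 1: only count extents */
    if (ioctl(fd, FS_IOC_FIEMAP, &probe) == -1)
        return NULL;

    sz = sizeof(*fm) + probe.fm_mapped_extents * sizeof(struct fiemap_extent);
    fm = calloc(1, sz);
    if (!fm)
        return NULL;
    fm->fm_length = ~0ULL;
    fm->fm_extent_count = probe.fm_mapped_extents;  /* pass 2: fetch them */
    if (ioctl(fd, FS_IOC_FIEMAP, fm) == -1) {
        free(fm);
        return NULL;
    }
    return fm;                          /* fm->fm_extents[] is now filled */
}
#endif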
3099 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3100 int fd, abi_long cmd, abi_long arg)
3102 const argtype *arg_type = ie->arg_type;
3103 int target_size;
3104 void *argptr;
3105 int ret;
3106 struct ifconf *host_ifconf;
3107 uint32_t outbufsz;
3108 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3109 int target_ifreq_size;
3110 int nb_ifreq;
3111 int free_buf = 0;
3112 int i;
3113 int target_ifc_len;
3114 abi_long target_ifc_buf;
3115 int host_ifc_len;
3116 char *host_ifc_buf;
3118 assert(arg_type[0] == TYPE_PTR);
3119 assert(ie->access == IOC_RW);
3121 arg_type++;
3122 target_size = thunk_type_size(arg_type, 0);
3124 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3125 if (!argptr)
3126 return -TARGET_EFAULT;
3127 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3128 unlock_user(argptr, arg, 0);
3130 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3131 target_ifc_len = host_ifconf->ifc_len;
3132 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3134 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3135 nb_ifreq = target_ifc_len / target_ifreq_size;
3136 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3138 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3139 if (outbufsz > MAX_STRUCT_SIZE) {
3140 /* We can't fit all the ifreq entries into the fixed size buffer.
3141 * Allocate one that is large enough and use it instead.
3143 host_ifconf = malloc(outbufsz);
3144 if (!host_ifconf) {
3145 return -TARGET_ENOMEM;
3147 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3148 free_buf = 1;
3150 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3152 host_ifconf->ifc_len = host_ifc_len;
3153 host_ifconf->ifc_buf = host_ifc_buf;
3155 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3156 if (!is_error(ret)) {
3157 /* convert host ifc_len to target ifc_len */
3159 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3160 target_ifc_len = nb_ifreq * target_ifreq_size;
3161 host_ifconf->ifc_len = target_ifc_len;
3163 /* restore target ifc_buf */
3165 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3167 /* copy struct ifconf to target user */
3169 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3170 if (!argptr)
3171 return -TARGET_EFAULT;
3172 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3173 unlock_user(argptr, arg, target_size);
3175 /* copy ifreq[] to target user */
3177 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3178 for (i = 0; i < nb_ifreq ; i++) {
3179 thunk_convert(argptr + i * target_ifreq_size,
3180 host_ifc_buf + i * sizeof(struct ifreq),
3181 ifreq_arg_type, THUNK_TARGET);
3183 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3186 if (free_buf) {
3187 free(host_ifconf);
3190 return ret;
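/* Illustrative guest-side sketch (not part of this file): the
 * "length in, length out" SIOCGIFCONF contract that do_ioctl_ifconf()
 * above rescales between host and target struct ifreq sizes.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int count_interfaces(int sockfd)
{
    struct ifreq reqs[16];
    struct ifconf ifc;

    memset(&ifc, 0, sizeof(ifc));
    ifc.ifc_len = sizeof(reqs);         /* in: size of the caller's buffer */
    ifc.ifc_req = reqs;
    if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1)
        return -1;
    return ifc.ifc_len / sizeof(struct ifreq);  /* out: bytes actually used */
}
#endif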
3193 static IOCTLEntry ioctl_entries[] = {
3194 #define IOCTL(cmd, access, ...) \
3195 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3196 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3197 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3198 #include "ioctls.h"
3199 { 0, 0, },
3202 /* ??? Implement proper locking for ioctls. */
3203 /* do_ioctl() must return target values and target errnos. */
3204 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3206 const IOCTLEntry *ie;
3207 const argtype *arg_type;
3208 abi_long ret;
3209 uint8_t buf_temp[MAX_STRUCT_SIZE];
3210 int target_size;
3211 void *argptr;
3213 ie = ioctl_entries;
3214 for(;;) {
3215 if (ie->target_cmd == 0) {
3216 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3217 return -TARGET_ENOSYS;
3219 if (ie->target_cmd == cmd)
3220 break;
3221 ie++;
3223 arg_type = ie->arg_type;
3224 #if defined(DEBUG)
3225 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3226 #endif
3227 if (ie->do_ioctl) {
3228 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3231 switch(arg_type[0]) {
3232 case TYPE_NULL:
3233 /* no argument */
3234 ret = get_errno(ioctl(fd, ie->host_cmd));
3235 break;
3236 case TYPE_PTRVOID:
3237 case TYPE_INT:
3238 /* int argument */
3239 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3240 break;
3241 case TYPE_PTR:
3242 arg_type++;
3243 target_size = thunk_type_size(arg_type, 0);
3244 switch(ie->access) {
3245 case IOC_R:
3246 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3247 if (!is_error(ret)) {
3248 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3249 if (!argptr)
3250 return -TARGET_EFAULT;
3251 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3252 unlock_user(argptr, arg, target_size);
3254 break;
3255 case IOC_W:
3256 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3257 if (!argptr)
3258 return -TARGET_EFAULT;
3259 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3260 unlock_user(argptr, arg, 0);
3261 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3262 break;
3263 default:
3264 case IOC_RW:
3265 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3266 if (!argptr)
3267 return -TARGET_EFAULT;
3268 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3269 unlock_user(argptr, arg, 0);
3270 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3271 if (!is_error(ret)) {
3272 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3273 if (!argptr)
3274 return -TARGET_EFAULT;
3275 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3276 unlock_user(argptr, arg, target_size);
3278 break;
3280 break;
3281 default:
3282 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3283 (long)cmd, arg_type[0]);
3284 ret = -TARGET_ENOSYS;
3285 break;
3287 return ret;
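/* Illustrative guest-side sketch (not part of this file): TIOCGWINSZ is a
 * typical TYPE_PTR/IOC_R entry, so in do_ioctl() above the data only
 * flows host -> guest and only the lock_user(VERIFY_WRITE) leg is needed.
 */
#if 0
#include <sys/ioctl.h>

static int terminal_columns(int tty_fd)
{
    struct winsize ws;

    if (ioctl(tty_fd, TIOCGWINSZ, &ws) == -1)
        return -1;
    return ws.ws_col;
}
#endif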
3290 static const bitmask_transtbl iflag_tbl[] = {
3291 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3292 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3293 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3294 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3295 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3296 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3297 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3298 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3299 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3300 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3301 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3302 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3303 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3304 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3305 { 0, 0, 0, 0 }
3308 static const bitmask_transtbl oflag_tbl[] = {
3309 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3310 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3311 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3312 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3313 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3314 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3315 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3316 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3317 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3318 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3319 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3320 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3321 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3322 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3323 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3324 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3325 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3326 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3327 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3328 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3329 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3330 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3331 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3332 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3333 { 0, 0, 0, 0 }
3336 static const bitmask_transtbl cflag_tbl[] = {
3337 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3338 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3339 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3340 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3341 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3342 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3343 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3344 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3345 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3346 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3347 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3348 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3349 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3350 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3351 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3352 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3353 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3354 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3355 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3356 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3357 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3358 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3359 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3360 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3361 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3362 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3363 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3364 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3365 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3366 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3367 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3368 { 0, 0, 0, 0 }
3371 static const bitmask_transtbl lflag_tbl[] = {
3372 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3373 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3374 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3375 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3376 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3377 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3378 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3379 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3380 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3381 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3382 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3383 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3384 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3385 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3386 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3387 { 0, 0, 0, 0 }
3390 static void target_to_host_termios (void *dst, const void *src)
3392 struct host_termios *host = dst;
3393 const struct target_termios *target = src;
3395 host->c_iflag =
3396 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3397 host->c_oflag =
3398 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3399 host->c_cflag =
3400 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3401 host->c_lflag =
3402 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3403 host->c_line = target->c_line;
3405 memset(host->c_cc, 0, sizeof(host->c_cc));
3406 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3407 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3408 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3409 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3410 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3411 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3412 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3413 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3414 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3415 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3416 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3417 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3418 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3419 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3420 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3421 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3422 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3425 static void host_to_target_termios (void *dst, const void *src)
3427 struct target_termios *target = dst;
3428 const struct host_termios *host = src;
3430 target->c_iflag =
3431 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3432 target->c_oflag =
3433 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3434 target->c_cflag =
3435 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3436 target->c_lflag =
3437 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3438 target->c_line = host->c_line;
3440 memset(target->c_cc, 0, sizeof(target->c_cc));
3441 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3442 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3443 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3444 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3445 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3446 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3447 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3448 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3449 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3450 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3451 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3452 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3453 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3454 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3455 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3456 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3457 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3460 static const StructEntry struct_termios_def = {
3461 .convert = { host_to_target_termios, target_to_host_termios },
3462 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3463 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3466 static bitmask_transtbl mmap_flags_tbl[] = {
3467 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3468 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3469 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3470 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3471 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3472 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3473 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3474 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3475 { 0, 0, 0, 0 }
3478 #if defined(TARGET_I386)
3480 /* NOTE: there is really one LDT for all the threads */
3481 static uint8_t *ldt_table;
3483 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3485 int size;
3486 void *p;
3488 if (!ldt_table)
3489 return 0;
3490 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3491 if (size > bytecount)
3492 size = bytecount;
3493 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3494 if (!p)
3495 return -TARGET_EFAULT;
3496 /* ??? Should this be byteswapped? */
3497 memcpy(p, ldt_table, size);
3498 unlock_user(p, ptr, size);
3499 return size;
3502 /* XXX: add locking support */
3503 static abi_long write_ldt(CPUX86State *env,
3504 abi_ulong ptr, unsigned long bytecount, int oldmode)
3506 struct target_modify_ldt_ldt_s ldt_info;
3507 struct target_modify_ldt_ldt_s *target_ldt_info;
3508 int seg_32bit, contents, read_exec_only, limit_in_pages;
3509 int seg_not_present, useable, lm;
3510 uint32_t *lp, entry_1, entry_2;
3512 if (bytecount != sizeof(ldt_info))
3513 return -TARGET_EINVAL;
3514 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3515 return -TARGET_EFAULT;
3516 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3517 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3518 ldt_info.limit = tswap32(target_ldt_info->limit);
3519 ldt_info.flags = tswap32(target_ldt_info->flags);
3520 unlock_user_struct(target_ldt_info, ptr, 0);
3522 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3523 return -TARGET_EINVAL;
3524 seg_32bit = ldt_info.flags & 1;
3525 contents = (ldt_info.flags >> 1) & 3;
3526 read_exec_only = (ldt_info.flags >> 3) & 1;
3527 limit_in_pages = (ldt_info.flags >> 4) & 1;
3528 seg_not_present = (ldt_info.flags >> 5) & 1;
3529 useable = (ldt_info.flags >> 6) & 1;
3530 #ifdef TARGET_ABI32
3531 lm = 0;
3532 #else
3533 lm = (ldt_info.flags >> 7) & 1;
3534 #endif
3535 if (contents == 3) {
3536 if (oldmode)
3537 return -TARGET_EINVAL;
3538 if (seg_not_present == 0)
3539 return -TARGET_EINVAL;
3541 /* allocate the LDT */
3542 if (!ldt_table) {
3543 env->ldt.base = target_mmap(0,
3544 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3545 PROT_READ|PROT_WRITE,
3546 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3547 if (env->ldt.base == -1)
3548 return -TARGET_ENOMEM;
3549 memset(g2h(env->ldt.base), 0,
3550 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3551 env->ldt.limit = 0xffff;
3552 ldt_table = g2h(env->ldt.base);
3555 /* NOTE: same code as Linux kernel */
3556 /* Allow LDTs to be cleared by the user. */
3557 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3558 if (oldmode ||
3559 (contents == 0 &&
3560 read_exec_only == 1 &&
3561 seg_32bit == 0 &&
3562 limit_in_pages == 0 &&
3563 seg_not_present == 1 &&
3564 useable == 0 )) {
3565 entry_1 = 0;
3566 entry_2 = 0;
3567 goto install;
3571 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3572 (ldt_info.limit & 0x0ffff);
3573 entry_2 = (ldt_info.base_addr & 0xff000000) |
3574 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3575 (ldt_info.limit & 0xf0000) |
3576 ((read_exec_only ^ 1) << 9) |
3577 (contents << 10) |
3578 ((seg_not_present ^ 1) << 15) |
3579 (seg_32bit << 22) |
3580 (limit_in_pages << 23) |
3581 (lm << 21) |
3582 0x7000;
3583 if (!oldmode)
3584 entry_2 |= (useable << 20);
3586 /* Install the new entry ... */
3587 install:
3588 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3589 lp[0] = tswap32(entry_1);
3590 lp[1] = tswap32(entry_2);
3591 return 0;
3594 /* specific and weird i386 syscalls */
3595 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3596 unsigned long bytecount)
3598 abi_long ret;
3600 switch (func) {
3601 case 0:
3602 ret = read_ldt(ptr, bytecount);
3603 break;
3604 case 1:
3605 ret = write_ldt(env, ptr, bytecount, 1);
3606 break;
3607 case 0x11:
3608 ret = write_ldt(env, ptr, bytecount, 0);
3609 break;
3610 default:
3611 ret = -TARGET_ENOSYS;
3612 break;
3614 return ret;
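/* Illustrative guest-side sketch (not part of this file): modify_ldt(2)
 * has no libc wrapper, so an x86 guest issues it through syscall(2) with
 * a struct user_desc from <asm/ldt.h>; its bit-fields correspond to the
 * flags word decoded by write_ldt() above.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

static long install_ldt_entry(unsigned int base, unsigned int limit)
{
    struct user_desc d;

    memset(&d, 0, sizeof(d));
    d.entry_number = 0;
    d.base_addr = base;
    d.limit = limit;
    d.seg_32bit = 1;
    /* func 0x11 selects the "new mode" write path (oldmode == 0 above). */
    return syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));
}
#endif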
3617 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3618 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3620 uint64_t *gdt_table = g2h(env->gdt.base);
3621 struct target_modify_ldt_ldt_s ldt_info;
3622 struct target_modify_ldt_ldt_s *target_ldt_info;
3623 int seg_32bit, contents, read_exec_only, limit_in_pages;
3624 int seg_not_present, useable, lm;
3625 uint32_t *lp, entry_1, entry_2;
3626 int i;
3628 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3629 if (!target_ldt_info)
3630 return -TARGET_EFAULT;
3631 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3632 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3633 ldt_info.limit = tswap32(target_ldt_info->limit);
3634 ldt_info.flags = tswap32(target_ldt_info->flags);
3635 if (ldt_info.entry_number == -1) {
3636 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3637 if (gdt_table[i] == 0) {
3638 ldt_info.entry_number = i;
3639 target_ldt_info->entry_number = tswap32(i);
3640 break;
3644 unlock_user_struct(target_ldt_info, ptr, 1);
3646 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3647 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3648 return -TARGET_EINVAL;
3649 seg_32bit = ldt_info.flags & 1;
3650 contents = (ldt_info.flags >> 1) & 3;
3651 read_exec_only = (ldt_info.flags >> 3) & 1;
3652 limit_in_pages = (ldt_info.flags >> 4) & 1;
3653 seg_not_present = (ldt_info.flags >> 5) & 1;
3654 useable = (ldt_info.flags >> 6) & 1;
3655 #ifdef TARGET_ABI32
3656 lm = 0;
3657 #else
3658 lm = (ldt_info.flags >> 7) & 1;
3659 #endif
3661 if (contents == 3) {
3662 if (seg_not_present == 0)
3663 return -TARGET_EINVAL;
3666 /* NOTE: same code as Linux kernel */
3667 /* Allow LDTs to be cleared by the user. */
3668 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3669 if ((contents == 0 &&
3670 read_exec_only == 1 &&
3671 seg_32bit == 0 &&
3672 limit_in_pages == 0 &&
3673 seg_not_present == 1 &&
3674 useable == 0 )) {
3675 entry_1 = 0;
3676 entry_2 = 0;
3677 goto install;
3681 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3682 (ldt_info.limit & 0x0ffff);
3683 entry_2 = (ldt_info.base_addr & 0xff000000) |
3684 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3685 (ldt_info.limit & 0xf0000) |
3686 ((read_exec_only ^ 1) << 9) |
3687 (contents << 10) |
3688 ((seg_not_present ^ 1) << 15) |
3689 (seg_32bit << 22) |
3690 (limit_in_pages << 23) |
3691 (useable << 20) |
3692 (lm << 21) |
3693 0x7000;
3695 /* Install the new entry ... */
3696 install:
3697 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3698 lp[0] = tswap32(entry_1);
3699 lp[1] = tswap32(entry_2);
3700 return 0;
3703 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3705 struct target_modify_ldt_ldt_s *target_ldt_info;
3706 uint64_t *gdt_table = g2h(env->gdt.base);
3707 uint32_t base_addr, limit, flags;
3708 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3709 int seg_not_present, useable, lm;
3710 uint32_t *lp, entry_1, entry_2;
3712 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3713 if (!target_ldt_info)
3714 return -TARGET_EFAULT;
3715 idx = tswap32(target_ldt_info->entry_number);
3716 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3717 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3718 unlock_user_struct(target_ldt_info, ptr, 1);
3719 return -TARGET_EINVAL;
3721 lp = (uint32_t *)(gdt_table + idx);
3722 entry_1 = tswap32(lp[0]);
3723 entry_2 = tswap32(lp[1]);
3725 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3726 contents = (entry_2 >> 10) & 3;
3727 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3728 seg_32bit = (entry_2 >> 22) & 1;
3729 limit_in_pages = (entry_2 >> 23) & 1;
3730 useable = (entry_2 >> 20) & 1;
3731 #ifdef TARGET_ABI32
3732 lm = 0;
3733 #else
3734 lm = (entry_2 >> 21) & 1;
3735 #endif
3736 flags = (seg_32bit << 0) | (contents << 1) |
3737 (read_exec_only << 3) | (limit_in_pages << 4) |
3738 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3739 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3740 base_addr = (entry_1 >> 16) |
3741 (entry_2 & 0xff000000) |
3742 ((entry_2 & 0xff) << 16);
3743 target_ldt_info->base_addr = tswapl(base_addr);
3744 target_ldt_info->limit = tswap32(limit);
3745 target_ldt_info->flags = tswap32(flags);
3746 unlock_user_struct(target_ldt_info, ptr, 1);
3747 return 0;
3749 #endif /* TARGET_I386 && TARGET_ABI32 */
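/* Illustrative guest-side sketch (not part of this file): the
 * entry_number == -1 convention of set_thread_area(2) that
 * do_set_thread_area() above implements by picking a free GDT TLS slot
 * and writing the chosen index back.  Assumes an i386-style guest with
 * __NR_set_thread_area and <asm/ldt.h>.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

static int allocate_tls_slot(unsigned int base)
{
    struct user_desc d;

    memset(&d, 0, sizeof(d));
    d.entry_number = -1;        /* ask for any free TLS slot */
    d.base_addr = base;
    d.limit = 0xfffff;
    d.seg_32bit = 1;
    d.limit_in_pages = 1;
    d.useable = 1;
    if (syscall(__NR_set_thread_area, &d) == -1)
        return -1;
    return d.entry_number;      /* index chosen for us */
}
#endif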
3751 #ifndef TARGET_ABI32
3752 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3754 abi_long ret = 0;
3755 abi_ulong val;
3756 int idx;
3758 switch(code) {
3759 case TARGET_ARCH_SET_GS:
3760 case TARGET_ARCH_SET_FS:
3761 if (code == TARGET_ARCH_SET_GS)
3762 idx = R_GS;
3763 else
3764 idx = R_FS;
3765 cpu_x86_load_seg(env, idx, 0);
3766 env->segs[idx].base = addr;
3767 break;
3768 case TARGET_ARCH_GET_GS:
3769 case TARGET_ARCH_GET_FS:
3770 if (code == TARGET_ARCH_GET_GS)
3771 idx = R_GS;
3772 else
3773 idx = R_FS;
3774 val = env->segs[idx].base;
3775 if (put_user(val, addr, abi_ulong))
3776 return -TARGET_EFAULT;
3777 break;
3778 default:
3779 ret = -TARGET_EINVAL;
3780 break;
3782 return ret;
3784 #endif
3786 #endif /* defined(TARGET_I386) */
3788 #define NEW_STACK_SIZE 0x40000
3790 #if defined(CONFIG_USE_NPTL)
3792 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3793 typedef struct {
3794 CPUState *env;
3795 pthread_mutex_t mutex;
3796 pthread_cond_t cond;
3797 pthread_t thread;
3798 uint32_t tid;
3799 abi_ulong child_tidptr;
3800 abi_ulong parent_tidptr;
3801 sigset_t sigmask;
3802 } new_thread_info;
3804 static void *clone_func(void *arg)
3806 new_thread_info *info = arg;
3807 CPUState *env;
3808 TaskState *ts;
3810 env = info->env;
3811 thread_env = env;
3812 ts = (TaskState *)thread_env->opaque;
3813 info->tid = gettid();
3814 env->host_tid = info->tid;
3815 task_settid(ts);
3816 if (info->child_tidptr)
3817 put_user_u32(info->tid, info->child_tidptr);
3818 if (info->parent_tidptr)
3819 put_user_u32(info->tid, info->parent_tidptr);
3820 /* Enable signals. */
3821 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3822 /* Signal to the parent that we're ready. */
3823 pthread_mutex_lock(&info->mutex);
3824 pthread_cond_broadcast(&info->cond);
3825 pthread_mutex_unlock(&info->mutex);
3826 /* Wait until the parent has finished initializing the TLS state. */
3827 pthread_mutex_lock(&clone_lock);
3828 pthread_mutex_unlock(&clone_lock);
3829 cpu_loop(env);
3830 /* never exits */
3831 return NULL;
3833 #else
3835 static int clone_func(void *arg)
3837 CPUState *env = arg;
3838 cpu_loop(env);
3839 /* never exits */
3840 return 0;
3842 #endif
3844 /* do_fork() must return host values and target errnos (unlike most
3845 do_*() functions). */
3846 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3847 abi_ulong parent_tidptr, target_ulong newtls,
3848 abi_ulong child_tidptr)
3850 int ret;
3851 TaskState *ts;
3852 CPUState *new_env;
3853 #if defined(CONFIG_USE_NPTL)
3854 unsigned int nptl_flags;
3855 sigset_t sigmask;
3856 #else
3857 uint8_t *new_stack;
3858 #endif
3860 /* Emulate vfork() with fork() */
3861 if (flags & CLONE_VFORK)
3862 flags &= ~(CLONE_VFORK | CLONE_VM);
3864 if (flags & CLONE_VM) {
3865 TaskState *parent_ts = (TaskState *)env->opaque;
3866 #if defined(CONFIG_USE_NPTL)
3867 new_thread_info info;
3868 pthread_attr_t attr;
3869 #endif
3870 ts = qemu_mallocz(sizeof(TaskState));
3871 init_task_state(ts);
3872 /* we create a new CPU instance. */
3873 new_env = cpu_copy(env);
3874 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3875 cpu_reset(new_env);
3876 #endif
3877 /* Init regs that differ from the parent. */
3878 cpu_clone_regs(new_env, newsp);
3879 new_env->opaque = ts;
3880 ts->bprm = parent_ts->bprm;
3881 ts->info = parent_ts->info;
3882 #if defined(CONFIG_USE_NPTL)
3883 nptl_flags = flags;
3884 flags &= ~CLONE_NPTL_FLAGS2;
3886 if (nptl_flags & CLONE_CHILD_CLEARTID) {
3887 ts->child_tidptr = child_tidptr;
3890 if (nptl_flags & CLONE_SETTLS)
3891 cpu_set_tls (new_env, newtls);
3893 /* Grab a mutex so that thread setup appears atomic. */
3894 pthread_mutex_lock(&clone_lock);
3896 memset(&info, 0, sizeof(info));
3897 pthread_mutex_init(&info.mutex, NULL);
3898 pthread_mutex_lock(&info.mutex);
3899 pthread_cond_init(&info.cond, NULL);
3900 info.env = new_env;
3901 if (nptl_flags & CLONE_CHILD_SETTID)
3902 info.child_tidptr = child_tidptr;
3903 if (nptl_flags & CLONE_PARENT_SETTID)
3904 info.parent_tidptr = parent_tidptr;
3906 ret = pthread_attr_init(&attr);
3907 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
3908 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
3909 /* It is not safe to deliver signals until the child has finished
3910 initializing, so temporarily block all signals. */
3911 sigfillset(&sigmask);
3912 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3914 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3915 /* TODO: Free new CPU state if thread creation failed. */
3917 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3918 pthread_attr_destroy(&attr);
3919 if (ret == 0) {
3920 /* Wait for the child to initialize. */
3921 pthread_cond_wait(&info.cond, &info.mutex);
3922 ret = info.tid;
3923 if (flags & CLONE_PARENT_SETTID)
3924 put_user_u32(ret, parent_tidptr);
3925 } else {
3926 ret = -1;
3928 pthread_mutex_unlock(&info.mutex);
3929 pthread_cond_destroy(&info.cond);
3930 pthread_mutex_destroy(&info.mutex);
3931 pthread_mutex_unlock(&clone_lock);
3932 #else
3933 if (flags & CLONE_NPTL_FLAGS2)
3934 return -EINVAL;
3935 /* This is probably going to die very quickly, but do it anyway. */
3936 new_stack = qemu_mallocz (NEW_STACK_SIZE);
3937 #ifdef __ia64__
3938 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
3939 #else
3940 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3941 #endif
3942 #endif
3943 } else {
3944 /* if there is no CLONE_VM, we consider it a fork */
3945 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3946 return -EINVAL;
3947 fork_start();
3948 ret = fork();
3949 if (ret == 0) {
3950 /* Child Process. */
3951 cpu_clone_regs(env, newsp);
3952 fork_end(1);
3953 #if defined(CONFIG_USE_NPTL)
3954 /* There is a race condition here. The parent process could
3955 theoretically read the TID in the child process before the child
3956 tid is set. This would require using either ptrace
3957 (not implemented) or having *_tidptr point at a shared memory
3958 mapping. We can't repeat the spinlock hack used above because
3959 the child process gets its own copy of the lock. */
3960 if (flags & CLONE_CHILD_SETTID)
3961 put_user_u32(gettid(), child_tidptr);
3962 if (flags & CLONE_PARENT_SETTID)
3963 put_user_u32(gettid(), parent_tidptr);
3964 ts = (TaskState *)env->opaque;
3965 if (flags & CLONE_SETTLS)
3966 cpu_set_tls (env, newtls);
3967 if (flags & CLONE_CHILD_CLEARTID)
3968 ts->child_tidptr = child_tidptr;
3969 #endif
3970 } else {
3971 fork_end(0);
3974 return ret;
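/* The thread start-up above relies on a small condition-variable handshake:
   the parent holds info.mutex across pthread_create() and sleeps on
   info.cond until clone_func() has published its TID.  The fragment below
   is a minimal, self-contained sketch of that pattern only; every name in
   it is illustrative and it is kept under "#if 0" so it is never built. */
#if 0
#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

struct handshake {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pid_t tid;
};

static void *handshake_child(void *arg)
{
    struct handshake *h = arg;
    h->tid = syscall(SYS_gettid);       /* publish the host TID */
    pthread_mutex_lock(&h->mutex);      /* blocks until the parent waits */
    pthread_cond_broadcast(&h->cond);   /* wake the parent */
    pthread_mutex_unlock(&h->mutex);
    return NULL;
}

int main(void)
{
    struct handshake h = { PTHREAD_MUTEX_INITIALIZER,
                           PTHREAD_COND_INITIALIZER, 0 };
    pthread_t thr;

    pthread_mutex_lock(&h.mutex);           /* taken before pthread_create() */
    pthread_create(&thr, NULL, handshake_child, &h);
    pthread_cond_wait(&h.cond, &h.mutex);   /* atomically releases the mutex */
    pthread_mutex_unlock(&h.mutex);
    printf("child tid = %d\n", (int)h.tid);
    pthread_join(thr, NULL);
    return 0;
}
#endif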
3977 /* warning: doesn't handle Linux-specific flags... */
3978 static int target_to_host_fcntl_cmd(int cmd)
3980 switch(cmd) {
3981 case TARGET_F_DUPFD:
3982 case TARGET_F_GETFD:
3983 case TARGET_F_SETFD:
3984 case TARGET_F_GETFL:
3985 case TARGET_F_SETFL:
3986 return cmd;
3987 case TARGET_F_GETLK:
3988 return F_GETLK;
3989 case TARGET_F_SETLK:
3990 return F_SETLK;
3991 case TARGET_F_SETLKW:
3992 return F_SETLKW;
3993 case TARGET_F_GETOWN:
3994 return F_GETOWN;
3995 case TARGET_F_SETOWN:
3996 return F_SETOWN;
3997 case TARGET_F_GETSIG:
3998 return F_GETSIG;
3999 case TARGET_F_SETSIG:
4000 return F_SETSIG;
4001 #if TARGET_ABI_BITS == 32
4002 case TARGET_F_GETLK64:
4003 return F_GETLK64;
4004 case TARGET_F_SETLK64:
4005 return F_SETLK64;
4006 case TARGET_F_SETLKW64:
4007 return F_SETLKW64;
4008 #endif
4009 case TARGET_F_SETLEASE:
4010 return F_SETLEASE;
4011 case TARGET_F_GETLEASE:
4012 return F_GETLEASE;
4013 #ifdef F_DUPFD_CLOEXEC
4014 case TARGET_F_DUPFD_CLOEXEC:
4015 return F_DUPFD_CLOEXEC;
4016 #endif
4017 case TARGET_F_NOTIFY:
4018 return F_NOTIFY;
4019 default:
4020 return -TARGET_EINVAL;
4022 return -TARGET_EINVAL;
4025 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4027 struct flock fl;
4028 struct target_flock *target_fl;
4029 struct flock64 fl64;
4030 struct target_flock64 *target_fl64;
4031 abi_long ret;
4032 int host_cmd = target_to_host_fcntl_cmd(cmd);
4034 if (host_cmd == -TARGET_EINVAL)
4035 return host_cmd;
4037 switch(cmd) {
4038 case TARGET_F_GETLK:
4039 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4040 return -TARGET_EFAULT;
4041 fl.l_type = tswap16(target_fl->l_type);
4042 fl.l_whence = tswap16(target_fl->l_whence);
4043 fl.l_start = tswapl(target_fl->l_start);
4044 fl.l_len = tswapl(target_fl->l_len);
4045 fl.l_pid = tswap32(target_fl->l_pid);
4046 unlock_user_struct(target_fl, arg, 0);
4047 ret = get_errno(fcntl(fd, host_cmd, &fl));
4048 if (ret == 0) {
4049 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4050 return -TARGET_EFAULT;
4051 target_fl->l_type = tswap16(fl.l_type);
4052 target_fl->l_whence = tswap16(fl.l_whence);
4053 target_fl->l_start = tswapl(fl.l_start);
4054 target_fl->l_len = tswapl(fl.l_len);
4055 target_fl->l_pid = tswap32(fl.l_pid);
4056 unlock_user_struct(target_fl, arg, 1);
4058 break;
4060 case TARGET_F_SETLK:
4061 case TARGET_F_SETLKW:
4062 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4063 return -TARGET_EFAULT;
4064 fl.l_type = tswap16(target_fl->l_type);
4065 fl.l_whence = tswap16(target_fl->l_whence);
4066 fl.l_start = tswapl(target_fl->l_start);
4067 fl.l_len = tswapl(target_fl->l_len);
4068 fl.l_pid = tswap32(target_fl->l_pid);
4069 unlock_user_struct(target_fl, arg, 0);
4070 ret = get_errno(fcntl(fd, host_cmd, &fl));
4071 break;
4073 case TARGET_F_GETLK64:
4074 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4075 return -TARGET_EFAULT;
4076 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4077 fl64.l_whence = tswap16(target_fl64->l_whence);
4078 fl64.l_start = tswapl(target_fl64->l_start);
4079 fl64.l_len = tswapl(target_fl64->l_len);
4080 fl64.l_pid = tswap32(target_fl64->l_pid);
4081 unlock_user_struct(target_fl64, arg, 0);
4082 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4083 if (ret == 0) {
4084 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4085 return -TARGET_EFAULT;
4086 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4087 target_fl64->l_whence = tswap16(fl64.l_whence);
4088 target_fl64->l_start = tswapl(fl64.l_start);
4089 target_fl64->l_len = tswapl(fl64.l_len);
4090 target_fl64->l_pid = tswap32(fl64.l_pid);
4091 unlock_user_struct(target_fl64, arg, 1);
4093 break;
4094 case TARGET_F_SETLK64:
4095 case TARGET_F_SETLKW64:
4096 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4097 return -TARGET_EFAULT;
4098 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4099 fl64.l_whence = tswap16(target_fl64->l_whence);
4100 fl64.l_start = tswapl(target_fl64->l_start);
4101 fl64.l_len = tswapl(target_fl64->l_len);
4102 fl64.l_pid = tswap32(target_fl64->l_pid);
4103 unlock_user_struct(target_fl64, arg, 0);
4104 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4105 break;
4107 case TARGET_F_GETFL:
4108 ret = get_errno(fcntl(fd, host_cmd, arg));
4109 if (ret >= 0) {
4110 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4112 break;
4114 case TARGET_F_SETFL:
4115 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4116 break;
4118 case TARGET_F_SETOWN:
4119 case TARGET_F_GETOWN:
4120 case TARGET_F_SETSIG:
4121 case TARGET_F_GETSIG:
4122 case TARGET_F_SETLEASE:
4123 case TARGET_F_GETLEASE:
4124 ret = get_errno(fcntl(fd, host_cmd, arg));
4125 break;
4127 default:
4128 ret = get_errno(fcntl(fd, cmd, arg));
4129 break;
4131 return ret;
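/* do_fcntl() above follows the usual convert / call / convert-back pattern:
   the guest struct flock is byte-swapped into the host layout, the host
   fcntl() is issued, and any result is swapped back.  Below is a minimal
   sketch of that pattern for a hypothetical big-endian 32-bit guest on a
   little-endian host; guest_flock and the bswap helpers are stand-ins for
   the real target_flock/tswap machinery, and the block is not built. */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <string.h>

struct guest_flock {        /* guest-endian, fixed-width layout */
    uint16_t l_type;
    uint16_t l_whence;
    uint32_t l_start;
    uint32_t l_len;
    uint32_t l_pid;
};

static uint16_t bswap16_(uint16_t v)
{
    return (uint16_t)((v >> 8) | (v << 8));
}

static uint32_t bswap32_(uint32_t v)
{
    return (v >> 24) | ((v >> 8) & 0x0000ff00u)
         | ((v << 8) & 0x00ff0000u) | (v << 24);
}

static int guest_fcntl_setlk(int fd, const struct guest_flock *gfl)
{
    struct flock fl;                          /* host representation */

    memset(&fl, 0, sizeof(fl));
    fl.l_type   = (short)bswap16_(gfl->l_type);
    fl.l_whence = (short)bswap16_(gfl->l_whence);
    fl.l_start  = (off_t)bswap32_(gfl->l_start);
    fl.l_len    = (off_t)bswap32_(gfl->l_len);
    fl.l_pid    = (pid_t)bswap32_(gfl->l_pid);
    return fcntl(fd, F_SETLK, &fl);           /* host syscall, host struct */
}
#endif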
4134 #ifdef USE_UID16
4136 static inline int high2lowuid(int uid)
4138 if (uid > 65535)
4139 return 65534;
4140 else
4141 return uid;
4144 static inline int high2lowgid(int gid)
4146 if (gid > 65535)
4147 return 65534;
4148 else
4149 return gid;
4152 static inline int low2highuid(int uid)
4154 if ((int16_t)uid == -1)
4155 return -1;
4156 else
4157 return uid;
4160 static inline int low2highgid(int gid)
4162 if ((int16_t)gid == -1)
4163 return -1;
4164 else
4165 return gid;
4167 static inline int tswapid(int id)
4169 return tswap16(id);
4171 #else /* !USE_UID16 */
4172 static inline int high2lowuid(int uid)
4174 return uid;
4176 static inline int high2lowgid(int gid)
4178 return gid;
4180 static inline int low2highuid(int uid)
4182 return uid;
4184 static inline int low2highgid(int gid)
4186 return gid;
4188 static inline int tswapid(int id)
4190 return tswap32(id);
4192 #endif /* USE_UID16 */
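/* With USE_UID16 the guest ABI only has 16-bit uid/gid fields, so the
   helpers above clamp anything wider to the overflow id 65534 ("nobody").
   A tiny sketch of that behaviour, purely illustrative and not built: */
#if 0
#include <stdio.h>

static int clamp_uid16(int uid)
{
    return uid > 65535 ? 65534 : uid;   /* same rule as high2lowuid() */
}

int main(void)
{
    printf("%d %d\n", clamp_uid16(1000), clamp_uid16(100000)); /* 1000 65534 */
    return 0;
}
#endif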
4194 void syscall_init(void)
4196 IOCTLEntry *ie;
4197 const argtype *arg_type;
4198 int size;
4199 int i;
4201 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4202 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4203 #include "syscall_types.h"
4204 #undef STRUCT
4205 #undef STRUCT_SPECIAL
4207 /* we patch the ioctl size if necessary. We rely on the fact that
4208 no ioctl has all the bits at '1' in the size field */
4209 ie = ioctl_entries;
4210 while (ie->target_cmd != 0) {
4211 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4212 TARGET_IOC_SIZEMASK) {
4213 arg_type = ie->arg_type;
4214 if (arg_type[0] != TYPE_PTR) {
4215 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4216 ie->target_cmd);
4217 exit(1);
4219 arg_type++;
4220 size = thunk_type_size(arg_type, 0);
4221 ie->target_cmd = (ie->target_cmd &
4222 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4223 (size << TARGET_IOC_SIZESHIFT);
4226 /* Build target_to_host_errno_table[] table from
4227 * host_to_target_errno_table[]. */
4228 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4229 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4231 /* automatic consistency check if same arch */
4232 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4233 (defined(__x86_64__) && defined(TARGET_X86_64))
4234 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4235 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4236 ie->name, ie->target_cmd, ie->host_cmd);
4238 #endif
4239 ie++;
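/* The size patching above clears the all-ones size field of an ioctl
   request number and substitutes the real thunk size.  The sketch below
   shows the same bit manipulation with the host's _IOC_* constants standing
   in for the TARGET_IOC_* ones; it is illustrative only and not built. */
#if 0
#include <linux/ioctl.h>
#include <stdio.h>

static unsigned int patch_ioctl_size(unsigned int req, unsigned int size)
{
    req &= ~((unsigned int)_IOC_SIZEMASK << _IOC_SIZESHIFT); /* drop old size */
    req |= (size & _IOC_SIZEMASK) << _IOC_SIZESHIFT;         /* insert new   */
    return req;
}

int main(void)
{
    /* a request whose size field is "all bits set", i.e. "patch me" */
    unsigned int req = _IOC(_IOC_READ, 'r', 1, _IOC_SIZEMASK);

    printf("patched: %#x\n", patch_ioctl_size(req, 64));
    return 0;
}
#endif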
4243 #if TARGET_ABI_BITS == 32
4244 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4246 #ifdef TARGET_WORDS_BIGENDIAN
4247 return ((uint64_t)word0 << 32) | word1;
4248 #else
4249 return ((uint64_t)word1 << 32) | word0;
4250 #endif
4252 #else /* TARGET_ABI_BITS == 32 */
4253 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4255 return word0;
4257 #endif /* TARGET_ABI_BITS != 32 */
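/* target_offset64() glues two 32-bit syscall arguments into one 64-bit
   value; which word carries the high half depends on the guest's
   endianness.  Minimal sketch, not built: */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t word0 = 0x01234567, word1 = 0x89abcdef;
    uint64_t be_guest = ((uint64_t)word0 << 32) | word1; /* big-endian guest */
    uint64_t le_guest = ((uint64_t)word1 << 32) | word0; /* little-endian guest */

    printf("%llx %llx\n", (unsigned long long)be_guest,
           (unsigned long long)le_guest);
    return 0;
}
#endif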
4259 #ifdef TARGET_NR_truncate64
4260 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4261 abi_long arg2,
4262 abi_long arg3,
4263 abi_long arg4)
4265 #ifdef TARGET_ARM
4266 if (((CPUARMState *)cpu_env)->eabi)
4268 arg2 = arg3;
4269 arg3 = arg4;
4271 #endif
4272 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4274 #endif
4276 #ifdef TARGET_NR_ftruncate64
4277 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4278 abi_long arg2,
4279 abi_long arg3,
4280 abi_long arg4)
4282 #ifdef TARGET_ARM
4283 if (((CPUARMState *)cpu_env)->eabi)
4285 arg2 = arg3;
4286 arg3 = arg4;
4288 #endif
4289 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4291 #endif
4293 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4294 abi_ulong target_addr)
4296 struct target_timespec *target_ts;
4298 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4299 return -TARGET_EFAULT;
4300 host_ts->tv_sec = tswapl(target_ts->tv_sec);
4301 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
4302 unlock_user_struct(target_ts, target_addr, 0);
4303 return 0;
4306 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4307 struct timespec *host_ts)
4309 struct target_timespec *target_ts;
4311 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4312 return -TARGET_EFAULT;
4313 target_ts->tv_sec = tswapl(host_ts->tv_sec);
4314 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
4315 unlock_user_struct(target_ts, target_addr, 1);
4316 return 0;
4319 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4320 static inline abi_long host_to_target_stat64(void *cpu_env,
4321 abi_ulong target_addr,
4322 struct stat *host_st)
4324 #ifdef TARGET_ARM
4325 if (((CPUARMState *)cpu_env)->eabi) {
4326 struct target_eabi_stat64 *target_st;
4328 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4329 return -TARGET_EFAULT;
4330 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4331 __put_user(host_st->st_dev, &target_st->st_dev);
4332 __put_user(host_st->st_ino, &target_st->st_ino);
4333 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4334 __put_user(host_st->st_ino, &target_st->__st_ino);
4335 #endif
4336 __put_user(host_st->st_mode, &target_st->st_mode);
4337 __put_user(host_st->st_nlink, &target_st->st_nlink);
4338 __put_user(host_st->st_uid, &target_st->st_uid);
4339 __put_user(host_st->st_gid, &target_st->st_gid);
4340 __put_user(host_st->st_rdev, &target_st->st_rdev);
4341 __put_user(host_st->st_size, &target_st->st_size);
4342 __put_user(host_st->st_blksize, &target_st->st_blksize);
4343 __put_user(host_st->st_blocks, &target_st->st_blocks);
4344 __put_user(host_st->st_atime, &target_st->target_st_atime);
4345 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4346 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4347 unlock_user_struct(target_st, target_addr, 1);
4348 } else
4349 #endif
4351 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4352 struct target_stat *target_st;
4353 #else
4354 struct target_stat64 *target_st;
4355 #endif
4357 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4358 return -TARGET_EFAULT;
4359 memset(target_st, 0, sizeof(*target_st));
4360 __put_user(host_st->st_dev, &target_st->st_dev);
4361 __put_user(host_st->st_ino, &target_st->st_ino);
4362 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4363 __put_user(host_st->st_ino, &target_st->__st_ino);
4364 #endif
4365 __put_user(host_st->st_mode, &target_st->st_mode);
4366 __put_user(host_st->st_nlink, &target_st->st_nlink);
4367 __put_user(host_st->st_uid, &target_st->st_uid);
4368 __put_user(host_st->st_gid, &target_st->st_gid);
4369 __put_user(host_st->st_rdev, &target_st->st_rdev);
4370 /* XXX: better use of kernel struct */
4371 __put_user(host_st->st_size, &target_st->st_size);
4372 __put_user(host_st->st_blksize, &target_st->st_blksize);
4373 __put_user(host_st->st_blocks, &target_st->st_blocks);
4374 __put_user(host_st->st_atime, &target_st->target_st_atime);
4375 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4376 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4377 unlock_user_struct(target_st, target_addr, 1);
4380 return 0;
4382 #endif
4384 #if defined(CONFIG_USE_NPTL)
4385 /* ??? Using host futex calls even when target atomic operations
4386 are not really atomic probably breaks things. However, implementing
4387 futexes locally would make futexes shared between multiple processes
4388 tricky. In any case they're probably useless because guest atomic
4389 operations won't work either. */
4390 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4391 target_ulong uaddr2, int val3)
4393 struct timespec ts, *pts;
4394 int base_op;
4396 /* ??? We assume FUTEX_* constants are the same on both host
4397 and target. */
4398 #ifdef FUTEX_CMD_MASK
4399 base_op = op & FUTEX_CMD_MASK;
4400 #else
4401 base_op = op;
4402 #endif
4403 switch (base_op) {
4404 case FUTEX_WAIT:
4405 if (timeout) {
4406 pts = &ts;
4407 target_to_host_timespec(pts, timeout);
4408 } else {
4409 pts = NULL;
4411 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4412 pts, NULL, 0));
4413 case FUTEX_WAKE:
4414 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4415 case FUTEX_FD:
4416 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4417 case FUTEX_REQUEUE:
4418 case FUTEX_CMP_REQUEUE:
4419 case FUTEX_WAKE_OP:
4420 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4421 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4422 But the prototype takes a `struct timespec *'; insert casts
4423 to satisfy the compiler. We do not need to tswap TIMEOUT
4424 since it's not compared to guest memory. */
4425 pts = (struct timespec *)(uintptr_t) timeout;
4426 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4427 g2h(uaddr2),
4428 (base_op == FUTEX_CMP_REQUEUE
4429 ? tswap32(val3)
4430 : val3)));
4431 default:
4432 return -TARGET_ENOSYS;
4435 #endif
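/* do_futex() above forwards guest futex operations to the host futex
   syscall on the g2h()-translated address.  The host primitive itself looks
   like the sketch below, only on ordinary host memory; the wrapper names
   are made up and the block is not built. */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long futex_wait(int *uaddr, int expected)
{
    /* Blocks while *uaddr == expected; fails with EAGAIN if it already
       changed by the time the kernel looks at it. */
    return syscall(SYS_futex, uaddr, FUTEX_WAIT, expected, NULL, NULL, 0);
}

static long futex_wake(int *uaddr, int nwaiters)
{
    /* Wakes up to nwaiters threads sleeping on uaddr. */
    return syscall(SYS_futex, uaddr, FUTEX_WAKE, nwaiters, NULL, NULL, 0);
}
#endif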
4437 /* Map host to target signal numbers for the wait family of syscalls.
4438 Assume all other status bits are the same. */
4439 static int host_to_target_waitstatus(int status)
4441 if (WIFSIGNALED(status)) {
4442 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4444 if (WIFSTOPPED(status)) {
4445 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4446 | (status & 0xff);
4448 return status;
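/* host_to_target_waitstatus() relies on the classic wait status layout:
   bits 0-6 hold the terminating signal (0 for a normal exit), bit 7 the
   core-dump flag, and bits 8-15 the exit code or stop signal.  A small
   illustration, not built: */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>

int main(void)
{
    int status;

    status = SIGTERM & 0x7f;                 /* "killed by SIGTERM" */
    printf("signaled=%d sig=%d\n", WIFSIGNALED(status), WTERMSIG(status));

    status = 42 << 8;                        /* "exited with code 42" */
    printf("exited=%d code=%d\n", WIFEXITED(status), WEXITSTATUS(status));
    return 0;
}
#endif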
4451 int get_osversion(void)
4453 static int osversion;
4454 struct new_utsname buf;
4455 const char *s;
4456 int i, n, tmp;
4457 if (osversion)
4458 return osversion;
4459 if (qemu_uname_release && *qemu_uname_release) {
4460 s = qemu_uname_release;
4461 } else {
4462 if (sys_uname(&buf))
4463 return 0;
4464 s = buf.release;
4466 tmp = 0;
4467 for (i = 0; i < 3; i++) {
4468 n = 0;
4469 while (*s >= '0' && *s <= '9') {
4470 n *= 10;
4471 n += *s - '0';
4472 s++;
4474 tmp = (tmp << 8) + n;
4475 if (*s == '.')
4476 s++;
4478 osversion = tmp;
4479 return osversion;
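/* get_osversion() packs the "a.b.c" release string into one integer with
   eight bits per component, matching the kernel's KERNEL_VERSION(a,b,c)
   encoding.  Illustrative only, not built: */
#if 0
#include <stdio.h>

#define KVER(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
    printf("%#x\n", KVER(2, 6, 32));   /* prints 0x20620 */
    return 0;
}
#endif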
4482 /* do_syscall() should always have a single exit point at the end so
4483 that actions, such as logging of syscall results, can be performed.
4484 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4485 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4486 abi_long arg2, abi_long arg3, abi_long arg4,
4487 abi_long arg5, abi_long arg6)
4489 abi_long ret;
4490 struct stat st;
4491 struct statfs stfs;
4492 void *p;
4494 #ifdef DEBUG
4495 gemu_log("syscall %d", num);
4496 #endif
4497 if(do_strace)
4498 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4500 switch(num) {
4501 case TARGET_NR_exit:
4502 #ifdef CONFIG_USE_NPTL
4503 /* In old applications this may be used to implement _exit(2).
4504 However, in threaded applications it is used for thread termination,
4505 and _exit_group is used for application termination.
4506 Do thread termination if we have more than one thread. */
4507 /* FIXME: This probably breaks if a signal arrives. We should probably
4508 be disabling signals. */
4509 if (first_cpu->next_cpu) {
4510 TaskState *ts;
4511 CPUState **lastp;
4512 CPUState *p;
4514 cpu_list_lock();
4515 lastp = &first_cpu;
4516 p = first_cpu;
4517 while (p && p != (CPUState *)cpu_env) {
4518 lastp = &p->next_cpu;
4519 p = p->next_cpu;
4521 /* If we didn't find the CPU for this thread then something is
4522 horribly wrong. */
4523 if (!p)
4524 abort();
4525 /* Remove the CPU from the list. */
4526 *lastp = p->next_cpu;
4527 cpu_list_unlock();
4528 ts = ((CPUState *)cpu_env)->opaque;
4529 if (ts->child_tidptr) {
4530 put_user_u32(0, ts->child_tidptr);
4531 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4532 NULL, NULL, 0);
4534 thread_env = NULL;
4535 qemu_free(cpu_env);
4536 qemu_free(ts);
4537 pthread_exit(NULL);
4539 #endif
4540 #ifdef TARGET_GPROF
4541 _mcleanup();
4542 #endif
4543 gdb_exit(cpu_env, arg1);
4544 _exit(arg1);
4545 ret = 0; /* avoid warning */
4546 break;
4547 case TARGET_NR_read:
4548 if (arg3 == 0)
4549 ret = 0;
4550 else {
4551 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4552 goto efault;
4553 ret = get_errno(read(arg1, p, arg3));
4554 unlock_user(p, arg2, ret);
4556 break;
4557 case TARGET_NR_write:
4558 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4559 goto efault;
4560 ret = get_errno(write(arg1, p, arg3));
4561 unlock_user(p, arg2, 0);
4562 break;
4563 case TARGET_NR_open:
4564 if (!(p = lock_user_string(arg1)))
4565 goto efault;
4566 ret = get_errno(open(path(p),
4567 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4568 arg3));
4569 unlock_user(p, arg1, 0);
4570 break;
4571 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4572 case TARGET_NR_openat:
4573 if (!(p = lock_user_string(arg2)))
4574 goto efault;
4575 ret = get_errno(sys_openat(arg1,
4576 path(p),
4577 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4578 arg4));
4579 unlock_user(p, arg2, 0);
4580 break;
4581 #endif
4582 case TARGET_NR_close:
4583 ret = get_errno(close(arg1));
4584 break;
4585 case TARGET_NR_brk:
4586 ret = do_brk(arg1);
4587 break;
4588 case TARGET_NR_fork:
4589 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4590 break;
4591 #ifdef TARGET_NR_waitpid
4592 case TARGET_NR_waitpid:
4594 int status;
4595 ret = get_errno(waitpid(arg1, &status, arg3));
4596 if (!is_error(ret) && arg2
4597 && put_user_s32(host_to_target_waitstatus(status), arg2))
4598 goto efault;
4600 break;
4601 #endif
4602 #ifdef TARGET_NR_waitid
4603 case TARGET_NR_waitid:
4605 siginfo_t info;
4606 info.si_pid = 0;
4607 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4608 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4609 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4610 goto efault;
4611 host_to_target_siginfo(p, &info);
4612 unlock_user(p, arg3, sizeof(target_siginfo_t));
4615 break;
4616 #endif
4617 #ifdef TARGET_NR_creat /* not on alpha */
4618 case TARGET_NR_creat:
4619 if (!(p = lock_user_string(arg1)))
4620 goto efault;
4621 ret = get_errno(creat(p, arg2));
4622 unlock_user(p, arg1, 0);
4623 break;
4624 #endif
4625 case TARGET_NR_link:
4627 void * p2;
4628 p = lock_user_string(arg1);
4629 p2 = lock_user_string(arg2);
4630 if (!p || !p2)
4631 ret = -TARGET_EFAULT;
4632 else
4633 ret = get_errno(link(p, p2));
4634 unlock_user(p2, arg2, 0);
4635 unlock_user(p, arg1, 0);
4637 break;
4638 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4639 case TARGET_NR_linkat:
4641 void * p2 = NULL;
4642 if (!arg2 || !arg4)
4643 goto efault;
4644 p = lock_user_string(arg2);
4645 p2 = lock_user_string(arg4);
4646 if (!p || !p2)
4647 ret = -TARGET_EFAULT;
4648 else
4649 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4650 unlock_user(p, arg2, 0);
4651 unlock_user(p2, arg4, 0);
4653 break;
4654 #endif
4655 case TARGET_NR_unlink:
4656 if (!(p = lock_user_string(arg1)))
4657 goto efault;
4658 ret = get_errno(unlink(p));
4659 unlock_user(p, arg1, 0);
4660 break;
4661 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4662 case TARGET_NR_unlinkat:
4663 if (!(p = lock_user_string(arg2)))
4664 goto efault;
4665 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4666 unlock_user(p, arg2, 0);
4667 break;
4668 #endif
4669 case TARGET_NR_execve:
4671 char **argp, **envp;
4672 int argc, envc;
4673 abi_ulong gp;
4674 abi_ulong guest_argp;
4675 abi_ulong guest_envp;
4676 abi_ulong addr;
4677 char **q;
4679 argc = 0;
4680 guest_argp = arg2;
4681 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4682 if (get_user_ual(addr, gp))
4683 goto efault;
4684 if (!addr)
4685 break;
4686 argc++;
4688 envc = 0;
4689 guest_envp = arg3;
4690 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4691 if (get_user_ual(addr, gp))
4692 goto efault;
4693 if (!addr)
4694 break;
4695 envc++;
4698 argp = alloca((argc + 1) * sizeof(void *));
4699 envp = alloca((envc + 1) * sizeof(void *));
4701 for (gp = guest_argp, q = argp; gp;
4702 gp += sizeof(abi_ulong), q++) {
4703 if (get_user_ual(addr, gp))
4704 goto execve_efault;
4705 if (!addr)
4706 break;
4707 if (!(*q = lock_user_string(addr)))
4708 goto execve_efault;
4710 *q = NULL;
4712 for (gp = guest_envp, q = envp; gp;
4713 gp += sizeof(abi_ulong), q++) {
4714 if (get_user_ual(addr, gp))
4715 goto execve_efault;
4716 if (!addr)
4717 break;
4718 if (!(*q = lock_user_string(addr)))
4719 goto execve_efault;
4721 *q = NULL;
4723 if (!(p = lock_user_string(arg1)))
4724 goto execve_efault;
4725 ret = get_errno(execve(p, argp, envp));
4726 unlock_user(p, arg1, 0);
4728 goto execve_end;
4730 execve_efault:
4731 ret = -TARGET_EFAULT;
4733 execve_end:
4734 for (gp = guest_argp, q = argp; *q;
4735 gp += sizeof(abi_ulong), q++) {
4736 if (get_user_ual(addr, gp)
4737 || !addr)
4738 break;
4739 unlock_user(*q, addr, 0);
4741 for (gp = guest_envp, q = envp; *q;
4742 gp += sizeof(abi_ulong), q++) {
4743 if (get_user_ual(addr, gp)
4744 || !addr)
4745 break;
4746 unlock_user(*q, addr, 0);
4749 break;
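/* The execve case above walks two guest pointer arrays and rebuilds them as
   NULL-terminated host argv/envp vectors before calling execve().  The
   shape of the final call is sketched below with plain host strings; the
   path and arguments are made up and the block is not built. */
#if 0
#include <unistd.h>

int main(void)
{
    char *argv[] = { "/bin/echo", "hello", NULL };
    char *envp[] = { "PATH=/bin:/usr/bin", NULL };

    execve(argv[0], argv, envp);   /* only returns on failure */
    return 1;
}
#endif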
4750 case TARGET_NR_chdir:
4751 if (!(p = lock_user_string(arg1)))
4752 goto efault;
4753 ret = get_errno(chdir(p));
4754 unlock_user(p, arg1, 0);
4755 break;
4756 #ifdef TARGET_NR_time
4757 case TARGET_NR_time:
4759 time_t host_time;
4760 ret = get_errno(time(&host_time));
4761 if (!is_error(ret)
4762 && arg1
4763 && put_user_sal(host_time, arg1))
4764 goto efault;
4766 break;
4767 #endif
4768 case TARGET_NR_mknod:
4769 if (!(p = lock_user_string(arg1)))
4770 goto efault;
4771 ret = get_errno(mknod(p, arg2, arg3));
4772 unlock_user(p, arg1, 0);
4773 break;
4774 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4775 case TARGET_NR_mknodat:
4776 if (!(p = lock_user_string(arg2)))
4777 goto efault;
4778 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4779 unlock_user(p, arg2, 0);
4780 break;
4781 #endif
4782 case TARGET_NR_chmod:
4783 if (!(p = lock_user_string(arg1)))
4784 goto efault;
4785 ret = get_errno(chmod(p, arg2));
4786 unlock_user(p, arg1, 0);
4787 break;
4788 #ifdef TARGET_NR_break
4789 case TARGET_NR_break:
4790 goto unimplemented;
4791 #endif
4792 #ifdef TARGET_NR_oldstat
4793 case TARGET_NR_oldstat:
4794 goto unimplemented;
4795 #endif
4796 case TARGET_NR_lseek:
4797 ret = get_errno(lseek(arg1, arg2, arg3));
4798 break;
4799 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4800 /* Alpha specific */
4801 case TARGET_NR_getxpid:
4802 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
4803 ret = get_errno(getpid());
4804 break;
4805 #endif
4806 #ifdef TARGET_NR_getpid
4807 case TARGET_NR_getpid:
4808 ret = get_errno(getpid());
4809 break;
4810 #endif
4811 case TARGET_NR_mount:
4813 /* need to look at the data field */
4814 void *p2, *p3;
4815 p = lock_user_string(arg1);
4816 p2 = lock_user_string(arg2);
4817 p3 = lock_user_string(arg3);
4818 if (!p || !p2 || !p3)
4819 ret = -TARGET_EFAULT;
4820 else {
4821 /* FIXME - arg5 should be locked, but it isn't clear how to
4822 * do that since it's not guaranteed to be a NULL-terminated
4823 * string.
4825 if ( ! arg5 )
4826 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
4827 else
4828 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4830 unlock_user(p, arg1, 0);
4831 unlock_user(p2, arg2, 0);
4832 unlock_user(p3, arg3, 0);
4833 break;
4835 #ifdef TARGET_NR_umount
4836 case TARGET_NR_umount:
4837 if (!(p = lock_user_string(arg1)))
4838 goto efault;
4839 ret = get_errno(umount(p));
4840 unlock_user(p, arg1, 0);
4841 break;
4842 #endif
4843 #ifdef TARGET_NR_stime /* not on alpha */
4844 case TARGET_NR_stime:
4846 time_t host_time;
4847 if (get_user_sal(host_time, arg1))
4848 goto efault;
4849 ret = get_errno(stime(&host_time));
4851 break;
4852 #endif
4853 case TARGET_NR_ptrace:
4854 goto unimplemented;
4855 #ifdef TARGET_NR_alarm /* not on alpha */
4856 case TARGET_NR_alarm:
4857 ret = alarm(arg1);
4858 break;
4859 #endif
4860 #ifdef TARGET_NR_oldfstat
4861 case TARGET_NR_oldfstat:
4862 goto unimplemented;
4863 #endif
4864 #ifdef TARGET_NR_pause /* not on alpha */
4865 case TARGET_NR_pause:
4866 ret = get_errno(pause());
4867 break;
4868 #endif
4869 #ifdef TARGET_NR_utime
4870 case TARGET_NR_utime:
4872 struct utimbuf tbuf, *host_tbuf;
4873 struct target_utimbuf *target_tbuf;
4874 if (arg2) {
4875 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4876 goto efault;
4877 tbuf.actime = tswapl(target_tbuf->actime);
4878 tbuf.modtime = tswapl(target_tbuf->modtime);
4879 unlock_user_struct(target_tbuf, arg2, 0);
4880 host_tbuf = &tbuf;
4881 } else {
4882 host_tbuf = NULL;
4884 if (!(p = lock_user_string(arg1)))
4885 goto efault;
4886 ret = get_errno(utime(p, host_tbuf));
4887 unlock_user(p, arg1, 0);
4889 break;
4890 #endif
4891 case TARGET_NR_utimes:
4893 struct timeval *tvp, tv[2];
4894 if (arg2) {
4895 if (copy_from_user_timeval(&tv[0], arg2)
4896 || copy_from_user_timeval(&tv[1],
4897 arg2 + sizeof(struct target_timeval)))
4898 goto efault;
4899 tvp = tv;
4900 } else {
4901 tvp = NULL;
4903 if (!(p = lock_user_string(arg1)))
4904 goto efault;
4905 ret = get_errno(utimes(p, tvp));
4906 unlock_user(p, arg1, 0);
4908 break;
4909 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4910 case TARGET_NR_futimesat:
4912 struct timeval *tvp, tv[2];
4913 if (arg3) {
4914 if (copy_from_user_timeval(&tv[0], arg3)
4915 || copy_from_user_timeval(&tv[1],
4916 arg3 + sizeof(struct target_timeval)))
4917 goto efault;
4918 tvp = tv;
4919 } else {
4920 tvp = NULL;
4922 if (!(p = lock_user_string(arg2)))
4923 goto efault;
4924 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4925 unlock_user(p, arg2, 0);
4927 break;
4928 #endif
4929 #ifdef TARGET_NR_stty
4930 case TARGET_NR_stty:
4931 goto unimplemented;
4932 #endif
4933 #ifdef TARGET_NR_gtty
4934 case TARGET_NR_gtty:
4935 goto unimplemented;
4936 #endif
4937 case TARGET_NR_access:
4938 if (!(p = lock_user_string(arg1)))
4939 goto efault;
4940 ret = get_errno(access(path(p), arg2));
4941 unlock_user(p, arg1, 0);
4942 break;
4943 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4944 case TARGET_NR_faccessat:
4945 if (!(p = lock_user_string(arg2)))
4946 goto efault;
4947 ret = get_errno(sys_faccessat(arg1, p, arg3));
4948 unlock_user(p, arg2, 0);
4949 break;
4950 #endif
4951 #ifdef TARGET_NR_nice /* not on alpha */
4952 case TARGET_NR_nice:
4953 ret = get_errno(nice(arg1));
4954 break;
4955 #endif
4956 #ifdef TARGET_NR_ftime
4957 case TARGET_NR_ftime:
4958 goto unimplemented;
4959 #endif
4960 case TARGET_NR_sync:
4961 sync();
4962 ret = 0;
4963 break;
4964 case TARGET_NR_kill:
4965 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
4966 break;
4967 case TARGET_NR_rename:
4969 void *p2;
4970 p = lock_user_string(arg1);
4971 p2 = lock_user_string(arg2);
4972 if (!p || !p2)
4973 ret = -TARGET_EFAULT;
4974 else
4975 ret = get_errno(rename(p, p2));
4976 unlock_user(p2, arg2, 0);
4977 unlock_user(p, arg1, 0);
4979 break;
4980 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4981 case TARGET_NR_renameat:
4983 void *p2;
4984 p = lock_user_string(arg2);
4985 p2 = lock_user_string(arg4);
4986 if (!p || !p2)
4987 ret = -TARGET_EFAULT;
4988 else
4989 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
4990 unlock_user(p2, arg4, 0);
4991 unlock_user(p, arg2, 0);
4993 break;
4994 #endif
4995 case TARGET_NR_mkdir:
4996 if (!(p = lock_user_string(arg1)))
4997 goto efault;
4998 ret = get_errno(mkdir(p, arg2));
4999 unlock_user(p, arg1, 0);
5000 break;
5001 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5002 case TARGET_NR_mkdirat:
5003 if (!(p = lock_user_string(arg2)))
5004 goto efault;
5005 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5006 unlock_user(p, arg2, 0);
5007 break;
5008 #endif
5009 case TARGET_NR_rmdir:
5010 if (!(p = lock_user_string(arg1)))
5011 goto efault;
5012 ret = get_errno(rmdir(p));
5013 unlock_user(p, arg1, 0);
5014 break;
5015 case TARGET_NR_dup:
5016 ret = get_errno(dup(arg1));
5017 break;
5018 case TARGET_NR_pipe:
5019 ret = do_pipe(cpu_env, arg1, 0, 0);
5020 break;
5021 #ifdef TARGET_NR_pipe2
5022 case TARGET_NR_pipe2:
5023 ret = do_pipe(cpu_env, arg1, arg2, 1);
5024 break;
5025 #endif
5026 case TARGET_NR_times:
5028 struct target_tms *tmsp;
5029 struct tms tms;
5030 ret = get_errno(times(&tms));
5031 if (arg1) {
5032 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5033 if (!tmsp)
5034 goto efault;
5035 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
5036 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
5037 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
5038 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
5040 if (!is_error(ret))
5041 ret = host_to_target_clock_t(ret);
5043 break;
5044 #ifdef TARGET_NR_prof
5045 case TARGET_NR_prof:
5046 goto unimplemented;
5047 #endif
5048 #ifdef TARGET_NR_signal
5049 case TARGET_NR_signal:
5050 goto unimplemented;
5051 #endif
5052 case TARGET_NR_acct:
5053 if (arg1 == 0) {
5054 ret = get_errno(acct(NULL));
5055 } else {
5056 if (!(p = lock_user_string(arg1)))
5057 goto efault;
5058 ret = get_errno(acct(path(p)));
5059 unlock_user(p, arg1, 0);
5061 break;
5062 #ifdef TARGET_NR_umount2 /* not on alpha */
5063 case TARGET_NR_umount2:
5064 if (!(p = lock_user_string(arg1)))
5065 goto efault;
5066 ret = get_errno(umount2(p, arg2));
5067 unlock_user(p, arg1, 0);
5068 break;
5069 #endif
5070 #ifdef TARGET_NR_lock
5071 case TARGET_NR_lock:
5072 goto unimplemented;
5073 #endif
5074 case TARGET_NR_ioctl:
5075 ret = do_ioctl(arg1, arg2, arg3);
5076 break;
5077 case TARGET_NR_fcntl:
5078 ret = do_fcntl(arg1, arg2, arg3);
5079 break;
5080 #ifdef TARGET_NR_mpx
5081 case TARGET_NR_mpx:
5082 goto unimplemented;
5083 #endif
5084 case TARGET_NR_setpgid:
5085 ret = get_errno(setpgid(arg1, arg2));
5086 break;
5087 #ifdef TARGET_NR_ulimit
5088 case TARGET_NR_ulimit:
5089 goto unimplemented;
5090 #endif
5091 #ifdef TARGET_NR_oldolduname
5092 case TARGET_NR_oldolduname:
5093 goto unimplemented;
5094 #endif
5095 case TARGET_NR_umask:
5096 ret = get_errno(umask(arg1));
5097 break;
5098 case TARGET_NR_chroot:
5099 if (!(p = lock_user_string(arg1)))
5100 goto efault;
5101 ret = get_errno(chroot(p));
5102 unlock_user(p, arg1, 0);
5103 break;
5104 case TARGET_NR_ustat:
5105 goto unimplemented;
5106 case TARGET_NR_dup2:
5107 ret = get_errno(dup2(arg1, arg2));
5108 break;
5109 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5110 case TARGET_NR_dup3:
5111 ret = get_errno(dup3(arg1, arg2, arg3));
5112 break;
5113 #endif
5114 #ifdef TARGET_NR_getppid /* not on alpha */
5115 case TARGET_NR_getppid:
5116 ret = get_errno(getppid());
5117 break;
5118 #endif
5119 case TARGET_NR_getpgrp:
5120 ret = get_errno(getpgrp());
5121 break;
5122 case TARGET_NR_setsid:
5123 ret = get_errno(setsid());
5124 break;
5125 #ifdef TARGET_NR_sigaction
5126 case TARGET_NR_sigaction:
5128 #if defined(TARGET_ALPHA)
5129 struct target_sigaction act, oact, *pact = 0;
5130 struct target_old_sigaction *old_act;
5131 if (arg2) {
5132 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5133 goto efault;
5134 act._sa_handler = old_act->_sa_handler;
5135 target_siginitset(&act.sa_mask, old_act->sa_mask);
5136 act.sa_flags = old_act->sa_flags;
5137 act.sa_restorer = 0;
5138 unlock_user_struct(old_act, arg2, 0);
5139 pact = &act;
5141 ret = get_errno(do_sigaction(arg1, pact, &oact));
5142 if (!is_error(ret) && arg3) {
5143 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5144 goto efault;
5145 old_act->_sa_handler = oact._sa_handler;
5146 old_act->sa_mask = oact.sa_mask.sig[0];
5147 old_act->sa_flags = oact.sa_flags;
5148 unlock_user_struct(old_act, arg3, 1);
5150 #elif defined(TARGET_MIPS)
5151 struct target_sigaction act, oact, *pact, *old_act;
5153 if (arg2) {
5154 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5155 goto efault;
5156 act._sa_handler = old_act->_sa_handler;
5157 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5158 act.sa_flags = old_act->sa_flags;
5159 unlock_user_struct(old_act, arg2, 0);
5160 pact = &act;
5161 } else {
5162 pact = NULL;
5165 ret = get_errno(do_sigaction(arg1, pact, &oact));
5167 if (!is_error(ret) && arg3) {
5168 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5169 goto efault;
5170 old_act->_sa_handler = oact._sa_handler;
5171 old_act->sa_flags = oact.sa_flags;
5172 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5173 old_act->sa_mask.sig[1] = 0;
5174 old_act->sa_mask.sig[2] = 0;
5175 old_act->sa_mask.sig[3] = 0;
5176 unlock_user_struct(old_act, arg3, 1);
5178 #else
5179 struct target_old_sigaction *old_act;
5180 struct target_sigaction act, oact, *pact;
5181 if (arg2) {
5182 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5183 goto efault;
5184 act._sa_handler = old_act->_sa_handler;
5185 target_siginitset(&act.sa_mask, old_act->sa_mask);
5186 act.sa_flags = old_act->sa_flags;
5187 act.sa_restorer = old_act->sa_restorer;
5188 unlock_user_struct(old_act, arg2, 0);
5189 pact = &act;
5190 } else {
5191 pact = NULL;
5193 ret = get_errno(do_sigaction(arg1, pact, &oact));
5194 if (!is_error(ret) && arg3) {
5195 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5196 goto efault;
5197 old_act->_sa_handler = oact._sa_handler;
5198 old_act->sa_mask = oact.sa_mask.sig[0];
5199 old_act->sa_flags = oact.sa_flags;
5200 old_act->sa_restorer = oact.sa_restorer;
5201 unlock_user_struct(old_act, arg3, 1);
5203 #endif
5205 break;
5206 #endif
5207 case TARGET_NR_rt_sigaction:
5209 #if defined(TARGET_ALPHA)
5210 struct target_sigaction act, oact, *pact = 0;
5211 struct target_rt_sigaction *rt_act;
5212 /* ??? arg4 == sizeof(sigset_t). */
5213 if (arg2) {
5214 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5215 goto efault;
5216 act._sa_handler = rt_act->_sa_handler;
5217 act.sa_mask = rt_act->sa_mask;
5218 act.sa_flags = rt_act->sa_flags;
5219 act.sa_restorer = arg5;
5220 unlock_user_struct(rt_act, arg2, 0);
5221 pact = &act;
5223 ret = get_errno(do_sigaction(arg1, pact, &oact));
5224 if (!is_error(ret) && arg3) {
5225 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5226 goto efault;
5227 rt_act->_sa_handler = oact._sa_handler;
5228 rt_act->sa_mask = oact.sa_mask;
5229 rt_act->sa_flags = oact.sa_flags;
5230 unlock_user_struct(rt_act, arg3, 1);
5232 #else
5233 struct target_sigaction *act;
5234 struct target_sigaction *oact;
5236 if (arg2) {
5237 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5238 goto efault;
5239 } else
5240 act = NULL;
5241 if (arg3) {
5242 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5243 ret = -TARGET_EFAULT;
5244 goto rt_sigaction_fail;
5246 } else
5247 oact = NULL;
5248 ret = get_errno(do_sigaction(arg1, act, oact));
5249 rt_sigaction_fail:
5250 if (act)
5251 unlock_user_struct(act, arg2, 0);
5252 if (oact)
5253 unlock_user_struct(oact, arg3, 1);
5254 #endif
5256 break;
5257 #ifdef TARGET_NR_sgetmask /* not on alpha */
5258 case TARGET_NR_sgetmask:
5260 sigset_t cur_set;
5261 abi_ulong target_set;
5262 sigprocmask(0, NULL, &cur_set);
5263 host_to_target_old_sigset(&target_set, &cur_set);
5264 ret = target_set;
5266 break;
5267 #endif
5268 #ifdef TARGET_NR_ssetmask /* not on alpha */
5269 case TARGET_NR_ssetmask:
5271 sigset_t set, oset, cur_set;
5272 abi_ulong target_set = arg1;
5273 sigprocmask(0, NULL, &cur_set);
5274 target_to_host_old_sigset(&set, &target_set);
5275 sigorset(&set, &set, &cur_set);
5276 sigprocmask(SIG_SETMASK, &set, &oset);
5277 host_to_target_old_sigset(&target_set, &oset);
5278 ret = target_set;
5280 break;
5281 #endif
5282 #ifdef TARGET_NR_sigprocmask
5283 case TARGET_NR_sigprocmask:
5285 #if defined(TARGET_ALPHA)
5286 sigset_t set, oldset;
5287 abi_ulong mask;
5288 int how;
5290 switch (arg1) {
5291 case TARGET_SIG_BLOCK:
5292 how = SIG_BLOCK;
5293 break;
5294 case TARGET_SIG_UNBLOCK:
5295 how = SIG_UNBLOCK;
5296 break;
5297 case TARGET_SIG_SETMASK:
5298 how = SIG_SETMASK;
5299 break;
5300 default:
5301 ret = -TARGET_EINVAL;
5302 goto fail;
5304 mask = arg2;
5305 target_to_host_old_sigset(&set, &mask);
5307 ret = get_errno(sigprocmask(how, &set, &oldset));
5309 if (!is_error(ret)) {
5310 host_to_target_old_sigset(&mask, &oldset);
5311 ret = mask;
5312 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5314 #else
5315 sigset_t set, oldset, *set_ptr;
5316 int how;
5318 if (arg2) {
5319 switch (arg1) {
5320 case TARGET_SIG_BLOCK:
5321 how = SIG_BLOCK;
5322 break;
5323 case TARGET_SIG_UNBLOCK:
5324 how = SIG_UNBLOCK;
5325 break;
5326 case TARGET_SIG_SETMASK:
5327 how = SIG_SETMASK;
5328 break;
5329 default:
5330 ret = -TARGET_EINVAL;
5331 goto fail;
5333 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5334 goto efault;
5335 target_to_host_old_sigset(&set, p);
5336 unlock_user(p, arg2, 0);
5337 set_ptr = &set;
5338 } else {
5339 how = 0;
5340 set_ptr = NULL;
5342 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5343 if (!is_error(ret) && arg3) {
5344 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5345 goto efault;
5346 host_to_target_old_sigset(p, &oldset);
5347 unlock_user(p, arg3, sizeof(target_sigset_t));
5349 #endif
5351 break;
5352 #endif
5353 case TARGET_NR_rt_sigprocmask:
5355 int how = arg1;
5356 sigset_t set, oldset, *set_ptr;
5358 if (arg2) {
5359 switch(how) {
5360 case TARGET_SIG_BLOCK:
5361 how = SIG_BLOCK;
5362 break;
5363 case TARGET_SIG_UNBLOCK:
5364 how = SIG_UNBLOCK;
5365 break;
5366 case TARGET_SIG_SETMASK:
5367 how = SIG_SETMASK;
5368 break;
5369 default:
5370 ret = -TARGET_EINVAL;
5371 goto fail;
5373 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5374 goto efault;
5375 target_to_host_sigset(&set, p);
5376 unlock_user(p, arg2, 0);
5377 set_ptr = &set;
5378 } else {
5379 how = 0;
5380 set_ptr = NULL;
5382 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5383 if (!is_error(ret) && arg3) {
5384 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5385 goto efault;
5386 host_to_target_sigset(p, &oldset);
5387 unlock_user(p, arg3, sizeof(target_sigset_t));
5390 break;
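/* Stripped of the guest<->host sigset_t translation, the rt_sigprocmask
   case above is the ordinary host round trip sketched here (not built): */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
    sigset_t set, old;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    sigprocmask(SIG_BLOCK, &set, &old);     /* block SIGUSR1, save old mask */
    printf("SIGUSR1 previously blocked: %d\n", sigismember(&old, SIGUSR1));
    sigprocmask(SIG_SETMASK, &old, NULL);   /* restore the previous mask */
    return 0;
}
#endif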
5391 #ifdef TARGET_NR_sigpending
5392 case TARGET_NR_sigpending:
5394 sigset_t set;
5395 ret = get_errno(sigpending(&set));
5396 if (!is_error(ret)) {
5397 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5398 goto efault;
5399 host_to_target_old_sigset(p, &set);
5400 unlock_user(p, arg1, sizeof(target_sigset_t));
5403 break;
5404 #endif
5405 case TARGET_NR_rt_sigpending:
5407 sigset_t set;
5408 ret = get_errno(sigpending(&set));
5409 if (!is_error(ret)) {
5410 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5411 goto efault;
5412 host_to_target_sigset(p, &set);
5413 unlock_user(p, arg1, sizeof(target_sigset_t));
5416 break;
5417 #ifdef TARGET_NR_sigsuspend
5418 case TARGET_NR_sigsuspend:
5420 sigset_t set;
5421 #if defined(TARGET_ALPHA)
5422 abi_ulong mask = arg1;
5423 target_to_host_old_sigset(&set, &mask);
5424 #else
5425 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5426 goto efault;
5427 target_to_host_old_sigset(&set, p);
5428 unlock_user(p, arg1, 0);
5429 #endif
5430 ret = get_errno(sigsuspend(&set));
5432 break;
5433 #endif
5434 case TARGET_NR_rt_sigsuspend:
5436 sigset_t set;
5437 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5438 goto efault;
5439 target_to_host_sigset(&set, p);
5440 unlock_user(p, arg1, 0);
5441 ret = get_errno(sigsuspend(&set));
5443 break;
5444 case TARGET_NR_rt_sigtimedwait:
5446 sigset_t set;
5447 struct timespec uts, *puts;
5448 siginfo_t uinfo;
5450 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5451 goto efault;
5452 target_to_host_sigset(&set, p);
5453 unlock_user(p, arg1, 0);
5454 if (arg3) {
5455 puts = &uts;
5456 target_to_host_timespec(puts, arg3);
5457 } else {
5458 puts = NULL;
5460 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5461 if (!is_error(ret) && arg2) {
5462 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5463 goto efault;
5464 host_to_target_siginfo(p, &uinfo);
5465 unlock_user(p, arg2, sizeof(target_siginfo_t));
5468 break;
5469 case TARGET_NR_rt_sigqueueinfo:
5471 siginfo_t uinfo;
5472 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
5473 goto efault;
5474 target_to_host_siginfo(&uinfo, p);
5475 unlock_user(p, arg1, 0);
5476 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5478 break;
5479 #ifdef TARGET_NR_sigreturn
5480 case TARGET_NR_sigreturn:
5481 /* NOTE: ret is eax, so no transcoding must be done */
5482 ret = do_sigreturn(cpu_env);
5483 break;
5484 #endif
5485 case TARGET_NR_rt_sigreturn:
5486 /* NOTE: ret is eax, so no transcoding must be done */
5487 ret = do_rt_sigreturn(cpu_env);
5488 break;
5489 case TARGET_NR_sethostname:
5490 if (!(p = lock_user_string(arg1)))
5491 goto efault;
5492 ret = get_errno(sethostname(p, arg2));
5493 unlock_user(p, arg1, 0);
5494 break;
5495 case TARGET_NR_setrlimit:
5497 int resource = arg1;
5498 struct target_rlimit *target_rlim;
5499 struct rlimit rlim;
5500 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5501 goto efault;
5502 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5503 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5504 unlock_user_struct(target_rlim, arg2, 0);
5505 ret = get_errno(setrlimit(resource, &rlim));
5507 break;
5508 case TARGET_NR_getrlimit:
5510 int resource = arg1;
5511 struct target_rlimit *target_rlim;
5512 struct rlimit rlim;
5514 ret = get_errno(getrlimit(resource, &rlim));
5515 if (!is_error(ret)) {
5516 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5517 goto efault;
5518 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5519 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5520 unlock_user_struct(target_rlim, arg2, 1);
5523 break;
5524 case TARGET_NR_getrusage:
5526 struct rusage rusage;
5527 ret = get_errno(getrusage(arg1, &rusage));
5528 if (!is_error(ret)) {
5529 host_to_target_rusage(arg2, &rusage);
5532 break;
5533 case TARGET_NR_gettimeofday:
5535 struct timeval tv;
5536 ret = get_errno(gettimeofday(&tv, NULL));
5537 if (!is_error(ret)) {
5538 if (copy_to_user_timeval(arg1, &tv))
5539 goto efault;
5542 break;
5543 case TARGET_NR_settimeofday:
5545 struct timeval tv;
5546 if (copy_from_user_timeval(&tv, arg1))
5547 goto efault;
5548 ret = get_errno(settimeofday(&tv, NULL));
5550 break;
5551 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5552 case TARGET_NR_select:
5554 struct target_sel_arg_struct *sel;
5555 abi_ulong inp, outp, exp, tvp;
5556 long nsel;
5558 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5559 goto efault;
5560 nsel = tswapl(sel->n);
5561 inp = tswapl(sel->inp);
5562 outp = tswapl(sel->outp);
5563 exp = tswapl(sel->exp);
5564 tvp = tswapl(sel->tvp);
5565 unlock_user_struct(sel, arg1, 0);
5566 ret = do_select(nsel, inp, outp, exp, tvp);
5568 break;
5569 #endif
5570 #ifdef TARGET_NR_pselect6
5571 case TARGET_NR_pselect6:
5572 goto unimplemented_nowarn;
5573 #endif
5574 case TARGET_NR_symlink:
5576 void *p2;
5577 p = lock_user_string(arg1);
5578 p2 = lock_user_string(arg2);
5579 if (!p || !p2)
5580 ret = -TARGET_EFAULT;
5581 else
5582 ret = get_errno(symlink(p, p2));
5583 unlock_user(p2, arg2, 0);
5584 unlock_user(p, arg1, 0);
5586 break;
5587 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5588 case TARGET_NR_symlinkat:
5590 void *p2;
5591 p = lock_user_string(arg1);
5592 p2 = lock_user_string(arg3);
5593 if (!p || !p2)
5594 ret = -TARGET_EFAULT;
5595 else
5596 ret = get_errno(sys_symlinkat(p, arg2, p2));
5597 unlock_user(p2, arg3, 0);
5598 unlock_user(p, arg1, 0);
5600 break;
5601 #endif
5602 #ifdef TARGET_NR_oldlstat
5603 case TARGET_NR_oldlstat:
5604 goto unimplemented;
5605 #endif
5606 case TARGET_NR_readlink:
5608 void *p2, *temp;
5609 p = lock_user_string(arg1);
5610 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5611 if (!p || !p2)
5612 ret = -TARGET_EFAULT;
5613 else {
5614 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5615 char real[PATH_MAX];
5616 temp = realpath(exec_path,real);
5617 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
5618 snprintf((char *)p2, arg3, "%s", real);
5620 else
5621 ret = get_errno(readlink(path(p), p2, arg3));
5623 unlock_user(p2, arg2, ret);
5624 unlock_user(p, arg1, 0);
5626 break;
5627 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5628 case TARGET_NR_readlinkat:
5630 void *p2;
5631 p = lock_user_string(arg2);
5632 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5633 if (!p || !p2)
5634 ret = -TARGET_EFAULT;
5635 else
5636 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5637 unlock_user(p2, arg3, ret);
5638 unlock_user(p, arg2, 0);
5640 break;
5641 #endif
5642 #ifdef TARGET_NR_uselib
5643 case TARGET_NR_uselib:
5644 goto unimplemented;
5645 #endif
5646 #ifdef TARGET_NR_swapon
5647 case TARGET_NR_swapon:
5648 if (!(p = lock_user_string(arg1)))
5649 goto efault;
5650 ret = get_errno(swapon(p, arg2));
5651 unlock_user(p, arg1, 0);
5652 break;
5653 #endif
5654 case TARGET_NR_reboot:
5655 goto unimplemented;
5656 #ifdef TARGET_NR_readdir
5657 case TARGET_NR_readdir:
5658 goto unimplemented;
5659 #endif
5660 #ifdef TARGET_NR_mmap
5661 case TARGET_NR_mmap:
5662 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5663 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5664 || defined(TARGET_S390X)
5666 abi_ulong *v;
5667 abi_ulong v1, v2, v3, v4, v5, v6;
5668 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5669 goto efault;
5670 v1 = tswapl(v[0]);
5671 v2 = tswapl(v[1]);
5672 v3 = tswapl(v[2]);
5673 v4 = tswapl(v[3]);
5674 v5 = tswapl(v[4]);
5675 v6 = tswapl(v[5]);
5676 unlock_user(v, arg1, 0);
5677 ret = get_errno(target_mmap(v1, v2, v3,
5678 target_to_host_bitmask(v4, mmap_flags_tbl),
5679 v5, v6));
5681 #else
5682 ret = get_errno(target_mmap(arg1, arg2, arg3,
5683 target_to_host_bitmask(arg4, mmap_flags_tbl),
5684 arg5,
5685 arg6));
5686 #endif
5687 break;
5688 #endif
5689 #ifdef TARGET_NR_mmap2
5690 case TARGET_NR_mmap2:
5691 #ifndef MMAP_SHIFT
5692 #define MMAP_SHIFT 12
5693 #endif
5694 ret = get_errno(target_mmap(arg1, arg2, arg3,
5695 target_to_host_bitmask(arg4, mmap_flags_tbl),
5696 arg5,
5697 arg6 << MMAP_SHIFT));
5698 break;
5699 #endif
5700 case TARGET_NR_munmap:
5701 ret = get_errno(target_munmap(arg1, arg2));
5702 break;
5703 case TARGET_NR_mprotect:
5705 TaskState *ts = ((CPUState *)cpu_env)->opaque;
5706 /* Special hack to detect libc making the stack executable. */
5707 if ((arg3 & PROT_GROWSDOWN)
5708 && arg1 >= ts->info->stack_limit
5709 && arg1 <= ts->info->start_stack) {
5710 arg3 &= ~PROT_GROWSDOWN;
5711 arg2 = arg2 + arg1 - ts->info->stack_limit;
5712 arg1 = ts->info->stack_limit;
5715 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5716 break;
5717 #ifdef TARGET_NR_mremap
5718 case TARGET_NR_mremap:
5719 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5720 break;
5721 #endif
5722 /* ??? msync/mlock/munlock are broken for softmmu. */
5723 #ifdef TARGET_NR_msync
5724 case TARGET_NR_msync:
5725 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5726 break;
5727 #endif
5728 #ifdef TARGET_NR_mlock
5729 case TARGET_NR_mlock:
5730 ret = get_errno(mlock(g2h(arg1), arg2));
5731 break;
5732 #endif
5733 #ifdef TARGET_NR_munlock
5734 case TARGET_NR_munlock:
5735 ret = get_errno(munlock(g2h(arg1), arg2));
5736 break;
5737 #endif
5738 #ifdef TARGET_NR_mlockall
5739 case TARGET_NR_mlockall:
5740 ret = get_errno(mlockall(arg1));
5741 break;
5742 #endif
5743 #ifdef TARGET_NR_munlockall
5744 case TARGET_NR_munlockall:
5745 ret = get_errno(munlockall());
5746 break;
5747 #endif
5748 case TARGET_NR_truncate:
5749 if (!(p = lock_user_string(arg1)))
5750 goto efault;
5751 ret = get_errno(truncate(p, arg2));
5752 unlock_user(p, arg1, 0);
5753 break;
5754 case TARGET_NR_ftruncate:
5755 ret = get_errno(ftruncate(arg1, arg2));
5756 break;
5757 case TARGET_NR_fchmod:
5758 ret = get_errno(fchmod(arg1, arg2));
5759 break;
5760 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5761 case TARGET_NR_fchmodat:
5762 if (!(p = lock_user_string(arg2)))
5763 goto efault;
5764 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5765 unlock_user(p, arg2, 0);
5766 break;
5767 #endif
5768 case TARGET_NR_getpriority:
5769 /* libc does special remapping of the return value of
5770 * sys_getpriority() so it's just easiest to call
5771 * sys_getpriority() directly rather than through libc. */
5772 ret = get_errno(sys_getpriority(arg1, arg2));
5773 break;
5774 case TARGET_NR_setpriority:
5775 ret = get_errno(setpriority(arg1, arg2, arg3));
5776 break;
5777 #ifdef TARGET_NR_profil
5778 case TARGET_NR_profil:
5779 goto unimplemented;
5780 #endif
5781 case TARGET_NR_statfs:
5782 if (!(p = lock_user_string(arg1)))
5783 goto efault;
5784 ret = get_errno(statfs(path(p), &stfs));
5785 unlock_user(p, arg1, 0);
5786 convert_statfs:
5787 if (!is_error(ret)) {
5788 struct target_statfs *target_stfs;
5790 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5791 goto efault;
5792 __put_user(stfs.f_type, &target_stfs->f_type);
5793 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5794 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5795 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5796 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5797 __put_user(stfs.f_files, &target_stfs->f_files);
5798 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5799 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5800 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5801 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5802 unlock_user_struct(target_stfs, arg2, 1);
5804 break;
5805 case TARGET_NR_fstatfs:
5806 ret = get_errno(fstatfs(arg1, &stfs));
5807 goto convert_statfs;
5808 #ifdef TARGET_NR_statfs64
5809 case TARGET_NR_statfs64:
5810 if (!(p = lock_user_string(arg1)))
5811 goto efault;
5812 ret = get_errno(statfs(path(p), &stfs));
5813 unlock_user(p, arg1, 0);
5814 convert_statfs64:
5815 if (!is_error(ret)) {
5816 struct target_statfs64 *target_stfs;
5818 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5819 goto efault;
5820 __put_user(stfs.f_type, &target_stfs->f_type);
5821 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5822 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5823 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5824 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5825 __put_user(stfs.f_files, &target_stfs->f_files);
5826 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5827 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5828 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5829 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5830 unlock_user_struct(target_stfs, arg3, 1);
5832 break;
5833 case TARGET_NR_fstatfs64:
5834 ret = get_errno(fstatfs(arg1, &stfs));
5835 goto convert_statfs64;
5836 #endif
5837 #ifdef TARGET_NR_ioperm
5838 case TARGET_NR_ioperm:
5839 goto unimplemented;
5840 #endif
5841 #ifdef TARGET_NR_socketcall
5842 case TARGET_NR_socketcall:
5843 ret = do_socketcall(arg1, arg2);
5844 break;
5845 #endif
5846 #ifdef TARGET_NR_accept
5847 case TARGET_NR_accept:
5848 ret = do_accept(arg1, arg2, arg3);
5849 break;
5850 #endif
5851 #ifdef TARGET_NR_bind
5852 case TARGET_NR_bind:
5853 ret = do_bind(arg1, arg2, arg3);
5854 break;
5855 #endif
5856 #ifdef TARGET_NR_connect
5857 case TARGET_NR_connect:
5858 ret = do_connect(arg1, arg2, arg3);
5859 break;
5860 #endif
5861 #ifdef TARGET_NR_getpeername
5862 case TARGET_NR_getpeername:
5863 ret = do_getpeername(arg1, arg2, arg3);
5864 break;
5865 #endif
5866 #ifdef TARGET_NR_getsockname
5867 case TARGET_NR_getsockname:
5868 ret = do_getsockname(arg1, arg2, arg3);
5869 break;
5870 #endif
5871 #ifdef TARGET_NR_getsockopt
5872 case TARGET_NR_getsockopt:
5873 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
5874 break;
5875 #endif
5876 #ifdef TARGET_NR_listen
5877 case TARGET_NR_listen:
5878 ret = get_errno(listen(arg1, arg2));
5879 break;
5880 #endif
5881 #ifdef TARGET_NR_recv
5882 case TARGET_NR_recv:
5883 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
5884 break;
5885 #endif
5886 #ifdef TARGET_NR_recvfrom
5887 case TARGET_NR_recvfrom:
5888 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
5889 break;
5890 #endif
5891 #ifdef TARGET_NR_recvmsg
5892 case TARGET_NR_recvmsg:
5893 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
5894 break;
5895 #endif
5896 #ifdef TARGET_NR_send
5897 case TARGET_NR_send:
5898 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
5899 break;
5900 #endif
5901 #ifdef TARGET_NR_sendmsg
5902 case TARGET_NR_sendmsg:
5903 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
5904 break;
5905 #endif
5906 #ifdef TARGET_NR_sendto
5907 case TARGET_NR_sendto:
5908 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
5909 break;
5910 #endif
5911 #ifdef TARGET_NR_shutdown
5912 case TARGET_NR_shutdown:
5913 ret = get_errno(shutdown(arg1, arg2));
5914 break;
5915 #endif
5916 #ifdef TARGET_NR_socket
5917 case TARGET_NR_socket:
5918 ret = do_socket(arg1, arg2, arg3);
5919 break;
5920 #endif
5921 #ifdef TARGET_NR_socketpair
5922 case TARGET_NR_socketpair:
5923 ret = do_socketpair(arg1, arg2, arg3, arg4);
5924 break;
5925 #endif
5926 #ifdef TARGET_NR_setsockopt
5927 case TARGET_NR_setsockopt:
5928 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
5929 break;
5930 #endif
5932 case TARGET_NR_syslog:
5933 if (!(p = lock_user_string(arg2)))
5934 goto efault;
5935 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
5936 unlock_user(p, arg2, 0);
5937 break;
5939 case TARGET_NR_setitimer:
5941 struct itimerval value, ovalue, *pvalue;
5943 if (arg2) {
5944 pvalue = &value;
5945 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
5946 || copy_from_user_timeval(&pvalue->it_value,
5947 arg2 + sizeof(struct target_timeval)))
5948 goto efault;
5949 } else {
5950 pvalue = NULL;
5952 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
5953 if (!is_error(ret) && arg3) {
5954 if (copy_to_user_timeval(arg3,
5955 &ovalue.it_interval)
5956 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
5957 &ovalue.it_value))
5958 goto efault;
5961 break;
5962 case TARGET_NR_getitimer:
5964 struct itimerval value;
5966 ret = get_errno(getitimer(arg1, &value));
5967 if (!is_error(ret) && arg2) {
5968 if (copy_to_user_timeval(arg2,
5969 &value.it_interval)
5970 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
5971 &value.it_value))
5972 goto efault;
5975 break;
5976 case TARGET_NR_stat:
5977 if (!(p = lock_user_string(arg1)))
5978 goto efault;
5979 ret = get_errno(stat(path(p), &st));
5980 unlock_user(p, arg1, 0);
5981 goto do_stat;
5982 case TARGET_NR_lstat:
5983 if (!(p = lock_user_string(arg1)))
5984 goto efault;
5985 ret = get_errno(lstat(path(p), &st));
5986 unlock_user(p, arg1, 0);
5987 goto do_stat;
5988 case TARGET_NR_fstat:
5990 ret = get_errno(fstat(arg1, &st));
5991 do_stat:
5992 if (!is_error(ret)) {
5993 struct target_stat *target_st;
5995 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
5996 goto efault;
5997 memset(target_st, 0, sizeof(*target_st));
5998 __put_user(st.st_dev, &target_st->st_dev);
5999 __put_user(st.st_ino, &target_st->st_ino);
6000 __put_user(st.st_mode, &target_st->st_mode);
6001 __put_user(st.st_uid, &target_st->st_uid);
6002 __put_user(st.st_gid, &target_st->st_gid);
6003 __put_user(st.st_nlink, &target_st->st_nlink);
6004 __put_user(st.st_rdev, &target_st->st_rdev);
6005 __put_user(st.st_size, &target_st->st_size);
6006 __put_user(st.st_blksize, &target_st->st_blksize);
6007 __put_user(st.st_blocks, &target_st->st_blocks);
6008 __put_user(st.st_atime, &target_st->target_st_atime);
6009 __put_user(st.st_mtime, &target_st->target_st_mtime);
6010 __put_user(st.st_ctime, &target_st->target_st_ctime);
6011 unlock_user_struct(target_st, arg2, 1);
6014 break;
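    /* The field-by-field copy above (rather than a single memcpy) is needed
     * because the host struct stat and the target's struct target_stat can
     * differ in field width, ordering, padding and byte order; __put_user
     * does the width and endianness conversion per member.  Illustrative
     * sketch only, using the same names as above:
     *
     *     memset(target_st, 0, sizeof(*target_st));      // clear padding bytes
     *     __put_user(st.st_size, &target_st->st_size);   // narrow/swap as needed
     */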
6015 #ifdef TARGET_NR_olduname
6016 case TARGET_NR_olduname:
6017 goto unimplemented;
6018 #endif
6019 #ifdef TARGET_NR_iopl
6020 case TARGET_NR_iopl:
6021 goto unimplemented;
6022 #endif
6023 case TARGET_NR_vhangup:
6024 ret = get_errno(vhangup());
6025 break;
6026 #ifdef TARGET_NR_idle
6027 case TARGET_NR_idle:
6028 goto unimplemented;
6029 #endif
6030 #ifdef TARGET_NR_syscall
6031 case TARGET_NR_syscall:
6032 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
6033 break;
6034 #endif
6035 case TARGET_NR_wait4:
6037 int status;
6038 abi_long status_ptr = arg2;
6039 struct rusage rusage, *rusage_ptr;
6040 abi_ulong target_rusage = arg4;
6041 if (target_rusage)
6042 rusage_ptr = &rusage;
6043 else
6044 rusage_ptr = NULL;
6045 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6046 if (!is_error(ret)) {
6047 if (status_ptr) {
6048 status = host_to_target_waitstatus(status);
6049 if (put_user_s32(status, status_ptr))
6050 goto efault;
6052 if (target_rusage)
6053 host_to_target_rusage(target_rusage, &rusage);
6056 break;
6057 #ifdef TARGET_NR_swapoff
6058 case TARGET_NR_swapoff:
6059 if (!(p = lock_user_string(arg1)))
6060 goto efault;
6061 ret = get_errno(swapoff(p));
6062 unlock_user(p, arg1, 0);
6063 break;
6064 #endif
6065 case TARGET_NR_sysinfo:
6067 struct target_sysinfo *target_value;
6068 struct sysinfo value;
6069 ret = get_errno(sysinfo(&value));
6070 if (!is_error(ret) && arg1)
6072 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6073 goto efault;
6074 __put_user(value.uptime, &target_value->uptime);
6075 __put_user(value.loads[0], &target_value->loads[0]);
6076 __put_user(value.loads[1], &target_value->loads[1]);
6077 __put_user(value.loads[2], &target_value->loads[2]);
6078 __put_user(value.totalram, &target_value->totalram);
6079 __put_user(value.freeram, &target_value->freeram);
6080 __put_user(value.sharedram, &target_value->sharedram);
6081 __put_user(value.bufferram, &target_value->bufferram);
6082 __put_user(value.totalswap, &target_value->totalswap);
6083 __put_user(value.freeswap, &target_value->freeswap);
6084 __put_user(value.procs, &target_value->procs);
6085 __put_user(value.totalhigh, &target_value->totalhigh);
6086 __put_user(value.freehigh, &target_value->freehigh);
6087 __put_user(value.mem_unit, &target_value->mem_unit);
6088 unlock_user_struct(target_value, arg1, 1);
6091 break;
6092 #ifdef TARGET_NR_ipc
6093 case TARGET_NR_ipc:
6094 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6095 break;
6096 #endif
6097 #ifdef TARGET_NR_semget
6098 case TARGET_NR_semget:
6099 ret = get_errno(semget(arg1, arg2, arg3));
6100 break;
6101 #endif
6102 #ifdef TARGET_NR_semop
6103 case TARGET_NR_semop:
6104 ret = get_errno(do_semop(arg1, arg2, arg3));
6105 break;
6106 #endif
6107 #ifdef TARGET_NR_semctl
6108 case TARGET_NR_semctl:
6109 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6110 break;
6111 #endif
6112 #ifdef TARGET_NR_msgctl
6113 case TARGET_NR_msgctl:
6114 ret = do_msgctl(arg1, arg2, arg3);
6115 break;
6116 #endif
6117 #ifdef TARGET_NR_msgget
6118 case TARGET_NR_msgget:
6119 ret = get_errno(msgget(arg1, arg2));
6120 break;
6121 #endif
6122 #ifdef TARGET_NR_msgrcv
6123 case TARGET_NR_msgrcv:
6124 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6125 break;
6126 #endif
6127 #ifdef TARGET_NR_msgsnd
6128 case TARGET_NR_msgsnd:
6129 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6130 break;
6131 #endif
6132 #ifdef TARGET_NR_shmget
6133 case TARGET_NR_shmget:
6134 ret = get_errno(shmget(arg1, arg2, arg3));
6135 break;
6136 #endif
6137 #ifdef TARGET_NR_shmctl
6138 case TARGET_NR_shmctl:
6139 ret = do_shmctl(arg1, arg2, arg3);
6140 break;
6141 #endif
6142 #ifdef TARGET_NR_shmat
6143 case TARGET_NR_shmat:
6144 ret = do_shmat(arg1, arg2, arg3);
6145 break;
6146 #endif
6147 #ifdef TARGET_NR_shmdt
6148 case TARGET_NR_shmdt:
6149 ret = do_shmdt(arg1);
6150 break;
6151 #endif
6152 case TARGET_NR_fsync:
6153 ret = get_errno(fsync(arg1));
6154 break;
6155 case TARGET_NR_clone:
6156 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6157 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6158 #elif defined(TARGET_CRIS)
6159 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6160 #elif defined(TARGET_S390X)
6161 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6162 #else
6163 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6164 #endif
6165 break;
6166 #ifdef __NR_exit_group
6167 /* new thread calls */
6168 case TARGET_NR_exit_group:
6169 #ifdef TARGET_GPROF
6170 _mcleanup();
6171 #endif
6172 gdb_exit(cpu_env, arg1);
6173 ret = get_errno(exit_group(arg1));
6174 break;
6175 #endif
6176 case TARGET_NR_setdomainname:
6177 if (!(p = lock_user_string(arg1)))
6178 goto efault;
6179 ret = get_errno(setdomainname(p, arg2));
6180 unlock_user(p, arg1, 0);
6181 break;
6182 case TARGET_NR_uname:
6183 /* no need to transcode because we use the linux syscall */
6185 struct new_utsname * buf;
6187 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6188 goto efault;
6189 ret = get_errno(sys_uname(buf));
6190 if (!is_error(ret)) {
6191 /* Overwrite the native machine name with whatever is being
6192 emulated. */
6193 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6194 /* Allow the user to override the reported release. */
6195 if (qemu_uname_release && *qemu_uname_release)
6196 strcpy (buf->release, qemu_uname_release);
6198 unlock_user_struct(buf, arg1, 1);
6200 break;
6201 #ifdef TARGET_I386
6202 case TARGET_NR_modify_ldt:
6203 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6204 break;
6205 #if !defined(TARGET_X86_64)
6206 case TARGET_NR_vm86old:
6207 goto unimplemented;
6208 case TARGET_NR_vm86:
6209 ret = do_vm86(cpu_env, arg1, arg2);
6210 break;
6211 #endif
6212 #endif
6213 case TARGET_NR_adjtimex:
6214 goto unimplemented;
6215 #ifdef TARGET_NR_create_module
6216 case TARGET_NR_create_module:
6217 #endif
6218 case TARGET_NR_init_module:
6219 case TARGET_NR_delete_module:
6220 #ifdef TARGET_NR_get_kernel_syms
6221 case TARGET_NR_get_kernel_syms:
6222 #endif
6223 goto unimplemented;
6224 case TARGET_NR_quotactl:
6225 goto unimplemented;
6226 case TARGET_NR_getpgid:
6227 ret = get_errno(getpgid(arg1));
6228 break;
6229 case TARGET_NR_fchdir:
6230 ret = get_errno(fchdir(arg1));
6231 break;
6232 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6233 case TARGET_NR_bdflush:
6234 goto unimplemented;
6235 #endif
6236 #ifdef TARGET_NR_sysfs
6237 case TARGET_NR_sysfs:
6238 goto unimplemented;
6239 #endif
6240 case TARGET_NR_personality:
6241 ret = get_errno(personality(arg1));
6242 break;
6243 #ifdef TARGET_NR_afs_syscall
6244 case TARGET_NR_afs_syscall:
6245 goto unimplemented;
6246 #endif
6247 #ifdef TARGET_NR__llseek /* Not on alpha */
6248 case TARGET_NR__llseek:
6250 int64_t res;
6251 #if !defined(__NR_llseek)
6252 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6253 if (res == -1) {
6254 ret = get_errno(res);
6255 } else {
6256 ret = 0;
6258 #else
6259 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6260 #endif
6261 if ((ret == 0) && put_user_s64(res, arg4)) {
6262 goto efault;
6265 break;
6266 #endif
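    /* On hosts that lack __NR_llseek the 64-bit offset is rebuilt from the
     * two 32-bit halves the guest passed in arg2 (high) and arg3 (low).
     * Worked example (illustrative values): arg2 == 0x1, arg3 == 0x200 gives
     *
     *     ((uint64_t)0x1 << 32) | 0x200 == 0x100000200
     */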
6267 case TARGET_NR_getdents:
6268 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6270 struct target_dirent *target_dirp;
6271 struct linux_dirent *dirp;
6272 abi_long count = arg3;
6274 dirp = malloc(count);
6275 if (!dirp) {
6276 ret = -TARGET_ENOMEM;
6277 goto fail;
6280 ret = get_errno(sys_getdents(arg1, dirp, count));
6281 if (!is_error(ret)) {
6282 struct linux_dirent *de;
6283 struct target_dirent *tde;
6284 int len = ret;
6285 int reclen, treclen;
6286 int count1, tnamelen;
6288 count1 = 0;
6289 de = dirp;
6290 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6291 goto efault;
6292 tde = target_dirp;
6293 while (len > 0) {
6294 reclen = de->d_reclen;
6295 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6296 tde->d_reclen = tswap16(treclen);
6297 tde->d_ino = tswapl(de->d_ino);
6298 tde->d_off = tswapl(de->d_off);
6299 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6300 if (tnamelen > 256)
6301 tnamelen = 256;
6302 /* XXX: may not be correct */
6303 pstrcpy(tde->d_name, tnamelen, de->d_name);
6304 de = (struct linux_dirent *)((char *)de + reclen);
6305 len -= reclen;
6306 tde = (struct target_dirent *)((char *)tde + treclen);
6307 count1 += treclen;
6309 ret = count1;
6310 unlock_user(target_dirp, arg2, ret);
6312 free(dirp);
6314 #else
6316 struct linux_dirent *dirp;
6317 abi_long count = arg3;
6319 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6320 goto efault;
6321 ret = get_errno(sys_getdents(arg1, dirp, count));
6322 if (!is_error(ret)) {
6323 struct linux_dirent *de;
6324 int len = ret;
6325 int reclen;
6326 de = dirp;
6327 while (len > 0) {
6328 reclen = de->d_reclen;
6329 if (reclen > len)
6330 break;
6331 de->d_reclen = tswap16(reclen);
6332 tswapls(&de->d_ino);
6333 tswapls(&de->d_off);
6334 de = (struct linux_dirent *)((char *)de + reclen);
6335 len -= reclen;
6338 unlock_user(dirp, arg2, ret);
6340 #endif
6341 break;
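    /* Worked example of the record repacking above, assuming an LP64 host
     * with a 32-bit target ABI: sizeof(long) == 8 and sizeof(abi_long) == 4,
     * so each dirent shrinks by 2 * (8 - 4) == 8 bytes (d_ino and d_off drop
     * from 8 to 4 bytes each), and the name field gets
     * treclen - (2 * sizeof(abi_long) + 2) == treclen - 10 bytes, capped at 256. */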
6342 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6343 case TARGET_NR_getdents64:
6345 struct linux_dirent64 *dirp;
6346 abi_long count = arg3;
6347 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6348 goto efault;
6349 ret = get_errno(sys_getdents64(arg1, dirp, count));
6350 if (!is_error(ret)) {
6351 struct linux_dirent64 *de;
6352 int len = ret;
6353 int reclen;
6354 de = dirp;
6355 while (len > 0) {
6356 reclen = de->d_reclen;
6357 if (reclen > len)
6358 break;
6359 de->d_reclen = tswap16(reclen);
6360 tswap64s((uint64_t *)&de->d_ino);
6361 tswap64s((uint64_t *)&de->d_off);
6362 de = (struct linux_dirent64 *)((char *)de + reclen);
6363 len -= reclen;
6366 unlock_user(dirp, arg2, ret);
6368 break;
6369 #endif /* TARGET_NR_getdents64 */
6370 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6371 #ifdef TARGET_S390X
6372 case TARGET_NR_select:
6373 #else
6374 case TARGET_NR__newselect:
6375 #endif
6376 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6377 break;
6378 #endif
6379 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6380 # ifdef TARGET_NR_poll
6381 case TARGET_NR_poll:
6382 # endif
6383 # ifdef TARGET_NR_ppoll
6384 case TARGET_NR_ppoll:
6385 # endif
6387 struct target_pollfd *target_pfd;
6388 unsigned int nfds = arg2;
6389 int timeout = arg3;
6390 struct pollfd *pfd;
6391 unsigned int i;
6393 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6394 if (!target_pfd)
6395 goto efault;
6397 pfd = alloca(sizeof(struct pollfd) * nfds);
6398 for(i = 0; i < nfds; i++) {
6399 pfd[i].fd = tswap32(target_pfd[i].fd);
6400 pfd[i].events = tswap16(target_pfd[i].events);
6403 # ifdef TARGET_NR_ppoll
6404 if (num == TARGET_NR_ppoll) {
6405 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6406 target_sigset_t *target_set;
6407 sigset_t _set, *set = &_set;
6409 if (arg3) {
6410 if (target_to_host_timespec(timeout_ts, arg3)) {
6411 unlock_user(target_pfd, arg1, 0);
6412 goto efault;
6414 } else {
6415 timeout_ts = NULL;
6418 if (arg4) {
6419 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6420 if (!target_set) {
6421 unlock_user(target_pfd, arg1, 0);
6422 goto efault;
6424 target_to_host_sigset(set, target_set);
6425 } else {
6426 set = NULL;
6429 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6431 if (!is_error(ret) && arg3) {
6432 host_to_target_timespec(arg3, timeout_ts);
6434 if (arg4) {
6435 unlock_user(target_set, arg4, 0);
6437 } else
6438 # endif
6439 ret = get_errno(poll(pfd, nfds, timeout));
6441 if (!is_error(ret)) {
6442 for(i = 0; i < nfds; i++) {
6443 target_pfd[i].revents = tswap16(pfd[i].revents);
6446 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6448 break;
6449 #endif
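    /* A minimal sketch of the guest-side call serviced above, assuming the
     * usual libc wrapper ("fd" is a hypothetical guest descriptor, for
     * illustration only):
     *
     *     struct pollfd pfd = { .fd = fd, .events = POLLIN };
     *     int n = poll(&pfd, 1, 1000);               // 1 fd, 1000 ms timeout
     *     if (n > 0 && (pfd.revents & POLLIN)) { ... }
     *
     * The emulation converts each target_pollfd to a host pollfd, calls the
     * host poll()/ppoll(), then stores revents back in target byte order. */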
6450 case TARGET_NR_flock:
6451 /* NOTE: the flock constant seems to be the same for every
6452 Linux platform */
6453 ret = get_errno(flock(arg1, arg2));
6454 break;
6455 case TARGET_NR_readv:
6457 int count = arg3;
6458 struct iovec *vec;
6460 vec = alloca(count * sizeof(struct iovec));
6461 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6462 goto efault;
6463 ret = get_errno(readv(arg1, vec, count));
6464 unlock_iovec(vec, arg2, count, 1);
6466 break;
6467 case TARGET_NR_writev:
6469 int count = arg3;
6470 struct iovec *vec;
6472 vec = alloca(count * sizeof(struct iovec));
6473 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6474 goto efault;
6475 ret = get_errno(writev(arg1, vec, count));
6476 unlock_iovec(vec, arg2, count, 0);
6478 break;
6479 case TARGET_NR_getsid:
6480 ret = get_errno(getsid(arg1));
6481 break;
6482 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6483 case TARGET_NR_fdatasync:
6484 ret = get_errno(fdatasync(arg1));
6485 break;
6486 #endif
6487 case TARGET_NR__sysctl:
6488 /* We don't implement this, but ENOTDIR is always a safe
6489 return value. */
6490 ret = -TARGET_ENOTDIR;
6491 break;
6492 case TARGET_NR_sched_getaffinity:
6494 unsigned int mask_size;
6495 unsigned long *mask;
6498 * sched_getaffinity needs multiples of ulong, so need to take
6499 * care of mismatches between target ulong and host ulong sizes.
6501 if (arg2 & (sizeof(abi_ulong) - 1)) {
6502 ret = -TARGET_EINVAL;
6503 break;
6505 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6507 mask = alloca(mask_size);
6508 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6510 if (!is_error(ret)) {
6511 if (copy_to_user(arg3, mask, ret)) {
6512 goto efault;
6516 break;
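    /* Worked example of the size handling above, assuming an 8-byte host
     * unsigned long and a 4-byte abi_ulong: a guest request of arg2 == 4
     * passes the alignment check (4 & 3 == 0) and is rounded up to
     * mask_size == (4 + 7) & ~7 == 8, so the host syscall always sees a
     * whole number of host longs. */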
6517 case TARGET_NR_sched_setaffinity:
6519 unsigned int mask_size;
6520 unsigned long *mask;
6523 * sched_setaffinity needs multiples of ulong, so need to take
6524 * care of mismatches between target ulong and host ulong sizes.
6526 if (arg2 & (sizeof(abi_ulong) - 1)) {
6527 ret = -TARGET_EINVAL;
6528 break;
6530 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6532 mask = alloca(mask_size);
6533 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6534 goto efault;
6536 memcpy(mask, p, arg2);
6537 unlock_user_struct(p, arg3, 0); /* p was locked from arg3, not arg2 */
6539 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6541 break;
6542 case TARGET_NR_sched_setparam:
6544 struct sched_param *target_schp;
6545 struct sched_param schp;
6547 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6548 goto efault;
6549 schp.sched_priority = tswap32(target_schp->sched_priority);
6550 unlock_user_struct(target_schp, arg2, 0);
6551 ret = get_errno(sched_setparam(arg1, &schp));
6553 break;
6554 case TARGET_NR_sched_getparam:
6556 struct sched_param *target_schp;
6557 struct sched_param schp;
6558 ret = get_errno(sched_getparam(arg1, &schp));
6559 if (!is_error(ret)) {
6560 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6561 goto efault;
6562 target_schp->sched_priority = tswap32(schp.sched_priority);
6563 unlock_user_struct(target_schp, arg2, 1);
6566 break;
6567 case TARGET_NR_sched_setscheduler:
6569 struct sched_param *target_schp;
6570 struct sched_param schp;
6571 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6572 goto efault;
6573 schp.sched_priority = tswap32(target_schp->sched_priority);
6574 unlock_user_struct(target_schp, arg3, 0);
6575 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6577 break;
6578 case TARGET_NR_sched_getscheduler:
6579 ret = get_errno(sched_getscheduler(arg1));
6580 break;
6581 case TARGET_NR_sched_yield:
6582 ret = get_errno(sched_yield());
6583 break;
6584 case TARGET_NR_sched_get_priority_max:
6585 ret = get_errno(sched_get_priority_max(arg1));
6586 break;
6587 case TARGET_NR_sched_get_priority_min:
6588 ret = get_errno(sched_get_priority_min(arg1));
6589 break;
6590 case TARGET_NR_sched_rr_get_interval:
6592 struct timespec ts;
6593 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6594 if (!is_error(ret)) {
6595 host_to_target_timespec(arg2, &ts);
6598 break;
6599 case TARGET_NR_nanosleep:
6601 struct timespec req, rem;
6602 target_to_host_timespec(&req, arg1);
6603 ret = get_errno(nanosleep(&req, &rem));
6604 if (is_error(ret) && arg2) {
6605 host_to_target_timespec(arg2, &rem);
6608 break;
6609 #ifdef TARGET_NR_query_module
6610 case TARGET_NR_query_module:
6611 goto unimplemented;
6612 #endif
6613 #ifdef TARGET_NR_nfsservctl
6614 case TARGET_NR_nfsservctl:
6615 goto unimplemented;
6616 #endif
6617 case TARGET_NR_prctl:
6618 switch (arg1)
6620 case PR_GET_PDEATHSIG:
6622 int deathsig;
6623 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6624 if (!is_error(ret) && arg2
6625 && put_user_ual(deathsig, arg2))
6626 goto efault;
6628 break;
6629 default:
6630 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6631 break;
6633 break;
6634 #ifdef TARGET_NR_arch_prctl
6635 case TARGET_NR_arch_prctl:
6636 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6637 ret = do_arch_prctl(cpu_env, arg1, arg2);
6638 break;
6639 #else
6640 goto unimplemented;
6641 #endif
6642 #endif
6643 #ifdef TARGET_NR_pread
6644 case TARGET_NR_pread:
6645 #ifdef TARGET_ARM
6646 if (((CPUARMState *)cpu_env)->eabi)
6647 arg4 = arg5;
6648 #endif
6649 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6650 goto efault;
6651 ret = get_errno(pread(arg1, p, arg3, arg4));
6652 unlock_user(p, arg2, ret);
6653 break;
6654 case TARGET_NR_pwrite:
6655 #ifdef TARGET_ARM
6656 if (((CPUARMState *)cpu_env)->eabi)
6657 arg4 = arg5;
6658 #endif
6659 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6660 goto efault;
6661 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6662 unlock_user(p, arg2, 0);
6663 break;
6664 #endif
6665 #ifdef TARGET_NR_pread64
6666 case TARGET_NR_pread64:
6667 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6668 goto efault;
6669 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6670 unlock_user(p, arg2, ret);
6671 break;
6672 case TARGET_NR_pwrite64:
6673 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6674 goto efault;
6675 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6676 unlock_user(p, arg2, 0);
6677 break;
6678 #endif
6679 case TARGET_NR_getcwd:
6680 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6681 goto efault;
6682 ret = get_errno(sys_getcwd1(p, arg2));
6683 unlock_user(p, arg1, ret);
6684 break;
6685 case TARGET_NR_capget:
6686 goto unimplemented;
6687 case TARGET_NR_capset:
6688 goto unimplemented;
6689 case TARGET_NR_sigaltstack:
6690 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6691 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6692 defined(TARGET_M68K) || defined(TARGET_S390X)
6693 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6694 break;
6695 #else
6696 goto unimplemented;
6697 #endif
6698 case TARGET_NR_sendfile:
6699 goto unimplemented;
6700 #ifdef TARGET_NR_getpmsg
6701 case TARGET_NR_getpmsg:
6702 goto unimplemented;
6703 #endif
6704 #ifdef TARGET_NR_putpmsg
6705 case TARGET_NR_putpmsg:
6706 goto unimplemented;
6707 #endif
6708 #ifdef TARGET_NR_vfork
6709 case TARGET_NR_vfork:
6710 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6711 0, 0, 0, 0));
6712 break;
6713 #endif
6714 #ifdef TARGET_NR_ugetrlimit
6715 case TARGET_NR_ugetrlimit:
6717 struct rlimit rlim;
6718 ret = get_errno(getrlimit(arg1, &rlim));
6719 if (!is_error(ret)) {
6720 struct target_rlimit *target_rlim;
6721 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6722 goto efault;
6723 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6724 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6725 unlock_user_struct(target_rlim, arg2, 1);
6727 break;
6729 #endif
6730 #ifdef TARGET_NR_truncate64
6731 case TARGET_NR_truncate64:
6732 if (!(p = lock_user_string(arg1)))
6733 goto efault;
6734 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6735 unlock_user(p, arg1, 0);
6736 break;
6737 #endif
6738 #ifdef TARGET_NR_ftruncate64
6739 case TARGET_NR_ftruncate64:
6740 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6741 break;
6742 #endif
6743 #ifdef TARGET_NR_stat64
6744 case TARGET_NR_stat64:
6745 if (!(p = lock_user_string(arg1)))
6746 goto efault;
6747 ret = get_errno(stat(path(p), &st));
6748 unlock_user(p, arg1, 0);
6749 if (!is_error(ret))
6750 ret = host_to_target_stat64(cpu_env, arg2, &st);
6751 break;
6752 #endif
6753 #ifdef TARGET_NR_lstat64
6754 case TARGET_NR_lstat64:
6755 if (!(p = lock_user_string(arg1)))
6756 goto efault;
6757 ret = get_errno(lstat(path(p), &st));
6758 unlock_user(p, arg1, 0);
6759 if (!is_error(ret))
6760 ret = host_to_target_stat64(cpu_env, arg2, &st);
6761 break;
6762 #endif
6763 #ifdef TARGET_NR_fstat64
6764 case TARGET_NR_fstat64:
6765 ret = get_errno(fstat(arg1, &st));
6766 if (!is_error(ret))
6767 ret = host_to_target_stat64(cpu_env, arg2, &st);
6768 break;
6769 #endif
6770 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6771 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6772 #ifdef TARGET_NR_fstatat64
6773 case TARGET_NR_fstatat64:
6774 #endif
6775 #ifdef TARGET_NR_newfstatat
6776 case TARGET_NR_newfstatat:
6777 #endif
6778 if (!(p = lock_user_string(arg2)))
6779 goto efault;
6780 #ifdef __NR_fstatat64
6781 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6782 #else
6783 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6784 #endif
6785 if (!is_error(ret))
6786 ret = host_to_target_stat64(cpu_env, arg3, &st);
6787 break;
6788 #endif
6789 case TARGET_NR_lchown:
6790 if (!(p = lock_user_string(arg1)))
6791 goto efault;
6792 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6793 unlock_user(p, arg1, 0);
6794 break;
6795 #ifdef TARGET_NR_getuid
6796 case TARGET_NR_getuid:
6797 ret = get_errno(high2lowuid(getuid()));
6798 break;
6799 #endif
6800 #ifdef TARGET_NR_getgid
6801 case TARGET_NR_getgid:
6802 ret = get_errno(high2lowgid(getgid()));
6803 break;
6804 #endif
6805 #ifdef TARGET_NR_geteuid
6806 case TARGET_NR_geteuid:
6807 ret = get_errno(high2lowuid(geteuid()));
6808 break;
6809 #endif
6810 #ifdef TARGET_NR_getegid
6811 case TARGET_NR_getegid:
6812 ret = get_errno(high2lowgid(getegid()));
6813 break;
6814 #endif
6815 case TARGET_NR_setreuid:
6816 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6817 break;
6818 case TARGET_NR_setregid:
6819 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6820 break;
6821 case TARGET_NR_getgroups:
6823 int gidsetsize = arg1;
6824 target_id *target_grouplist;
6825 gid_t *grouplist;
6826 int i;
6828 grouplist = alloca(gidsetsize * sizeof(gid_t));
6829 ret = get_errno(getgroups(gidsetsize, grouplist));
6830 if (gidsetsize == 0)
6831 break;
6832 if (!is_error(ret)) {
6833 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6834 if (!target_grouplist)
6835 goto efault;
6836 for(i = 0;i < ret; i++)
6837 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
6838 unlock_user(target_grouplist, arg2, gidsetsize * 2);
6841 break;
6842 case TARGET_NR_setgroups:
6844 int gidsetsize = arg1;
6845 target_id *target_grouplist;
6846 gid_t *grouplist;
6847 int i;
6849 grouplist = alloca(gidsetsize * sizeof(gid_t));
6850 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
6851 if (!target_grouplist) {
6852 ret = -TARGET_EFAULT;
6853 goto fail;
6855 for(i = 0;i < gidsetsize; i++)
6856 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
6857 unlock_user(target_grouplist, arg2, 0);
6858 ret = get_errno(setgroups(gidsetsize, grouplist));
6860 break;
6861 case TARGET_NR_fchown:
6862 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
6863 break;
6864 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6865 case TARGET_NR_fchownat:
6866 if (!(p = lock_user_string(arg2)))
6867 goto efault;
6868 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
6869 unlock_user(p, arg2, 0);
6870 break;
6871 #endif
6872 #ifdef TARGET_NR_setresuid
6873 case TARGET_NR_setresuid:
6874 ret = get_errno(setresuid(low2highuid(arg1),
6875 low2highuid(arg2),
6876 low2highuid(arg3)));
6877 break;
6878 #endif
6879 #ifdef TARGET_NR_getresuid
6880 case TARGET_NR_getresuid:
6882 uid_t ruid, euid, suid;
6883 ret = get_errno(getresuid(&ruid, &euid, &suid));
6884 if (!is_error(ret)) {
6885 if (put_user_u16(high2lowuid(ruid), arg1)
6886 || put_user_u16(high2lowuid(euid), arg2)
6887 || put_user_u16(high2lowuid(suid), arg3))
6888 goto efault;
6891 break;
6892 #endif
6893 #ifdef TARGET_NR_setresgid
6894 case TARGET_NR_setresgid:
6895 ret = get_errno(setresgid(low2highgid(arg1),
6896 low2highgid(arg2),
6897 low2highgid(arg3)));
6898 break;
6899 #endif
6900 #ifdef TARGET_NR_getresgid
6901 case TARGET_NR_getresgid:
6903 gid_t rgid, egid, sgid;
6904 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6905 if (!is_error(ret)) {
6906 if (put_user_u16(high2lowgid(rgid), arg1)
6907 || put_user_u16(high2lowgid(egid), arg2)
6908 || put_user_u16(high2lowgid(sgid), arg3))
6909 goto efault;
6912 break;
6913 #endif
6914 case TARGET_NR_chown:
6915 if (!(p = lock_user_string(arg1)))
6916 goto efault;
6917 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
6918 unlock_user(p, arg1, 0);
6919 break;
6920 case TARGET_NR_setuid:
6921 ret = get_errno(setuid(low2highuid(arg1)));
6922 break;
6923 case TARGET_NR_setgid:
6924 ret = get_errno(setgid(low2highgid(arg1)));
6925 break;
6926 case TARGET_NR_setfsuid:
6927 ret = get_errno(setfsuid(arg1));
6928 break;
6929 case TARGET_NR_setfsgid:
6930 ret = get_errno(setfsgid(arg1));
6931 break;
6933 #ifdef TARGET_NR_lchown32
6934 case TARGET_NR_lchown32:
6935 if (!(p = lock_user_string(arg1)))
6936 goto efault;
6937 ret = get_errno(lchown(p, arg2, arg3));
6938 unlock_user(p, arg1, 0);
6939 break;
6940 #endif
6941 #ifdef TARGET_NR_getuid32
6942 case TARGET_NR_getuid32:
6943 ret = get_errno(getuid());
6944 break;
6945 #endif
6947 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6948 /* Alpha specific */
6949 case TARGET_NR_getxuid:
6951 uid_t euid;
6952 euid=geteuid();
6953 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
6955 ret = get_errno(getuid());
6956 break;
6957 #endif
6958 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6959 /* Alpha specific */
6960 case TARGET_NR_getxgid:
6962 uid_t egid;
6963 egid=getegid();
6964 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
6966 ret = get_errno(getgid());
6967 break;
6968 #endif
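    /* Note (per the OSF/Linux-on-Alpha convention): getxuid/getxgid return a
     * pair of values, the real id in v0 (the normal syscall return) and the
     * effective id in register a4, which is why the cases above store
     * euid/egid into ir[IR_A4] before returning the real id. */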
6969 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
6970 /* Alpha specific */
6971 case TARGET_NR_osf_getsysinfo:
6972 ret = -TARGET_EOPNOTSUPP;
6973 switch (arg1) {
6974 case TARGET_GSI_IEEE_FP_CONTROL:
6976 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
6978 /* Copied from linux ieee_fpcr_to_swcr. */
6979 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
6980 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
6981 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
6982 | SWCR_TRAP_ENABLE_DZE
6983 | SWCR_TRAP_ENABLE_OVF);
6984 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
6985 | SWCR_TRAP_ENABLE_INE);
6986 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
6987 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
6989 if (put_user_u64 (swcr, arg2))
6990 goto efault;
6991 ret = 0;
6993 break;
6995 /* case GSI_IEEE_STATE_AT_SIGNAL:
6996 -- Not implemented in linux kernel.
6997 case GSI_UACPROC:
6998 -- Retrieves current unaligned access state; not much used.
6999 case GSI_PROC_TYPE:
7000 -- Retrieves implver information; surely not used.
7001 case GSI_GET_HWRPB:
7002 -- Grabs a copy of the HWRPB; surely not used.
7005 break;
7006 #endif
7007 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7008 /* Alpha specific */
7009 case TARGET_NR_osf_setsysinfo:
7010 ret = -TARGET_EOPNOTSUPP;
7011 switch (arg1) {
7012 case TARGET_SSI_IEEE_FP_CONTROL:
7013 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7015 uint64_t swcr, fpcr, orig_fpcr;
7017 if (get_user_u64 (swcr, arg2))
7018 goto efault;
7019 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7020 fpcr = orig_fpcr & FPCR_DYN_MASK;
7022 /* Copied from linux ieee_swcr_to_fpcr. */
7023 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7024 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7025 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7026 | SWCR_TRAP_ENABLE_DZE
7027 | SWCR_TRAP_ENABLE_OVF)) << 48;
7028 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7029 | SWCR_TRAP_ENABLE_INE)) << 57;
7030 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7031 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7033 cpu_alpha_store_fpcr (cpu_env, fpcr);
7034 ret = 0;
7036 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7037 /* Old exceptions are not signaled. */
7038 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7040 /* If any exceptions set by this call are unmasked,
7041 send a signal. */
7042 /* ??? FIXME */
7045 break;
7047 /* case SSI_NVPAIRS:
7048 -- Used with SSIN_UACPROC to enable unaligned accesses.
7049 case SSI_IEEE_STATE_AT_SIGNAL:
7050 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7051 -- Not implemented in linux kernel
7054 break;
7055 #endif
7056 #ifdef TARGET_NR_osf_sigprocmask
7057 /* Alpha specific. */
7058 case TARGET_NR_osf_sigprocmask:
7060 abi_ulong mask;
7061 int how = arg1;
7062 sigset_t set, oldset;
7064 switch(arg1) {
7065 case TARGET_SIG_BLOCK:
7066 how = SIG_BLOCK;
7067 break;
7068 case TARGET_SIG_UNBLOCK:
7069 how = SIG_UNBLOCK;
7070 break;
7071 case TARGET_SIG_SETMASK:
7072 how = SIG_SETMASK;
7073 break;
7074 default:
7075 ret = -TARGET_EINVAL;
7076 goto fail;
7078 mask = arg2;
7079 target_to_host_old_sigset(&set, &mask);
7080 sigprocmask(how, &set, &oldset);
7081 host_to_target_old_sigset(&mask, &oldset);
7082 ret = mask;
7084 break;
7085 #endif
7087 #ifdef TARGET_NR_getgid32
7088 case TARGET_NR_getgid32:
7089 ret = get_errno(getgid());
7090 break;
7091 #endif
7092 #ifdef TARGET_NR_geteuid32
7093 case TARGET_NR_geteuid32:
7094 ret = get_errno(geteuid());
7095 break;
7096 #endif
7097 #ifdef TARGET_NR_getegid32
7098 case TARGET_NR_getegid32:
7099 ret = get_errno(getegid());
7100 break;
7101 #endif
7102 #ifdef TARGET_NR_setreuid32
7103 case TARGET_NR_setreuid32:
7104 ret = get_errno(setreuid(arg1, arg2));
7105 break;
7106 #endif
7107 #ifdef TARGET_NR_setregid32
7108 case TARGET_NR_setregid32:
7109 ret = get_errno(setregid(arg1, arg2));
7110 break;
7111 #endif
7112 #ifdef TARGET_NR_getgroups32
7113 case TARGET_NR_getgroups32:
7115 int gidsetsize = arg1;
7116 uint32_t *target_grouplist;
7117 gid_t *grouplist;
7118 int i;
7120 grouplist = alloca(gidsetsize * sizeof(gid_t));
7121 ret = get_errno(getgroups(gidsetsize, grouplist));
7122 if (gidsetsize == 0)
7123 break;
7124 if (!is_error(ret)) {
7125 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7126 if (!target_grouplist) {
7127 ret = -TARGET_EFAULT;
7128 goto fail;
7130 for(i = 0;i < ret; i++)
7131 target_grouplist[i] = tswap32(grouplist[i]);
7132 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7135 break;
7136 #endif
7137 #ifdef TARGET_NR_setgroups32
7138 case TARGET_NR_setgroups32:
7140 int gidsetsize = arg1;
7141 uint32_t *target_grouplist;
7142 gid_t *grouplist;
7143 int i;
7145 grouplist = alloca(gidsetsize * sizeof(gid_t));
7146 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7147 if (!target_grouplist) {
7148 ret = -TARGET_EFAULT;
7149 goto fail;
7151 for(i = 0;i < gidsetsize; i++)
7152 grouplist[i] = tswap32(target_grouplist[i]);
7153 unlock_user(target_grouplist, arg2, 0);
7154 ret = get_errno(setgroups(gidsetsize, grouplist));
7156 break;
7157 #endif
7158 #ifdef TARGET_NR_fchown32
7159 case TARGET_NR_fchown32:
7160 ret = get_errno(fchown(arg1, arg2, arg3));
7161 break;
7162 #endif
7163 #ifdef TARGET_NR_setresuid32
7164 case TARGET_NR_setresuid32:
7165 ret = get_errno(setresuid(arg1, arg2, arg3));
7166 break;
7167 #endif
7168 #ifdef TARGET_NR_getresuid32
7169 case TARGET_NR_getresuid32:
7171 uid_t ruid, euid, suid;
7172 ret = get_errno(getresuid(&ruid, &euid, &suid));
7173 if (!is_error(ret)) {
7174 if (put_user_u32(ruid, arg1)
7175 || put_user_u32(euid, arg2)
7176 || put_user_u32(suid, arg3))
7177 goto efault;
7180 break;
7181 #endif
7182 #ifdef TARGET_NR_setresgid32
7183 case TARGET_NR_setresgid32:
7184 ret = get_errno(setresgid(arg1, arg2, arg3));
7185 break;
7186 #endif
7187 #ifdef TARGET_NR_getresgid32
7188 case TARGET_NR_getresgid32:
7190 gid_t rgid, egid, sgid;
7191 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7192 if (!is_error(ret)) {
7193 if (put_user_u32(rgid, arg1)
7194 || put_user_u32(egid, arg2)
7195 || put_user_u32(sgid, arg3))
7196 goto efault;
7199 break;
7200 #endif
7201 #ifdef TARGET_NR_chown32
7202 case TARGET_NR_chown32:
7203 if (!(p = lock_user_string(arg1)))
7204 goto efault;
7205 ret = get_errno(chown(p, arg2, arg3));
7206 unlock_user(p, arg1, 0);
7207 break;
7208 #endif
7209 #ifdef TARGET_NR_setuid32
7210 case TARGET_NR_setuid32:
7211 ret = get_errno(setuid(arg1));
7212 break;
7213 #endif
7214 #ifdef TARGET_NR_setgid32
7215 case TARGET_NR_setgid32:
7216 ret = get_errno(setgid(arg1));
7217 break;
7218 #endif
7219 #ifdef TARGET_NR_setfsuid32
7220 case TARGET_NR_setfsuid32:
7221 ret = get_errno(setfsuid(arg1));
7222 break;
7223 #endif
7224 #ifdef TARGET_NR_setfsgid32
7225 case TARGET_NR_setfsgid32:
7226 ret = get_errno(setfsgid(arg1));
7227 break;
7228 #endif
7230 case TARGET_NR_pivot_root:
7231 goto unimplemented;
7232 #ifdef TARGET_NR_mincore
7233 case TARGET_NR_mincore:
7235 void *a;
7236 ret = -TARGET_EFAULT;
7237 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7238 goto efault;
7239 if (!(p = lock_user_string(arg3)))
7240 goto mincore_fail;
7241 ret = get_errno(mincore(a, arg2, p));
7242 unlock_user(p, arg3, ret);
7243 mincore_fail:
7244 unlock_user(a, arg1, 0);
7246 break;
7247 #endif
7248 #ifdef TARGET_NR_arm_fadvise64_64
7249 case TARGET_NR_arm_fadvise64_64:
7252 * arm_fadvise64_64 looks like fadvise64_64 but
7253 * with different argument order
7255 abi_long temp;
7256 temp = arg3;
7257 arg3 = arg4;
7258 arg4 = temp;
7260 #endif
7261 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7262 #ifdef TARGET_NR_fadvise64_64
7263 case TARGET_NR_fadvise64_64:
7264 #endif
7265 #ifdef TARGET_NR_fadvise64
7266 case TARGET_NR_fadvise64:
7267 #endif
7268 #ifdef TARGET_S390X
7269 switch (arg4) {
7270 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7271 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7272 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7273 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7274 default: break;
7276 #endif
7277 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7278 break;
7279 #endif
7280 #ifdef TARGET_NR_madvise
7281 case TARGET_NR_madvise:
7282 /* A straight passthrough may not be safe because qemu sometimes
7283 turns private file-backed mappings into anonymous mappings.
7284 This will break MADV_DONTNEED.
7285 This is a hint, so ignoring and returning success is ok. */
7286 ret = get_errno(0);
7287 break;
7288 #endif
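    /* A sketch of the guest pattern the comment above is concerned with
     * (illustrative only): a private file-backed mapping relies on
     * MADV_DONTNEED repopulating the pages from the file on the next access,
     * which no longer holds once qemu has silently replaced the mapping with
     * an anonymous one.
     *
     *     void *m = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
     *     madvise(m, len, MADV_DONTNEED);   // guest expects file contents on re-read
     */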
7289 #if TARGET_ABI_BITS == 32
7290 case TARGET_NR_fcntl64:
7292 int cmd;
7293 struct flock64 fl;
7294 struct target_flock64 *target_fl;
7295 #ifdef TARGET_ARM
7296 struct target_eabi_flock64 *target_efl;
7297 #endif
7299 cmd = target_to_host_fcntl_cmd(arg2);
7300 if (cmd == -TARGET_EINVAL)
7301 return cmd;
7303 switch(arg2) {
7304 case TARGET_F_GETLK64:
7305 #ifdef TARGET_ARM
7306 if (((CPUARMState *)cpu_env)->eabi) {
7307 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7308 goto efault;
7309 fl.l_type = tswap16(target_efl->l_type);
7310 fl.l_whence = tswap16(target_efl->l_whence);
7311 fl.l_start = tswap64(target_efl->l_start);
7312 fl.l_len = tswap64(target_efl->l_len);
7313 fl.l_pid = tswap32(target_efl->l_pid);
7314 unlock_user_struct(target_efl, arg3, 0);
7315 } else
7316 #endif
7318 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7319 goto efault;
7320 fl.l_type = tswap16(target_fl->l_type);
7321 fl.l_whence = tswap16(target_fl->l_whence);
7322 fl.l_start = tswap64(target_fl->l_start);
7323 fl.l_len = tswap64(target_fl->l_len);
7324 fl.l_pid = tswap32(target_fl->l_pid);
7325 unlock_user_struct(target_fl, arg3, 0);
7327 ret = get_errno(fcntl(arg1, cmd, &fl));
7328 if (ret == 0) {
7329 #ifdef TARGET_ARM
7330 if (((CPUARMState *)cpu_env)->eabi) {
7331 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7332 goto efault;
7333 target_efl->l_type = tswap16(fl.l_type);
7334 target_efl->l_whence = tswap16(fl.l_whence);
7335 target_efl->l_start = tswap64(fl.l_start);
7336 target_efl->l_len = tswap64(fl.l_len);
7337 target_efl->l_pid = tswap32(fl.l_pid);
7338 unlock_user_struct(target_efl, arg3, 1);
7339 } else
7340 #endif
7342 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7343 goto efault;
7344 target_fl->l_type = tswap16(fl.l_type);
7345 target_fl->l_whence = tswap16(fl.l_whence);
7346 target_fl->l_start = tswap64(fl.l_start);
7347 target_fl->l_len = tswap64(fl.l_len);
7348 target_fl->l_pid = tswap32(fl.l_pid);
7349 unlock_user_struct(target_fl, arg3, 1);
7352 break;
7354 case TARGET_F_SETLK64:
7355 case TARGET_F_SETLKW64:
7356 #ifdef TARGET_ARM
7357 if (((CPUARMState *)cpu_env)->eabi) {
7358 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7359 goto efault;
7360 fl.l_type = tswap16(target_efl->l_type);
7361 fl.l_whence = tswap16(target_efl->l_whence);
7362 fl.l_start = tswap64(target_efl->l_start);
7363 fl.l_len = tswap64(target_efl->l_len);
7364 fl.l_pid = tswap32(target_efl->l_pid);
7365 unlock_user_struct(target_efl, arg3, 0);
7366 } else
7367 #endif
7369 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7370 goto efault;
7371 fl.l_type = tswap16(target_fl->l_type);
7372 fl.l_whence = tswap16(target_fl->l_whence);
7373 fl.l_start = tswap64(target_fl->l_start);
7374 fl.l_len = tswap64(target_fl->l_len);
7375 fl.l_pid = tswap32(target_fl->l_pid);
7376 unlock_user_struct(target_fl, arg3, 0);
7378 ret = get_errno(fcntl(arg1, cmd, &fl));
7379 break;
7380 default:
7381 ret = do_fcntl(arg1, arg2, arg3);
7382 break;
7384 break;
7386 #endif
7387 #ifdef TARGET_NR_cacheflush
7388 case TARGET_NR_cacheflush:
7389 /* self-modifying code is handled automatically, so nothing needed */
7390 ret = 0;
7391 break;
7392 #endif
7393 #ifdef TARGET_NR_security
7394 case TARGET_NR_security:
7395 goto unimplemented;
7396 #endif
7397 #ifdef TARGET_NR_getpagesize
7398 case TARGET_NR_getpagesize:
7399 ret = TARGET_PAGE_SIZE;
7400 break;
7401 #endif
7402 case TARGET_NR_gettid:
7403 ret = get_errno(gettid());
7404 break;
7405 #ifdef TARGET_NR_readahead
7406 case TARGET_NR_readahead:
7407 #if TARGET_ABI_BITS == 32
7408 #ifdef TARGET_ARM
7409 if (((CPUARMState *)cpu_env)->eabi)
7411 arg2 = arg3;
7412 arg3 = arg4;
7413 arg4 = arg5;
7415 #endif
7416 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7417 #else
7418 ret = get_errno(readahead(arg1, arg2, arg3));
7419 #endif
7420 break;
7421 #endif
7422 #ifdef TARGET_NR_setxattr
7423 case TARGET_NR_setxattr:
7424 case TARGET_NR_lsetxattr:
7425 case TARGET_NR_fsetxattr:
7426 case TARGET_NR_getxattr:
7427 case TARGET_NR_lgetxattr:
7428 case TARGET_NR_fgetxattr:
7429 case TARGET_NR_listxattr:
7430 case TARGET_NR_llistxattr:
7431 case TARGET_NR_flistxattr:
7432 case TARGET_NR_removexattr:
7433 case TARGET_NR_lremovexattr:
7434 case TARGET_NR_fremovexattr:
7435 ret = -TARGET_EOPNOTSUPP;
7436 break;
7437 #endif
7438 #ifdef TARGET_NR_set_thread_area
7439 case TARGET_NR_set_thread_area:
7440 #if defined(TARGET_MIPS)
7441 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7442 ret = 0;
7443 break;
7444 #elif defined(TARGET_CRIS)
7445 if (arg1 & 0xff)
7446 ret = -TARGET_EINVAL;
7447 else {
7448 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7449 ret = 0;
7451 break;
7452 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7453 ret = do_set_thread_area(cpu_env, arg1);
7454 break;
7455 #else
7456 goto unimplemented_nowarn;
7457 #endif
7458 #endif
7459 #ifdef TARGET_NR_get_thread_area
7460 case TARGET_NR_get_thread_area:
7461 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7462 ret = do_get_thread_area(cpu_env, arg1);
7463 #else
7464 goto unimplemented_nowarn;
7465 #endif
7466 #endif
7467 #ifdef TARGET_NR_getdomainname
7468 case TARGET_NR_getdomainname:
7469 goto unimplemented_nowarn;
7470 #endif
7472 #ifdef TARGET_NR_clock_gettime
7473 case TARGET_NR_clock_gettime:
7475 struct timespec ts;
7476 ret = get_errno(clock_gettime(arg1, &ts));
7477 if (!is_error(ret)) {
7478 host_to_target_timespec(arg2, &ts);
7480 break;
7482 #endif
7483 #ifdef TARGET_NR_clock_getres
7484 case TARGET_NR_clock_getres:
7486 struct timespec ts;
7487 ret = get_errno(clock_getres(arg1, &ts));
7488 if (!is_error(ret)) {
7489 host_to_target_timespec(arg2, &ts);
7491 break;
7493 #endif
7494 #ifdef TARGET_NR_clock_nanosleep
7495 case TARGET_NR_clock_nanosleep:
7497 struct timespec ts;
7498 target_to_host_timespec(&ts, arg3);
7499 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7500 if (arg4)
7501 host_to_target_timespec(arg4, &ts);
7502 break;
7504 #endif
7506 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7507 case TARGET_NR_set_tid_address:
7508 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7509 break;
7510 #endif
7512 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7513 case TARGET_NR_tkill:
7514 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7515 break;
7516 #endif
7518 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7519 case TARGET_NR_tgkill:
7520 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7521 target_to_host_signal(arg3)));
7522 break;
7523 #endif
7525 #ifdef TARGET_NR_set_robust_list
7526 case TARGET_NR_set_robust_list:
7527 goto unimplemented_nowarn;
7528 #endif
7530 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7531 case TARGET_NR_utimensat:
7533 struct timespec *tsp, ts[2];
7534 if (!arg3) {
7535 tsp = NULL;
7536 } else {
7537 target_to_host_timespec(ts, arg3);
7538 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
7539 tsp = ts;
7541 if (!arg2)
7542 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7543 else {
7544 if (!(p = lock_user_string(arg2))) {
7545 ret = -TARGET_EFAULT;
7546 goto fail;
7548 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7549 unlock_user(p, arg2, 0);
7552 break;
7553 #endif
7554 #if defined(CONFIG_USE_NPTL)
7555 case TARGET_NR_futex:
7556 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7557 break;
7558 #endif
7559 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7560 case TARGET_NR_inotify_init:
7561 ret = get_errno(sys_inotify_init());
7562 break;
7563 #endif
7564 #ifdef CONFIG_INOTIFY1
7565 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7566 case TARGET_NR_inotify_init1:
7567 ret = get_errno(sys_inotify_init1(arg1));
7568 break;
7569 #endif
7570 #endif
7571 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7572 case TARGET_NR_inotify_add_watch:
7573 p = lock_user_string(arg2);
7574 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7575 unlock_user(p, arg2, 0);
7576 break;
7577 #endif
7578 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7579 case TARGET_NR_inotify_rm_watch:
7580 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7581 break;
7582 #endif
7584 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7585 case TARGET_NR_mq_open:
7587 struct mq_attr posix_mq_attr;
7589 p = lock_user_string(arg1 - 1);
7590 if (arg4 != 0)
7591 copy_from_user_mq_attr (&posix_mq_attr, arg4);
7592 ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL)); /* don't pass an uninitialized attr when the guest gave none */
7593 unlock_user (p, arg1, 0);
7595 break;
7597 case TARGET_NR_mq_unlink:
7598 p = lock_user_string(arg1 - 1);
7599 ret = get_errno(mq_unlink(p));
7600 unlock_user (p, arg1, 0);
7601 break;
7603 case TARGET_NR_mq_timedsend:
7605 struct timespec ts;
7607 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7608 if (arg5 != 0) {
7609 target_to_host_timespec(&ts, arg5);
7610 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7611 host_to_target_timespec(arg5, &ts);
7613 else
7614 ret = get_errno(mq_send(arg1, p, arg3, arg4));
7615 unlock_user (p, arg2, arg3);
7617 break;
7619 case TARGET_NR_mq_timedreceive:
7621 struct timespec ts;
7622 unsigned int prio;
7624 p = lock_user (VERIFY_WRITE, arg2, arg3, 0); /* receive buffer: map for writing, nothing to copy in */
7625 if (arg5 != 0) {
7626 target_to_host_timespec(&ts, arg5);
7627 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
7628 host_to_target_timespec(arg5, &ts);
7630 else
7631 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
7632 unlock_user (p, arg2, arg3);
7633 if (arg4 != 0)
7634 put_user_u32(prio, arg4);
7636 break;
7638 /* Not implemented for now... */
7639 /* case TARGET_NR_mq_notify: */
7640 /* break; */
7642 case TARGET_NR_mq_getsetattr:
7644 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
7645 ret = 0;
7646 if (arg3 != 0) {
7647 ret = mq_getattr(arg1, &posix_mq_attr_out);
7648 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
7650 if (arg2 != 0) {
7651 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
7652 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
7656 break;
7657 #endif
7659 #ifdef CONFIG_SPLICE
7660 #ifdef TARGET_NR_tee
7661 case TARGET_NR_tee:
7663 ret = get_errno(tee(arg1,arg2,arg3,arg4));
7665 break;
7666 #endif
7667 #ifdef TARGET_NR_splice
7668 case TARGET_NR_splice:
7670 loff_t loff_in, loff_out;
7671 loff_t *ploff_in = NULL, *ploff_out = NULL;
7672 if(arg2) {
7673 get_user_u64(loff_in, arg2);
7674 ploff_in = &loff_in;
7676 if(arg4) {
7677 get_user_u64(loff_out, arg4); /* out-offset comes from arg4, not arg2 */
7678 ploff_out = &loff_out;
7680 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
7682 break;
7683 #endif
7684 #ifdef TARGET_NR_vmsplice
7685 case TARGET_NR_vmsplice:
7687 int count = arg3;
7688 struct iovec *vec;
7690 vec = alloca(count * sizeof(struct iovec));
7691 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7692 goto efault;
7693 ret = get_errno(vmsplice(arg1, vec, count, arg4));
7694 unlock_iovec(vec, arg2, count, 0);
7696 break;
7697 #endif
7698 #endif /* CONFIG_SPLICE */
7699 #ifdef CONFIG_EVENTFD
7700 #if defined(TARGET_NR_eventfd)
7701 case TARGET_NR_eventfd:
7702 ret = get_errno(eventfd(arg1, 0));
7703 break;
7704 #endif
7705 #if defined(TARGET_NR_eventfd2)
7706 case TARGET_NR_eventfd2:
7707 ret = get_errno(eventfd(arg1, arg2));
7708 break;
7709 #endif
7710 #endif /* CONFIG_EVENTFD */
7711 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7712 case TARGET_NR_fallocate:
7713 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
7714 break;
7715 #endif
7716 #if defined(CONFIG_SYNC_FILE_RANGE)
7717 #if defined(TARGET_NR_sync_file_range)
7718 case TARGET_NR_sync_file_range:
7719 #if TARGET_ABI_BITS == 32
7720 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
7721 target_offset64(arg4, arg5), arg6));
7722 #else
7723 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
7724 #endif
7725 break;
7726 #endif
7727 #if defined(TARGET_NR_sync_file_range2)
7728 case TARGET_NR_sync_file_range2:
7729 /* This is like sync_file_range but the arguments are reordered */
7730 #if TARGET_ABI_BITS == 32
7731 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7732 target_offset64(arg5, arg6), arg2));
7733 #else
7734 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
7735 #endif
7736 break;
7737 #endif
7738 #endif
7739 #if defined(CONFIG_EPOLL)
7740 #if defined(TARGET_NR_epoll_create)
7741 case TARGET_NR_epoll_create:
7742 ret = get_errno(epoll_create(arg1));
7743 break;
7744 #endif
7745 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7746 case TARGET_NR_epoll_create1:
7747 ret = get_errno(epoll_create1(arg1));
7748 break;
7749 #endif
7750 #if defined(TARGET_NR_epoll_ctl)
7751 case TARGET_NR_epoll_ctl:
7753 struct epoll_event ep;
7754 struct epoll_event *epp = 0;
7755 if (arg4) {
7756 struct target_epoll_event *target_ep;
7757 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
7758 goto efault;
7760 ep.events = tswap32(target_ep->events);
7761 /* The epoll_data_t union is just opaque data to the kernel,
7762 * so we transfer all 64 bits across and need not worry what
7763 * actual data type it is.
7765 ep.data.u64 = tswap64(target_ep->data.u64);
7766 unlock_user_struct(target_ep, arg4, 0);
7767 epp = &ep;
7769 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
7770 break;
7772 #endif
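    /* For reference, the union that the single 64-bit copy above covers
     * (as declared in <sys/epoll.h>); copying data.u64 moves whichever
     * member the guest actually stored:
     *
     *     typedef union epoll_data {
     *         void     *ptr;
     *         int       fd;
     *         uint32_t  u32;
     *         uint64_t  u64;
     *     } epoll_data_t;
     */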
7774 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7775 #define IMPLEMENT_EPOLL_PWAIT
7776 #endif
7777 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7778 #if defined(TARGET_NR_epoll_wait)
7779 case TARGET_NR_epoll_wait:
7780 #endif
7781 #if defined(IMPLEMENT_EPOLL_PWAIT)
7782 case TARGET_NR_epoll_pwait:
7783 #endif
7785 struct target_epoll_event *target_ep;
7786 struct epoll_event *ep;
7787 int epfd = arg1;
7788 int maxevents = arg3;
7789 int timeout = arg4;
7791 target_ep = lock_user(VERIFY_WRITE, arg2,
7792 maxevents * sizeof(struct target_epoll_event), 1);
7793 if (!target_ep) {
7794 goto efault;
7797 ep = alloca(maxevents * sizeof(struct epoll_event));
7799 switch (num) {
7800 #if defined(IMPLEMENT_EPOLL_PWAIT)
7801 case TARGET_NR_epoll_pwait:
7803 target_sigset_t *target_set;
7804 sigset_t _set, *set = &_set;
7806 if (arg5) {
7807 target_set = lock_user(VERIFY_READ, arg5,
7808 sizeof(target_sigset_t), 1);
7809 if (!target_set) {
7810 unlock_user(target_ep, arg2, 0);
7811 goto efault;
7813 target_to_host_sigset(set, target_set);
7814 unlock_user(target_set, arg5, 0);
7815 } else {
7816 set = NULL;
7819 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
7820 break;
7822 #endif
7823 #if defined(TARGET_NR_epoll_wait)
7824 case TARGET_NR_epoll_wait:
7825 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
7826 break;
7827 #endif
7828 default:
7829 ret = -TARGET_ENOSYS;
7831 if (!is_error(ret)) {
7832 int i;
7833 for (i = 0; i < ret; i++) {
7834 target_ep[i].events = tswap32(ep[i].events);
7835 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
7838 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
7839 break;
7841 #endif
7842 #endif
7843 default:
7844 unimplemented:
7845 gemu_log("qemu: Unsupported syscall: %d\n", num);
7846 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
7847 unimplemented_nowarn:
7848 #endif
7849 ret = -TARGET_ENOSYS;
7850 break;
7852 fail:
7853 #ifdef DEBUG
7854 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
7855 #endif
7856 if(do_strace)
7857 print_syscall_ret(num, ret);
7858 return ret;
7859 efault:
7860 ret = -TARGET_EFAULT;
7861 goto fail;