qemu.git: linux-user/syscall.c
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include <qemu-common.h>
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/utsname.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #include <linux/vt.h>
95 #include "linux_loop.h"
96 #include "cpu-uname.h"
98 #include "qemu.h"
99 #include "qemu-common.h"
101 #if defined(CONFIG_USE_NPTL)
102 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
103 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
104 #else
105 /* XXX: Hardcode the above values. */
106 #define CLONE_NPTL_FLAGS2 0
107 #endif
109 //#define DEBUG
111 //#include <linux/msdos_fs.h>
112 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
113 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
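/*
 * These two ioctls come from linux/msdos_fs.h (left un-included above,
 * presumably to avoid dragging in the whole header); both take an array
 * of two struct linux_dirent and return a directory entry in its short
 * (8.3) and long name forms.
 */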
116 #undef _syscall0
117 #undef _syscall1
118 #undef _syscall2
119 #undef _syscall3
120 #undef _syscall4
121 #undef _syscall5
122 #undef _syscall6
124 #define _syscall0(type,name) \
125 static type name (void) \
127 return syscall(__NR_##name); \
130 #define _syscall1(type,name,type1,arg1) \
131 static type name (type1 arg1) \
133 return syscall(__NR_##name, arg1); \
136 #define _syscall2(type,name,type1,arg1,type2,arg2) \
137 static type name (type1 arg1,type2 arg2) \
139 return syscall(__NR_##name, arg1, arg2); \
142 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
143 static type name (type1 arg1,type2 arg2,type3 arg3) \
145 return syscall(__NR_##name, arg1, arg2, arg3); \
148 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
149 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
151 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
154 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
155 type5,arg5) \
156 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
158 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
162 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5,type6,arg6) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
165 type6 arg6) \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
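/*
 * The _syscallN macros generate thin static wrappers around the raw
 * syscall(2) interface so that system calls which the host libc does not
 * expose (or which must not clash with libc's own prototypes) can still
 * be issued by name.  Illustrative expansion only:
 *
 *     _syscall2(int, sys_tkill, int, tid, int, sig)
 *
 * becomes roughly
 *
 *     static int sys_tkill(int tid, int sig)
 *     {
 *         return syscall(__NR_sys_tkill, tid, sig);
 *     }
 *
 * with __NR_sys_tkill mapped back to the real __NR_tkill by the aliases
 * defined just below.
 */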
171 #define __NR_sys_uname __NR_uname
172 #define __NR_sys_faccessat __NR_faccessat
173 #define __NR_sys_fchmodat __NR_fchmodat
174 #define __NR_sys_fchownat __NR_fchownat
175 #define __NR_sys_fstatat64 __NR_fstatat64
176 #define __NR_sys_futimesat __NR_futimesat
177 #define __NR_sys_getcwd1 __NR_getcwd
178 #define __NR_sys_getdents __NR_getdents
179 #define __NR_sys_getdents64 __NR_getdents64
180 #define __NR_sys_getpriority __NR_getpriority
181 #define __NR_sys_linkat __NR_linkat
182 #define __NR_sys_mkdirat __NR_mkdirat
183 #define __NR_sys_mknodat __NR_mknodat
184 #define __NR_sys_newfstatat __NR_newfstatat
185 #define __NR_sys_openat __NR_openat
186 #define __NR_sys_readlinkat __NR_readlinkat
187 #define __NR_sys_renameat __NR_renameat
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_symlinkat __NR_symlinkat
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_unlinkat __NR_unlinkat
194 #define __NR_sys_utimensat __NR_utimensat
195 #define __NR_sys_futex __NR_futex
196 #define __NR_sys_inotify_init __NR_inotify_init
197 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
198 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
200 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
201 defined(__s390x__)
202 #define __NR__llseek __NR_lseek
203 #endif
205 #ifdef __NR_gettid
206 _syscall0(int, gettid)
207 #else
208 /* This is a replacement for the host gettid() and must return a host
209 errno. */
210 static int gettid(void) {
211 return -ENOSYS;
213 #endif
214 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
215 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
216 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
217 #endif
218 _syscall2(int, sys_getpriority, int, which, int, who);
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
221 loff_t *, res, uint, wh);
222 #endif
223 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
224 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
225 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
226 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
227 #endif
228 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
229 _syscall2(int,sys_tkill,int,tid,int,sig)
230 #endif
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
233 #endif
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
236 #endif
237 #if defined(CONFIG_USE_NPTL)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
240 const struct timespec *,timeout,int *,uaddr2,int,val3)
241 #endif
242 #endif
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
250 static bitmask_transtbl fcntl_flags_tbl[] = {
251 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
252 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
253 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
254 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
255 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
256 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
257 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
258 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
259 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
260 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
261 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
262 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
263 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
264 #if defined(O_DIRECT)
265 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
266 #endif
267 { 0, 0, 0, 0 }
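/*
 * Translation table for open(2)/fcntl(2) flag bits.  Each entry is
 * { target_mask, target_bits, host_mask, host_bits }:
 * target_to_host_bitmask() sets the host bits for every entry whose
 * target bits are present, and host_to_target_bitmask() does the
 * reverse.  The all-zero entry terminates the table.
 */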
270 #define COPY_UTSNAME_FIELD(dest, src) \
271 do { \
272 /* __NEW_UTS_LEN doesn't include terminating null */ \
273 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
274 (dest)[__NEW_UTS_LEN] = '\0'; \
275 } while (0)
277 static int sys_uname(struct new_utsname *buf)
279 struct utsname uts_buf;
281 if (uname(&uts_buf) < 0)
282 return (-1);
284 /*
285 * Just in case these have some differences, we
286 * translate utsname to new_utsname (which is the
287 * struct the Linux kernel uses).
288 */
290 bzero(buf, sizeof (*buf));
291 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
292 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
293 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
294 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
295 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
296 #ifdef _GNU_SOURCE
297 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
298 #endif
299 return (0);
301 #undef COPY_UTSNAME_FIELD
304 static int sys_getcwd1(char *buf, size_t size)
306 if (getcwd(buf, size) == NULL) {
307 /* getcwd() sets errno */
308 return (-1);
310 return strlen(buf)+1;
313 #ifdef CONFIG_ATFILE
314 /*
315 * Host system seems to have atfile syscall stubs available. We
316 * now enable them one by one as specified by target syscall_nr.h.
317 */
319 #ifdef TARGET_NR_faccessat
320 static int sys_faccessat(int dirfd, const char *pathname, int mode)
322 return (faccessat(dirfd, pathname, mode, 0));
324 #endif
325 #ifdef TARGET_NR_fchmodat
326 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
328 return (fchmodat(dirfd, pathname, mode, 0));
330 #endif
331 #if defined(TARGET_NR_fchownat)
332 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
333 gid_t group, int flags)
335 return (fchownat(dirfd, pathname, owner, group, flags));
337 #endif
338 #ifdef __NR_fstatat64
339 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
340 int flags)
342 return (fstatat(dirfd, pathname, buf, flags));
344 #endif
345 #ifdef __NR_newfstatat
346 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
347 int flags)
349 return (fstatat(dirfd, pathname, buf, flags));
351 #endif
352 #ifdef TARGET_NR_futimesat
353 static int sys_futimesat(int dirfd, const char *pathname,
354 const struct timeval times[2])
356 return (futimesat(dirfd, pathname, times));
358 #endif
359 #ifdef TARGET_NR_linkat
360 static int sys_linkat(int olddirfd, const char *oldpath,
361 int newdirfd, const char *newpath, int flags)
363 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
365 #endif
366 #ifdef TARGET_NR_mkdirat
367 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
369 return (mkdirat(dirfd, pathname, mode));
371 #endif
372 #ifdef TARGET_NR_mknodat
373 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
374 dev_t dev)
376 return (mknodat(dirfd, pathname, mode, dev));
378 #endif
379 #ifdef TARGET_NR_openat
380 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
382 /*
383 * open(2) has an extra parameter 'mode' when called with
384 * the O_CREAT flag.
385 */
386 if ((flags & O_CREAT) != 0) {
387 va_list ap;
388 mode_t mode;
390 /*
391 * Get the 'mode' parameter and translate it to
392 * host bits.
393 */
394 va_start(ap, flags);
395 mode = va_arg(ap, mode_t);
396 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
397 va_end(ap);
399 return (openat(dirfd, pathname, flags, mode));
401 return (openat(dirfd, pathname, flags));
403 #endif
404 #ifdef TARGET_NR_readlinkat
405 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
407 return (readlinkat(dirfd, pathname, buf, bufsiz));
409 #endif
410 #ifdef TARGET_NR_renameat
411 static int sys_renameat(int olddirfd, const char *oldpath,
412 int newdirfd, const char *newpath)
414 return (renameat(olddirfd, oldpath, newdirfd, newpath));
416 #endif
417 #ifdef TARGET_NR_symlinkat
418 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
420 return (symlinkat(oldpath, newdirfd, newpath));
422 #endif
423 #ifdef TARGET_NR_unlinkat
424 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
426 return (unlinkat(dirfd, pathname, flags));
428 #endif
429 #else /* !CONFIG_ATFILE */
431 /*
432 * Try direct syscalls instead.
433 */
434 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
435 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
436 #endif
437 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
438 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
439 #endif
440 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
441 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
442 uid_t,owner,gid_t,group,int,flags)
443 #endif
444 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
445 defined(__NR_fstatat64)
446 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
447 struct stat *,buf,int,flags)
448 #endif
449 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
450 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
451 const struct timeval *,times)
452 #endif
453 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
454 defined(__NR_newfstatat)
455 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
456 struct stat *,buf,int,flags)
457 #endif
458 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
459 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
460 int,newdirfd,const char *,newpath,int,flags)
461 #endif
462 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
463 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
464 #endif
465 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
466 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
467 mode_t,mode,dev_t,dev)
468 #endif
469 #if defined(TARGET_NR_openat) && defined(__NR_openat)
470 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
471 #endif
472 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
473 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
474 char *,buf,size_t,bufsize)
475 #endif
476 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
477 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
478 int,newdirfd,const char *,newpath)
479 #endif
480 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
481 _syscall3(int,sys_symlinkat,const char *,oldpath,
482 int,newdirfd,const char *,newpath)
483 #endif
484 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
485 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
486 #endif
488 #endif /* CONFIG_ATFILE */
490 #ifdef CONFIG_UTIMENSAT
491 static int sys_utimensat(int dirfd, const char *pathname,
492 const struct timespec times[2], int flags)
494 if (pathname == NULL)
495 return futimens(dirfd, times);
496 else
497 return utimensat(dirfd, pathname, times, flags);
499 #else
500 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
501 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
502 const struct timespec *,tsp,int,flags)
503 #endif
504 #endif /* CONFIG_UTIMENSAT */
506 #ifdef CONFIG_INOTIFY
507 #include <sys/inotify.h>
509 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
510 static int sys_inotify_init(void)
512 return (inotify_init());
514 #endif
515 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
516 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
518 return (inotify_add_watch(fd, pathname, mask));
520 #endif
521 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
522 static int sys_inotify_rm_watch(int fd, int32_t wd)
524 return (inotify_rm_watch(fd, wd));
526 #endif
527 #ifdef CONFIG_INOTIFY1
528 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
529 static int sys_inotify_init1(int flags)
531 return (inotify_init1(flags));
533 #endif
534 #endif
535 #else
536 /* Userspace can usually survive runtime without inotify */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY */
543 #if defined(TARGET_NR_ppoll)
544 #ifndef __NR_ppoll
545 # define __NR_ppoll -1
546 #endif
547 #define __NR_sys_ppoll __NR_ppoll
548 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
549 struct timespec *, timeout, const __sigset_t *, sigmask,
550 size_t, sigsetsize)
551 #endif
553 extern int personality(int);
554 extern int flock(int, int);
555 extern int setfsuid(int);
556 extern int setfsgid(int);
557 extern int setgroups(int, gid_t *);
559 #define ERRNO_TABLE_SIZE 1200
561 /* target_to_host_errno_table[] is initialized from
562 * host_to_target_errno_table[] in syscall_init(). */
563 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
564 };
566 /*
567 * This list is the union of errno values overridden in asm-<arch>/errno.h
568 * minus the errnos that are not actually generic to all archs.
569 */
570 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
571 [EIDRM] = TARGET_EIDRM,
572 [ECHRNG] = TARGET_ECHRNG,
573 [EL2NSYNC] = TARGET_EL2NSYNC,
574 [EL3HLT] = TARGET_EL3HLT,
575 [EL3RST] = TARGET_EL3RST,
576 [ELNRNG] = TARGET_ELNRNG,
577 [EUNATCH] = TARGET_EUNATCH,
578 [ENOCSI] = TARGET_ENOCSI,
579 [EL2HLT] = TARGET_EL2HLT,
580 [EDEADLK] = TARGET_EDEADLK,
581 [ENOLCK] = TARGET_ENOLCK,
582 [EBADE] = TARGET_EBADE,
583 [EBADR] = TARGET_EBADR,
584 [EXFULL] = TARGET_EXFULL,
585 [ENOANO] = TARGET_ENOANO,
586 [EBADRQC] = TARGET_EBADRQC,
587 [EBADSLT] = TARGET_EBADSLT,
588 [EBFONT] = TARGET_EBFONT,
589 [ENOSTR] = TARGET_ENOSTR,
590 [ENODATA] = TARGET_ENODATA,
591 [ETIME] = TARGET_ETIME,
592 [ENOSR] = TARGET_ENOSR,
593 [ENONET] = TARGET_ENONET,
594 [ENOPKG] = TARGET_ENOPKG,
595 [EREMOTE] = TARGET_EREMOTE,
596 [ENOLINK] = TARGET_ENOLINK,
597 [EADV] = TARGET_EADV,
598 [ESRMNT] = TARGET_ESRMNT,
599 [ECOMM] = TARGET_ECOMM,
600 [EPROTO] = TARGET_EPROTO,
601 [EDOTDOT] = TARGET_EDOTDOT,
602 [EMULTIHOP] = TARGET_EMULTIHOP,
603 [EBADMSG] = TARGET_EBADMSG,
604 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
605 [EOVERFLOW] = TARGET_EOVERFLOW,
606 [ENOTUNIQ] = TARGET_ENOTUNIQ,
607 [EBADFD] = TARGET_EBADFD,
608 [EREMCHG] = TARGET_EREMCHG,
609 [ELIBACC] = TARGET_ELIBACC,
610 [ELIBBAD] = TARGET_ELIBBAD,
611 [ELIBSCN] = TARGET_ELIBSCN,
612 [ELIBMAX] = TARGET_ELIBMAX,
613 [ELIBEXEC] = TARGET_ELIBEXEC,
614 [EILSEQ] = TARGET_EILSEQ,
615 [ENOSYS] = TARGET_ENOSYS,
616 [ELOOP] = TARGET_ELOOP,
617 [ERESTART] = TARGET_ERESTART,
618 [ESTRPIPE] = TARGET_ESTRPIPE,
619 [ENOTEMPTY] = TARGET_ENOTEMPTY,
620 [EUSERS] = TARGET_EUSERS,
621 [ENOTSOCK] = TARGET_ENOTSOCK,
622 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
623 [EMSGSIZE] = TARGET_EMSGSIZE,
624 [EPROTOTYPE] = TARGET_EPROTOTYPE,
625 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
626 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
627 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
628 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
629 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
630 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
631 [EADDRINUSE] = TARGET_EADDRINUSE,
632 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
633 [ENETDOWN] = TARGET_ENETDOWN,
634 [ENETUNREACH] = TARGET_ENETUNREACH,
635 [ENETRESET] = TARGET_ENETRESET,
636 [ECONNABORTED] = TARGET_ECONNABORTED,
637 [ECONNRESET] = TARGET_ECONNRESET,
638 [ENOBUFS] = TARGET_ENOBUFS,
639 [EISCONN] = TARGET_EISCONN,
640 [ENOTCONN] = TARGET_ENOTCONN,
641 [EUCLEAN] = TARGET_EUCLEAN,
642 [ENOTNAM] = TARGET_ENOTNAM,
643 [ENAVAIL] = TARGET_ENAVAIL,
644 [EISNAM] = TARGET_EISNAM,
645 [EREMOTEIO] = TARGET_EREMOTEIO,
646 [ESHUTDOWN] = TARGET_ESHUTDOWN,
647 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
648 [ETIMEDOUT] = TARGET_ETIMEDOUT,
649 [ECONNREFUSED] = TARGET_ECONNREFUSED,
650 [EHOSTDOWN] = TARGET_EHOSTDOWN,
651 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
652 [EALREADY] = TARGET_EALREADY,
653 [EINPROGRESS] = TARGET_EINPROGRESS,
654 [ESTALE] = TARGET_ESTALE,
655 [ECANCELED] = TARGET_ECANCELED,
656 [ENOMEDIUM] = TARGET_ENOMEDIUM,
657 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
658 #ifdef ENOKEY
659 [ENOKEY] = TARGET_ENOKEY,
660 #endif
661 #ifdef EKEYEXPIRED
662 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
663 #endif
664 #ifdef EKEYREVOKED
665 [EKEYREVOKED] = TARGET_EKEYREVOKED,
666 #endif
667 #ifdef EKEYREJECTED
668 [EKEYREJECTED] = TARGET_EKEYREJECTED,
669 #endif
670 #ifdef EOWNERDEAD
671 [EOWNERDEAD] = TARGET_EOWNERDEAD,
672 #endif
673 #ifdef ENOTRECOVERABLE
674 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
675 #endif
676 };
678 static inline int host_to_target_errno(int err)
680 if(host_to_target_errno_table[err])
681 return host_to_target_errno_table[err];
682 return err;
685 static inline int target_to_host_errno(int err)
687 if (target_to_host_errno_table[err])
688 return target_to_host_errno_table[err];
689 return err;
692 static inline abi_long get_errno(abi_long ret)
694 if (ret == -1)
695 return -host_to_target_errno(errno);
696 else
697 return ret;
700 static inline int is_error(abi_long ret)
702 return (abi_ulong)ret >= (abi_ulong)(-4096);
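/*
 * Like the kernel ABI, error returns are encoded as small negative values:
 * get_errno() maps a failed host call to the negated *target* errno, and
 * is_error() treats anything in the range [-4095, -1] as an error code
 * rather than a valid result.
 */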
705 char *target_strerror(int err)
707 return strerror(target_to_host_errno(err));
710 static abi_ulong target_brk;
711 static abi_ulong target_original_brk;
713 void target_set_brk(abi_ulong new_brk)
715 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
718 /* do_brk() must return target values and target errnos. */
719 abi_long do_brk(abi_ulong new_brk)
721 abi_ulong brk_page;
722 abi_long mapped_addr;
723 int new_alloc_size;
725 if (!new_brk)
726 return target_brk;
727 if (new_brk < target_original_brk)
728 return target_brk;
730 brk_page = HOST_PAGE_ALIGN(target_brk);
732 /* If the new brk is less than this, set it and we're done... */
733 if (new_brk < brk_page) {
734 target_brk = new_brk;
735 return target_brk;
738 /* We need to allocate more memory after the brk... */
739 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
740 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
741 PROT_READ|PROT_WRITE,
742 MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
744 #if defined(TARGET_ALPHA)
745 /* We (partially) emulate OSF/1 on Alpha, which requires we
746 return a proper errno, not an unchanged brk value. */
747 if (is_error(mapped_addr)) {
748 return -TARGET_ENOMEM;
750 #endif
752 if (!is_error(mapped_addr)) {
753 target_brk = new_brk;
755 return target_brk;
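/*
 * The guest heap is grown lazily: as long as the new break still fits in
 * the host page that already backs the current break, only target_brk is
 * updated.  Otherwise the gap is filled with an anonymous read/write
 * mapping forced to the page-aligned old break with MAP_FIXED.  On failure
 * the old break is returned unchanged, exactly like the Linux brk syscall
 * (except for the Alpha/OSF-1 case handled above, which wants -ENOMEM).
 */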
758 static inline abi_long copy_from_user_fdset(fd_set *fds,
759 abi_ulong target_fds_addr,
760 int n)
762 int i, nw, j, k;
763 abi_ulong b, *target_fds;
765 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
766 if (!(target_fds = lock_user(VERIFY_READ,
767 target_fds_addr,
768 sizeof(abi_ulong) * nw,
769 1)))
770 return -TARGET_EFAULT;
772 FD_ZERO(fds);
773 k = 0;
774 for (i = 0; i < nw; i++) {
775 /* grab the abi_ulong */
776 __get_user(b, &target_fds[i]);
777 for (j = 0; j < TARGET_ABI_BITS; j++) {
778 /* check the bit inside the abi_ulong */
779 if ((b >> j) & 1)
780 FD_SET(k, fds);
781 k++;
785 unlock_user(target_fds, target_fds_addr, 0);
787 return 0;
790 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
791 const fd_set *fds,
792 int n)
794 int i, nw, j, k;
795 abi_long v;
796 abi_ulong *target_fds;
798 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
799 if (!(target_fds = lock_user(VERIFY_WRITE,
800 target_fds_addr,
801 sizeof(abi_ulong) * nw,
802 0)))
803 return -TARGET_EFAULT;
805 k = 0;
806 for (i = 0; i < nw; i++) {
807 v = 0;
808 for (j = 0; j < TARGET_ABI_BITS; j++) {
809 v |= ((FD_ISSET(k, fds) != 0) << j);
810 k++;
812 __put_user(v, &target_fds[i]);
815 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
817 return 0;
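/*
 * A guest fd_set is an array of abi_ulong words holding TARGET_ABI_BITS
 * bits each, so the two helpers above translate it bit by bit to and from
 * the host fd_set; that keeps the code correct regardless of the word size
 * and byte order of the two ABIs.
 */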
820 #if defined(__alpha__)
821 #define HOST_HZ 1024
822 #else
823 #define HOST_HZ 100
824 #endif
826 static inline abi_long host_to_target_clock_t(long ticks)
828 #if HOST_HZ == TARGET_HZ
829 return ticks;
830 #else
831 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
832 #endif
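/*
 * clock_t values are rescaled from the host tick rate (HOST_HZ: 1024 on
 * Alpha hosts, 100 elsewhere) to the target's TARGET_HZ when the two
 * differ.
 */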
835 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
836 const struct rusage *rusage)
838 struct target_rusage *target_rusage;
840 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
841 return -TARGET_EFAULT;
842 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
843 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
844 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
845 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
846 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
847 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
848 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
849 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
850 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
851 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
852 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
853 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
854 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
855 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
856 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
857 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
858 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
859 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
860 unlock_user_struct(target_rusage, target_addr, 1);
862 return 0;
865 static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
867 if (target_rlim == TARGET_RLIM_INFINITY)
868 return RLIM_INFINITY;
869 else
870 return tswapl(target_rlim);
873 static inline target_ulong host_to_target_rlim(rlim_t rlim)
875 if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
876 return TARGET_RLIM_INFINITY;
877 else
878 return tswapl(rlim);
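/*
 * Resource limits are byte-swapped between the two ABIs; any host value
 * that the target cannot represent (including RLIM_INFINITY itself) is
 * reported as TARGET_RLIM_INFINITY.
 */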
881 static inline abi_long copy_from_user_timeval(struct timeval *tv,
882 abi_ulong target_tv_addr)
884 struct target_timeval *target_tv;
886 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
887 return -TARGET_EFAULT;
889 __get_user(tv->tv_sec, &target_tv->tv_sec);
890 __get_user(tv->tv_usec, &target_tv->tv_usec);
892 unlock_user_struct(target_tv, target_tv_addr, 0);
894 return 0;
897 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
898 const struct timeval *tv)
900 struct target_timeval *target_tv;
902 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
903 return -TARGET_EFAULT;
905 __put_user(tv->tv_sec, &target_tv->tv_sec);
906 __put_user(tv->tv_usec, &target_tv->tv_usec);
908 unlock_user_struct(target_tv, target_tv_addr, 1);
910 return 0;
913 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
914 #include <mqueue.h>
916 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
917 abi_ulong target_mq_attr_addr)
919 struct target_mq_attr *target_mq_attr;
921 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
922 target_mq_attr_addr, 1))
923 return -TARGET_EFAULT;
925 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
926 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
927 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
928 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
930 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
932 return 0;
935 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
936 const struct mq_attr *attr)
938 struct target_mq_attr *target_mq_attr;
940 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
941 target_mq_attr_addr, 0))
942 return -TARGET_EFAULT;
944 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
945 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
946 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
947 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
949 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
951 return 0;
953 #endif
955 /* do_select() must return target values and target errnos. */
956 static abi_long do_select(int n,
957 abi_ulong rfd_addr, abi_ulong wfd_addr,
958 abi_ulong efd_addr, abi_ulong target_tv_addr)
960 fd_set rfds, wfds, efds;
961 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
962 struct timeval tv, *tv_ptr;
963 abi_long ret;
965 if (rfd_addr) {
966 if (copy_from_user_fdset(&rfds, rfd_addr, n))
967 return -TARGET_EFAULT;
968 rfds_ptr = &rfds;
969 } else {
970 rfds_ptr = NULL;
972 if (wfd_addr) {
973 if (copy_from_user_fdset(&wfds, wfd_addr, n))
974 return -TARGET_EFAULT;
975 wfds_ptr = &wfds;
976 } else {
977 wfds_ptr = NULL;
979 if (efd_addr) {
980 if (copy_from_user_fdset(&efds, efd_addr, n))
981 return -TARGET_EFAULT;
982 efds_ptr = &efds;
983 } else {
984 efds_ptr = NULL;
987 if (target_tv_addr) {
988 if (copy_from_user_timeval(&tv, target_tv_addr))
989 return -TARGET_EFAULT;
990 tv_ptr = &tv;
991 } else {
992 tv_ptr = NULL;
995 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
997 if (!is_error(ret)) {
998 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
999 return -TARGET_EFAULT;
1000 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1001 return -TARGET_EFAULT;
1002 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1003 return -TARGET_EFAULT;
1005 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1006 return -TARGET_EFAULT;
1009 return ret;
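/*
 * do_select() converts the guest fd_sets and timeout, runs the host
 * select(), and on success copies the (possibly modified) fd_sets and the
 * remaining timeout back to guest memory, matching Linux's behaviour of
 * updating the timeval in place.
 */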
1012 static abi_long do_pipe2(int host_pipe[], int flags)
1014 #ifdef CONFIG_PIPE2
1015 return pipe2(host_pipe, flags);
1016 #else
1017 return -ENOSYS;
1018 #endif
1021 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1022 int flags, int is_pipe2)
1024 int host_pipe[2];
1025 abi_long ret;
1026 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1028 if (is_error(ret))
1029 return get_errno(ret);
1031 /* Several targets have special calling conventions for the original
1032 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1033 if (!is_pipe2) {
1034 #if defined(TARGET_ALPHA)
1035 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1036 return host_pipe[0];
1037 #elif defined(TARGET_MIPS)
1038 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1039 return host_pipe[0];
1040 #elif defined(TARGET_SH4)
1041 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1042 return host_pipe[0];
1043 #endif
1046 if (put_user_s32(host_pipe[0], pipedes)
1047 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1048 return -TARGET_EFAULT;
1049 return get_errno(ret);
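/*
 * Several ABIs (Alpha, MIPS, SH4) return the two descriptors of the
 * classic pipe() syscall in registers: the read end as the syscall result
 * and the write end in IR_A4, v1 or r1 respectively, instead of storing
 * both through the user pointer.  pipe2() always uses the pointer form.
 */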
1052 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1053 abi_ulong target_addr,
1054 socklen_t len)
1056 struct target_ip_mreqn *target_smreqn;
1058 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1059 if (!target_smreqn)
1060 return -TARGET_EFAULT;
1061 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1062 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1063 if (len == sizeof(struct target_ip_mreqn))
1064 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
1065 unlock_user(target_smreqn, target_addr, 0);
1067 return 0;
1070 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1071 abi_ulong target_addr,
1072 socklen_t len)
1074 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1075 sa_family_t sa_family;
1076 struct target_sockaddr *target_saddr;
1078 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1079 if (!target_saddr)
1080 return -TARGET_EFAULT;
1082 sa_family = tswap16(target_saddr->sa_family);
1084 /* Oops. The caller might send an incomplete sun_path; sun_path
1085 * must be terminated by \0 (see the manual page), but
1086 * unfortunately it is quite common to specify sockaddr_un
1087 * length as "strlen(x->sun_path)" while it should be
1088 * "strlen(...) + 1". We'll fix that here if needed.
1089 * The Linux kernel has a similar feature.
1090 */
1092 if (sa_family == AF_UNIX) {
1093 if (len < unix_maxlen && len > 0) {
1094 char *cp = (char*)target_saddr;
1096 if ( cp[len-1] && !cp[len] )
1097 len++;
1099 if (len > unix_maxlen)
1100 len = unix_maxlen;
1103 memcpy(addr, target_saddr, len);
1104 addr->sa_family = sa_family;
1105 unlock_user(target_saddr, target_addr, 0);
1107 return 0;
1110 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1111 struct sockaddr *addr,
1112 socklen_t len)
1114 struct target_sockaddr *target_saddr;
1116 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1117 if (!target_saddr)
1118 return -TARGET_EFAULT;
1119 memcpy(target_saddr, addr, len);
1120 target_saddr->sa_family = tswap16(addr->sa_family);
1121 unlock_user(target_saddr, target_addr, len);
1123 return 0;
1126 /* ??? Should this also swap msgh->name? */
1127 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1128 struct target_msghdr *target_msgh)
1130 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1131 abi_long msg_controllen;
1132 abi_ulong target_cmsg_addr;
1133 struct target_cmsghdr *target_cmsg;
1134 socklen_t space = 0;
1136 msg_controllen = tswapl(target_msgh->msg_controllen);
1137 if (msg_controllen < sizeof (struct target_cmsghdr))
1138 goto the_end;
1139 target_cmsg_addr = tswapl(target_msgh->msg_control);
1140 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1141 if (!target_cmsg)
1142 return -TARGET_EFAULT;
1144 while (cmsg && target_cmsg) {
1145 void *data = CMSG_DATA(cmsg);
1146 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1148 int len = tswapl(target_cmsg->cmsg_len)
1149 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1151 space += CMSG_SPACE(len);
1152 if (space > msgh->msg_controllen) {
1153 space -= CMSG_SPACE(len);
1154 gemu_log("Host cmsg overflow\n");
1155 break;
1158 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1159 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1160 cmsg->cmsg_len = CMSG_LEN(len);
1162 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1163 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1164 memcpy(data, target_data, len);
1165 } else {
1166 int *fd = (int *)data;
1167 int *target_fd = (int *)target_data;
1168 int i, numfds = len / sizeof(int);
1170 for (i = 0; i < numfds; i++)
1171 fd[i] = tswap32(target_fd[i]);
1174 cmsg = CMSG_NXTHDR(msgh, cmsg);
1175 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1177 unlock_user(target_cmsg, target_cmsg_addr, 0);
1178 the_end:
1179 msgh->msg_controllen = space;
1180 return 0;
1183 /* ??? Should this also swap msgh->name? */
1184 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1185 struct msghdr *msgh)
1187 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1188 abi_long msg_controllen;
1189 abi_ulong target_cmsg_addr;
1190 struct target_cmsghdr *target_cmsg;
1191 socklen_t space = 0;
1193 msg_controllen = tswapl(target_msgh->msg_controllen);
1194 if (msg_controllen < sizeof (struct target_cmsghdr))
1195 goto the_end;
1196 target_cmsg_addr = tswapl(target_msgh->msg_control);
1197 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1198 if (!target_cmsg)
1199 return -TARGET_EFAULT;
1201 while (cmsg && target_cmsg) {
1202 void *data = CMSG_DATA(cmsg);
1203 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1205 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1207 space += TARGET_CMSG_SPACE(len);
1208 if (space > msg_controllen) {
1209 space -= TARGET_CMSG_SPACE(len);
1210 gemu_log("Target cmsg overflow\n");
1211 break;
1214 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1215 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1216 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1218 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1219 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1220 memcpy(target_data, data, len);
1221 } else {
1222 int *fd = (int *)data;
1223 int *target_fd = (int *)target_data;
1224 int i, numfds = len / sizeof(int);
1226 for (i = 0; i < numfds; i++)
1227 target_fd[i] = tswap32(fd[i]);
1230 cmsg = CMSG_NXTHDR(msgh, cmsg);
1231 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1233 unlock_user(target_cmsg, target_cmsg_addr, space);
1234 the_end:
1235 target_msgh->msg_controllen = tswapl(space);
1236 return 0;
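/*
 * Control messages are rebuilt rather than copied wholesale because host
 * and target disagree on cmsghdr layout and alignment: lengths are
 * recomputed with CMSG_LEN()/TARGET_CMSG_LEN(), SCM_RIGHTS payloads have
 * every file descriptor byte-swapped individually, and any other ancillary
 * type is passed through unchanged with a warning.
 */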
1239 /* do_setsockopt() Must return target values and target errnos. */
1240 static abi_long do_setsockopt(int sockfd, int level, int optname,
1241 abi_ulong optval_addr, socklen_t optlen)
1243 abi_long ret;
1244 int val;
1245 struct ip_mreqn *ip_mreq;
1246 struct ip_mreq_source *ip_mreq_source;
1248 switch(level) {
1249 case SOL_TCP:
1250 /* TCP options all take an 'int' value. */
1251 if (optlen < sizeof(uint32_t))
1252 return -TARGET_EINVAL;
1254 if (get_user_u32(val, optval_addr))
1255 return -TARGET_EFAULT;
1256 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1257 break;
1258 case SOL_IP:
1259 switch(optname) {
1260 case IP_TOS:
1261 case IP_TTL:
1262 case IP_HDRINCL:
1263 case IP_ROUTER_ALERT:
1264 case IP_RECVOPTS:
1265 case IP_RETOPTS:
1266 case IP_PKTINFO:
1267 case IP_MTU_DISCOVER:
1268 case IP_RECVERR:
1269 case IP_RECVTOS:
1270 #ifdef IP_FREEBIND
1271 case IP_FREEBIND:
1272 #endif
1273 case IP_MULTICAST_TTL:
1274 case IP_MULTICAST_LOOP:
1275 val = 0;
1276 if (optlen >= sizeof(uint32_t)) {
1277 if (get_user_u32(val, optval_addr))
1278 return -TARGET_EFAULT;
1279 } else if (optlen >= 1) {
1280 if (get_user_u8(val, optval_addr))
1281 return -TARGET_EFAULT;
1283 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1284 break;
1285 case IP_ADD_MEMBERSHIP:
1286 case IP_DROP_MEMBERSHIP:
1287 if (optlen < sizeof (struct target_ip_mreq) ||
1288 optlen > sizeof (struct target_ip_mreqn))
1289 return -TARGET_EINVAL;
1291 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1292 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1293 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1294 break;
1296 case IP_BLOCK_SOURCE:
1297 case IP_UNBLOCK_SOURCE:
1298 case IP_ADD_SOURCE_MEMBERSHIP:
1299 case IP_DROP_SOURCE_MEMBERSHIP:
1300 if (optlen != sizeof (struct target_ip_mreq_source))
1301 return -TARGET_EINVAL;
1303 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1304 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1305 unlock_user (ip_mreq_source, optval_addr, 0);
1306 break;
1308 default:
1309 goto unimplemented;
1311 break;
1312 case TARGET_SOL_SOCKET:
1313 switch (optname) {
1314 /* Options with 'int' argument. */
1315 case TARGET_SO_DEBUG:
1316 optname = SO_DEBUG;
1317 break;
1318 case TARGET_SO_REUSEADDR:
1319 optname = SO_REUSEADDR;
1320 break;
1321 case TARGET_SO_TYPE:
1322 optname = SO_TYPE;
1323 break;
1324 case TARGET_SO_ERROR:
1325 optname = SO_ERROR;
1326 break;
1327 case TARGET_SO_DONTROUTE:
1328 optname = SO_DONTROUTE;
1329 break;
1330 case TARGET_SO_BROADCAST:
1331 optname = SO_BROADCAST;
1332 break;
1333 case TARGET_SO_SNDBUF:
1334 optname = SO_SNDBUF;
1335 break;
1336 case TARGET_SO_RCVBUF:
1337 optname = SO_RCVBUF;
1338 break;
1339 case TARGET_SO_KEEPALIVE:
1340 optname = SO_KEEPALIVE;
1341 break;
1342 case TARGET_SO_OOBINLINE:
1343 optname = SO_OOBINLINE;
1344 break;
1345 case TARGET_SO_NO_CHECK:
1346 optname = SO_NO_CHECK;
1347 break;
1348 case TARGET_SO_PRIORITY:
1349 optname = SO_PRIORITY;
1350 break;
1351 #ifdef SO_BSDCOMPAT
1352 case TARGET_SO_BSDCOMPAT:
1353 optname = SO_BSDCOMPAT;
1354 break;
1355 #endif
1356 case TARGET_SO_PASSCRED:
1357 optname = SO_PASSCRED;
1358 break;
1359 case TARGET_SO_TIMESTAMP:
1360 optname = SO_TIMESTAMP;
1361 break;
1362 case TARGET_SO_RCVLOWAT:
1363 optname = SO_RCVLOWAT;
1364 break;
1365 case TARGET_SO_RCVTIMEO:
1366 optname = SO_RCVTIMEO;
1367 break;
1368 case TARGET_SO_SNDTIMEO:
1369 optname = SO_SNDTIMEO;
1370 break;
1371 break;
1372 default:
1373 goto unimplemented;
1375 if (optlen < sizeof(uint32_t))
1376 return -TARGET_EINVAL;
1378 if (get_user_u32(val, optval_addr))
1379 return -TARGET_EFAULT;
1380 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1381 break;
1382 default:
1383 unimplemented:
1384 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1385 ret = -TARGET_ENOPROTOOPT;
1387 return ret;
1390 /* do_getsockopt() Must return target values and target errnos. */
1391 static abi_long do_getsockopt(int sockfd, int level, int optname,
1392 abi_ulong optval_addr, abi_ulong optlen)
1394 abi_long ret;
1395 int len, val;
1396 socklen_t lv;
1398 switch(level) {
1399 case TARGET_SOL_SOCKET:
1400 level = SOL_SOCKET;
1401 switch (optname) {
1402 /* These don't just return a single integer */
1403 case TARGET_SO_LINGER:
1404 case TARGET_SO_RCVTIMEO:
1405 case TARGET_SO_SNDTIMEO:
1406 case TARGET_SO_PEERCRED:
1407 case TARGET_SO_PEERNAME:
1408 goto unimplemented;
1409 /* Options with 'int' argument. */
1410 case TARGET_SO_DEBUG:
1411 optname = SO_DEBUG;
1412 goto int_case;
1413 case TARGET_SO_REUSEADDR:
1414 optname = SO_REUSEADDR;
1415 goto int_case;
1416 case TARGET_SO_TYPE:
1417 optname = SO_TYPE;
1418 goto int_case;
1419 case TARGET_SO_ERROR:
1420 optname = SO_ERROR;
1421 goto int_case;
1422 case TARGET_SO_DONTROUTE:
1423 optname = SO_DONTROUTE;
1424 goto int_case;
1425 case TARGET_SO_BROADCAST:
1426 optname = SO_BROADCAST;
1427 goto int_case;
1428 case TARGET_SO_SNDBUF:
1429 optname = SO_SNDBUF;
1430 goto int_case;
1431 case TARGET_SO_RCVBUF:
1432 optname = SO_RCVBUF;
1433 goto int_case;
1434 case TARGET_SO_KEEPALIVE:
1435 optname = SO_KEEPALIVE;
1436 goto int_case;
1437 case TARGET_SO_OOBINLINE:
1438 optname = SO_OOBINLINE;
1439 goto int_case;
1440 case TARGET_SO_NO_CHECK:
1441 optname = SO_NO_CHECK;
1442 goto int_case;
1443 case TARGET_SO_PRIORITY:
1444 optname = SO_PRIORITY;
1445 goto int_case;
1446 #ifdef SO_BSDCOMPAT
1447 case TARGET_SO_BSDCOMPAT:
1448 optname = SO_BSDCOMPAT;
1449 goto int_case;
1450 #endif
1451 case TARGET_SO_PASSCRED:
1452 optname = SO_PASSCRED;
1453 goto int_case;
1454 case TARGET_SO_TIMESTAMP:
1455 optname = SO_TIMESTAMP;
1456 goto int_case;
1457 case TARGET_SO_RCVLOWAT:
1458 optname = SO_RCVLOWAT;
1459 goto int_case;
1460 default:
1461 goto int_case;
1463 break;
1464 case SOL_TCP:
1465 /* TCP options all take an 'int' value. */
1466 int_case:
1467 if (get_user_u32(len, optlen))
1468 return -TARGET_EFAULT;
1469 if (len < 0)
1470 return -TARGET_EINVAL;
1471 lv = sizeof(lv);
1472 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1473 if (ret < 0)
1474 return ret;
1475 if (len > lv)
1476 len = lv;
1477 if (len == 4) {
1478 if (put_user_u32(val, optval_addr))
1479 return -TARGET_EFAULT;
1480 } else {
1481 if (put_user_u8(val, optval_addr))
1482 return -TARGET_EFAULT;
1484 if (put_user_u32(len, optlen))
1485 return -TARGET_EFAULT;
1486 break;
1487 case SOL_IP:
1488 switch(optname) {
1489 case IP_TOS:
1490 case IP_TTL:
1491 case IP_HDRINCL:
1492 case IP_ROUTER_ALERT:
1493 case IP_RECVOPTS:
1494 case IP_RETOPTS:
1495 case IP_PKTINFO:
1496 case IP_MTU_DISCOVER:
1497 case IP_RECVERR:
1498 case IP_RECVTOS:
1499 #ifdef IP_FREEBIND
1500 case IP_FREEBIND:
1501 #endif
1502 case IP_MULTICAST_TTL:
1503 case IP_MULTICAST_LOOP:
1504 if (get_user_u32(len, optlen))
1505 return -TARGET_EFAULT;
1506 if (len < 0)
1507 return -TARGET_EINVAL;
1508 lv = sizeof(lv);
1509 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1510 if (ret < 0)
1511 return ret;
1512 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1513 len = 1;
1514 if (put_user_u32(len, optlen)
1515 || put_user_u8(val, optval_addr))
1516 return -TARGET_EFAULT;
1517 } else {
1518 if (len > sizeof(int))
1519 len = sizeof(int);
1520 if (put_user_u32(len, optlen)
1521 || put_user_u32(val, optval_addr))
1522 return -TARGET_EFAULT;
1524 break;
1525 default:
1526 ret = -TARGET_ENOPROTOOPT;
1527 break;
1529 break;
1530 default:
1531 unimplemented:
1532 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1533 level, optname);
1534 ret = -TARGET_EOPNOTSUPP;
1535 break;
1537 return ret;
1540 /* FIXME
1541 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1542 * other lock functions have a return code of 0 for failure.
1543 */
1544 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1545 int count, int copy)
1547 struct target_iovec *target_vec;
1548 abi_ulong base;
1549 int i;
1551 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1552 if (!target_vec)
1553 return -TARGET_EFAULT;
1554 for(i = 0;i < count; i++) {
1555 base = tswapl(target_vec[i].iov_base);
1556 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1557 if (vec[i].iov_len != 0) {
1558 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1559 /* Don't check lock_user return value. We must call writev even
1560 if an element has an invalid base address. */
1561 } else {
1562 /* zero length pointer is ignored */
1563 vec[i].iov_base = NULL;
1566 unlock_user (target_vec, target_addr, 0);
1567 return 0;
1570 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1571 int count, int copy)
1573 struct target_iovec *target_vec;
1574 abi_ulong base;
1575 int i;
1577 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1578 if (!target_vec)
1579 return -TARGET_EFAULT;
1580 for(i = 0;i < count; i++) {
1581 if (target_vec[i].iov_base) {
1582 base = tswapl(target_vec[i].iov_base);
1583 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1586 unlock_user (target_vec, target_addr, 0);
1588 return 0;
1591 /* do_socket() Must return target values and target errnos. */
1592 static abi_long do_socket(int domain, int type, int protocol)
1594 #if defined(TARGET_MIPS)
1595 switch(type) {
1596 case TARGET_SOCK_DGRAM:
1597 type = SOCK_DGRAM;
1598 break;
1599 case TARGET_SOCK_STREAM:
1600 type = SOCK_STREAM;
1601 break;
1602 case TARGET_SOCK_RAW:
1603 type = SOCK_RAW;
1604 break;
1605 case TARGET_SOCK_RDM:
1606 type = SOCK_RDM;
1607 break;
1608 case TARGET_SOCK_SEQPACKET:
1609 type = SOCK_SEQPACKET;
1610 break;
1611 case TARGET_SOCK_PACKET:
1612 type = SOCK_PACKET;
1613 break;
1615 #endif
1616 if (domain == PF_NETLINK)
1617 return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
1618 return get_errno(socket(domain, type, protocol));
1621 /* do_bind() Must return target values and target errnos. */
1622 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1623 socklen_t addrlen)
1625 void *addr;
1626 abi_long ret;
1628 if ((int)addrlen < 0) {
1629 return -TARGET_EINVAL;
1632 addr = alloca(addrlen+1);
1634 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1635 if (ret)
1636 return ret;
1638 return get_errno(bind(sockfd, addr, addrlen));
1641 /* do_connect() Must return target values and target errnos. */
1642 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1643 socklen_t addrlen)
1645 void *addr;
1646 abi_long ret;
1648 if ((int)addrlen < 0) {
1649 return -TARGET_EINVAL;
1652 addr = alloca(addrlen);
1654 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1655 if (ret)
1656 return ret;
1658 return get_errno(connect(sockfd, addr, addrlen));
1661 /* do_sendrecvmsg() Must return target values and target errnos. */
1662 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1663 int flags, int send)
1665 abi_long ret, len;
1666 struct target_msghdr *msgp;
1667 struct msghdr msg;
1668 int count;
1669 struct iovec *vec;
1670 abi_ulong target_vec;
1672 /* FIXME */
1673 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1674 msgp,
1675 target_msg,
1676 send ? 1 : 0))
1677 return -TARGET_EFAULT;
1678 if (msgp->msg_name) {
1679 msg.msg_namelen = tswap32(msgp->msg_namelen);
1680 msg.msg_name = alloca(msg.msg_namelen);
1681 ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1682 msg.msg_namelen);
1683 if (ret) {
1684 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1685 return ret;
1687 } else {
1688 msg.msg_name = NULL;
1689 msg.msg_namelen = 0;
1691 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1692 msg.msg_control = alloca(msg.msg_controllen);
1693 msg.msg_flags = tswap32(msgp->msg_flags);
1695 count = tswapl(msgp->msg_iovlen);
1696 vec = alloca(count * sizeof(struct iovec));
1697 target_vec = tswapl(msgp->msg_iov);
1698 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1699 msg.msg_iovlen = count;
1700 msg.msg_iov = vec;
1702 if (send) {
1703 ret = target_to_host_cmsg(&msg, msgp);
1704 if (ret == 0)
1705 ret = get_errno(sendmsg(fd, &msg, flags));
1706 } else {
1707 ret = get_errno(recvmsg(fd, &msg, flags));
1708 if (!is_error(ret)) {
1709 len = ret;
1710 ret = host_to_target_cmsg(msgp, &msg);
1711 if (!is_error(ret))
1712 ret = len;
1715 unlock_iovec(vec, target_vec, count, !send);
1716 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1717 return ret;
1720 /* do_accept() Must return target values and target errnos. */
1721 static abi_long do_accept(int fd, abi_ulong target_addr,
1722 abi_ulong target_addrlen_addr)
1724 socklen_t addrlen;
1725 void *addr;
1726 abi_long ret;
1728 if (target_addr == 0)
1729 return get_errno(accept(fd, NULL, NULL));
1731 /* linux returns EINVAL if addrlen pointer is invalid */
1732 if (get_user_u32(addrlen, target_addrlen_addr))
1733 return -TARGET_EINVAL;
1735 if ((int)addrlen < 0) {
1736 return -TARGET_EINVAL;
1739 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1740 return -TARGET_EINVAL;
1742 addr = alloca(addrlen);
1744 ret = get_errno(accept(fd, addr, &addrlen));
1745 if (!is_error(ret)) {
1746 host_to_target_sockaddr(target_addr, addr, addrlen);
1747 if (put_user_u32(addrlen, target_addrlen_addr))
1748 ret = -TARGET_EFAULT;
1750 return ret;
1753 /* do_getpeername() Must return target values and target errnos. */
1754 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1755 abi_ulong target_addrlen_addr)
1757 socklen_t addrlen;
1758 void *addr;
1759 abi_long ret;
1761 if (get_user_u32(addrlen, target_addrlen_addr))
1762 return -TARGET_EFAULT;
1764 if ((int)addrlen < 0) {
1765 return -TARGET_EINVAL;
1768 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1769 return -TARGET_EFAULT;
1771 addr = alloca(addrlen);
1773 ret = get_errno(getpeername(fd, addr, &addrlen));
1774 if (!is_error(ret)) {
1775 host_to_target_sockaddr(target_addr, addr, addrlen);
1776 if (put_user_u32(addrlen, target_addrlen_addr))
1777 ret = -TARGET_EFAULT;
1779 return ret;
1782 /* do_getsockname() Must return target values and target errnos. */
1783 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1784 abi_ulong target_addrlen_addr)
1786 socklen_t addrlen;
1787 void *addr;
1788 abi_long ret;
1790 if (get_user_u32(addrlen, target_addrlen_addr))
1791 return -TARGET_EFAULT;
1793 if ((int)addrlen < 0) {
1794 return -TARGET_EINVAL;
1797 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1798 return -TARGET_EFAULT;
1800 addr = alloca(addrlen);
1802 ret = get_errno(getsockname(fd, addr, &addrlen));
1803 if (!is_error(ret)) {
1804 host_to_target_sockaddr(target_addr, addr, addrlen);
1805 if (put_user_u32(addrlen, target_addrlen_addr))
1806 ret = -TARGET_EFAULT;
1808 return ret;
1811 /* do_socketpair() Must return target values and target errnos. */
1812 static abi_long do_socketpair(int domain, int type, int protocol,
1813 abi_ulong target_tab_addr)
1815 int tab[2];
1816 abi_long ret;
1818 ret = get_errno(socketpair(domain, type, protocol, tab));
1819 if (!is_error(ret)) {
1820 if (put_user_s32(tab[0], target_tab_addr)
1821 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1822 ret = -TARGET_EFAULT;
1824 return ret;
1827 /* do_sendto() Must return target values and target errnos. */
1828 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1829 abi_ulong target_addr, socklen_t addrlen)
1831 void *addr;
1832 void *host_msg;
1833 abi_long ret;
1835 if ((int)addrlen < 0) {
1836 return -TARGET_EINVAL;
1839 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1840 if (!host_msg)
1841 return -TARGET_EFAULT;
1842 if (target_addr) {
1843 addr = alloca(addrlen);
1844 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1845 if (ret) {
1846 unlock_user(host_msg, msg, 0);
1847 return ret;
1849 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1850 } else {
1851 ret = get_errno(send(fd, host_msg, len, flags));
1853 unlock_user(host_msg, msg, 0);
1854 return ret;
1857 /* do_recvfrom() Must return target values and target errnos. */
1858 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1859 abi_ulong target_addr,
1860 abi_ulong target_addrlen)
1862 socklen_t addrlen;
1863 void *addr;
1864 void *host_msg;
1865 abi_long ret;
1867 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1868 if (!host_msg)
1869 return -TARGET_EFAULT;
1870 if (target_addr) {
1871 if (get_user_u32(addrlen, target_addrlen)) {
1872 ret = -TARGET_EFAULT;
1873 goto fail;
1875 if ((int)addrlen < 0) {
1876 ret = -TARGET_EINVAL;
1877 goto fail;
1879 addr = alloca(addrlen);
1880 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1881 } else {
1882 addr = NULL; /* To keep compiler quiet. */
1883 ret = get_errno(recv(fd, host_msg, len, flags));
1885 if (!is_error(ret)) {
1886 if (target_addr) {
1887 host_to_target_sockaddr(target_addr, addr, addrlen);
1888 if (put_user_u32(addrlen, target_addrlen)) {
1889 ret = -TARGET_EFAULT;
1890 goto fail;
1893 unlock_user(host_msg, msg, len);
1894 } else {
1895 fail:
1896 unlock_user(host_msg, msg, 0);
1898 return ret;
1901 #ifdef TARGET_NR_socketcall
1902 /* do_socketcall() Must return target values and target errnos. */
1903 static abi_long do_socketcall(int num, abi_ulong vptr)
1905 abi_long ret;
1906 const int n = sizeof(abi_ulong);
1908 switch(num) {
1909 case SOCKOP_socket:
1911 abi_ulong domain, type, protocol;
1913 if (get_user_ual(domain, vptr)
1914 || get_user_ual(type, vptr + n)
1915 || get_user_ual(protocol, vptr + 2 * n))
1916 return -TARGET_EFAULT;
1918 ret = do_socket(domain, type, protocol);
1920 break;
1921 case SOCKOP_bind:
1923 abi_ulong sockfd;
1924 abi_ulong target_addr;
1925 socklen_t addrlen;
1927 if (get_user_ual(sockfd, vptr)
1928 || get_user_ual(target_addr, vptr + n)
1929 || get_user_ual(addrlen, vptr + 2 * n))
1930 return -TARGET_EFAULT;
1932 ret = do_bind(sockfd, target_addr, addrlen);
1934 break;
1935 case SOCKOP_connect:
1937 abi_ulong sockfd;
1938 abi_ulong target_addr;
1939 socklen_t addrlen;
1941 if (get_user_ual(sockfd, vptr)
1942 || get_user_ual(target_addr, vptr + n)
1943 || get_user_ual(addrlen, vptr + 2 * n))
1944 return -TARGET_EFAULT;
1946 ret = do_connect(sockfd, target_addr, addrlen);
1948 break;
1949 case SOCKOP_listen:
1951 abi_ulong sockfd, backlog;
1953 if (get_user_ual(sockfd, vptr)
1954 || get_user_ual(backlog, vptr + n))
1955 return -TARGET_EFAULT;
1957 ret = get_errno(listen(sockfd, backlog));
1959 break;
1960 case SOCKOP_accept:
1962 abi_ulong sockfd;
1963 abi_ulong target_addr, target_addrlen;
1965 if (get_user_ual(sockfd, vptr)
1966 || get_user_ual(target_addr, vptr + n)
1967 || get_user_ual(target_addrlen, vptr + 2 * n))
1968 return -TARGET_EFAULT;
1970 ret = do_accept(sockfd, target_addr, target_addrlen);
1972 break;
1973 case SOCKOP_getsockname:
1975 abi_ulong sockfd;
1976 abi_ulong target_addr, target_addrlen;
1978 if (get_user_ual(sockfd, vptr)
1979 || get_user_ual(target_addr, vptr + n)
1980 || get_user_ual(target_addrlen, vptr + 2 * n))
1981 return -TARGET_EFAULT;
1983 ret = do_getsockname(sockfd, target_addr, target_addrlen);
1985 break;
1986 case SOCKOP_getpeername:
1988 abi_ulong sockfd;
1989 abi_ulong target_addr, target_addrlen;
1991 if (get_user_ual(sockfd, vptr)
1992 || get_user_ual(target_addr, vptr + n)
1993 || get_user_ual(target_addrlen, vptr + 2 * n))
1994 return -TARGET_EFAULT;
1996 ret = do_getpeername(sockfd, target_addr, target_addrlen);
1998 break;
1999 case SOCKOP_socketpair:
2001 abi_ulong domain, type, protocol;
2002 abi_ulong tab;
2004 if (get_user_ual(domain, vptr)
2005 || get_user_ual(type, vptr + n)
2006 || get_user_ual(protocol, vptr + 2 * n)
2007 || get_user_ual(tab, vptr + 3 * n))
2008 return -TARGET_EFAULT;
2010 ret = do_socketpair(domain, type, protocol, tab);
2012 break;
2013 case SOCKOP_send:
2015 abi_ulong sockfd;
2016 abi_ulong msg;
2017 size_t len;
2018 abi_ulong flags;
2020 if (get_user_ual(sockfd, vptr)
2021 || get_user_ual(msg, vptr + n)
2022 || get_user_ual(len, vptr + 2 * n)
2023 || get_user_ual(flags, vptr + 3 * n))
2024 return -TARGET_EFAULT;
2026 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2028 break;
2029 case SOCKOP_recv:
2031 abi_ulong sockfd;
2032 abi_ulong msg;
2033 size_t len;
2034 abi_ulong flags;
2036 if (get_user_ual(sockfd, vptr)
2037 || get_user_ual(msg, vptr + n)
2038 || get_user_ual(len, vptr + 2 * n)
2039 || get_user_ual(flags, vptr + 3 * n))
2040 return -TARGET_EFAULT;
2042 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2044 break;
2045 case SOCKOP_sendto:
2047 abi_ulong sockfd;
2048 abi_ulong msg;
2049 size_t len;
2050 abi_ulong flags;
2051 abi_ulong addr;
2052 socklen_t addrlen;
2054 if (get_user_ual(sockfd, vptr)
2055 || get_user_ual(msg, vptr + n)
2056 || get_user_ual(len, vptr + 2 * n)
2057 || get_user_ual(flags, vptr + 3 * n)
2058 || get_user_ual(addr, vptr + 4 * n)
2059 || get_user_ual(addrlen, vptr + 5 * n))
2060 return -TARGET_EFAULT;
2062 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2064 break;
2065 case SOCKOP_recvfrom:
2067 abi_ulong sockfd;
2068 abi_ulong msg;
2069 size_t len;
2070 abi_ulong flags;
2071 abi_ulong addr;
2072 socklen_t addrlen;
2074 if (get_user_ual(sockfd, vptr)
2075 || get_user_ual(msg, vptr + n)
2076 || get_user_ual(len, vptr + 2 * n)
2077 || get_user_ual(flags, vptr + 3 * n)
2078 || get_user_ual(addr, vptr + 4 * n)
2079 || get_user_ual(addrlen, vptr + 5 * n))
2080 return -TARGET_EFAULT;
2082 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2084 break;
2085 case SOCKOP_shutdown:
2087 abi_ulong sockfd, how;
2089 if (get_user_ual(sockfd, vptr)
2090 || get_user_ual(how, vptr + n))
2091 return -TARGET_EFAULT;
2093 ret = get_errno(shutdown(sockfd, how));
2095 break;
2096 case SOCKOP_sendmsg:
2097 case SOCKOP_recvmsg:
2099 abi_ulong fd;
2100 abi_ulong target_msg;
2101 abi_ulong flags;
2103 if (get_user_ual(fd, vptr)
2104 || get_user_ual(target_msg, vptr + n)
2105 || get_user_ual(flags, vptr + 2 * n))
2106 return -TARGET_EFAULT;
2108 ret = do_sendrecvmsg(fd, target_msg, flags,
2109 (num == SOCKOP_sendmsg));
2111 break;
2112 case SOCKOP_setsockopt:
2114 abi_ulong sockfd;
2115 abi_ulong level;
2116 abi_ulong optname;
2117 abi_ulong optval;
2118 socklen_t optlen;
2120 if (get_user_ual(sockfd, vptr)
2121 || get_user_ual(level, vptr + n)
2122 || get_user_ual(optname, vptr + 2 * n)
2123 || get_user_ual(optval, vptr + 3 * n)
2124 || get_user_ual(optlen, vptr + 4 * n))
2125 return -TARGET_EFAULT;
2127 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2129 break;
2130 case SOCKOP_getsockopt:
2132 abi_ulong sockfd;
2133 abi_ulong level;
2134 abi_ulong optname;
2135 abi_ulong optval;
2136 socklen_t optlen;
2138 if (get_user_ual(sockfd, vptr)
2139 || get_user_ual(level, vptr + n)
2140 || get_user_ual(optname, vptr + 2 * n)
2141 || get_user_ual(optval, vptr + 3 * n)
2142 || get_user_ual(optlen, vptr + 4 * n))
2143 return -TARGET_EFAULT;
2145 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2147 break;
2148 default:
2149 gemu_log("Unsupported socketcall: %d\n", num);
2150 ret = -TARGET_ENOSYS;
2151 break;
2153 return ret;
2155 #endif
2157 #define N_SHM_REGIONS 32
2159 static struct shm_region {
2160 abi_ulong start;
2161 abi_ulong size;
2162 } shm_regions[N_SHM_REGIONS];
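/* shm_regions[] records the guest address and size of each segment attached by
 * do_shmat(), so that do_shmdt() can later clear the page flags for the right
 * range; a start address of 0 marks an unused slot. */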
2164 struct target_ipc_perm
2166 abi_long __key;
2167 abi_ulong uid;
2168 abi_ulong gid;
2169 abi_ulong cuid;
2170 abi_ulong cgid;
2171 unsigned short int mode;
2172 unsigned short int __pad1;
2173 unsigned short int __seq;
2174 unsigned short int __pad2;
2175 abi_ulong __unused1;
2176 abi_ulong __unused2;
2179 struct target_semid_ds
2181 struct target_ipc_perm sem_perm;
2182 abi_ulong sem_otime;
2183 abi_ulong __unused1;
2184 abi_ulong sem_ctime;
2185 abi_ulong __unused2;
2186 abi_ulong sem_nsems;
2187 abi_ulong __unused3;
2188 abi_ulong __unused4;
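/* The ipc_perm/semid_ds conversion helpers below copy each field between the
 * guest layout (struct target_*) and the host layout, byte-swapping with
 * tswapl() where guest and host byte order differ. */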
2191 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2192 abi_ulong target_addr)
2194 struct target_ipc_perm *target_ip;
2195 struct target_semid_ds *target_sd;
2197 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2198 return -TARGET_EFAULT;
2199 target_ip = &(target_sd->sem_perm);
2200 host_ip->__key = tswapl(target_ip->__key);
2201 host_ip->uid = tswapl(target_ip->uid);
2202 host_ip->gid = tswapl(target_ip->gid);
2203 host_ip->cuid = tswapl(target_ip->cuid);
2204 host_ip->cgid = tswapl(target_ip->cgid);
2205 host_ip->mode = tswapl(target_ip->mode);
2206 unlock_user_struct(target_sd, target_addr, 0);
2207 return 0;
2210 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2211 struct ipc_perm *host_ip)
2213 struct target_ipc_perm *target_ip;
2214 struct target_semid_ds *target_sd;
2216 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2217 return -TARGET_EFAULT;
2218 target_ip = &(target_sd->sem_perm);
2219 target_ip->__key = tswapl(host_ip->__key);
2220 target_ip->uid = tswapl(host_ip->uid);
2221 target_ip->gid = tswapl(host_ip->gid);
2222 target_ip->cuid = tswapl(host_ip->cuid);
2223 target_ip->cgid = tswapl(host_ip->cgid);
2224 target_ip->mode = tswapl(host_ip->mode);
2225 unlock_user_struct(target_sd, target_addr, 1);
2226 return 0;
2229 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2230 abi_ulong target_addr)
2232 struct target_semid_ds *target_sd;
2234 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2235 return -TARGET_EFAULT;
2236 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2237 return -TARGET_EFAULT;
2238 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2239 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2240 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2241 unlock_user_struct(target_sd, target_addr, 0);
2242 return 0;
2245 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2246 struct semid_ds *host_sd)
2248 struct target_semid_ds *target_sd;
2250 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2251 return -TARGET_EFAULT;
2252 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2253 return -TARGET_EFAULT;
2254 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2255 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2256 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2257 unlock_user_struct(target_sd, target_addr, 1);
2258 return 0;
2261 struct target_seminfo {
2262 int semmap;
2263 int semmni;
2264 int semmns;
2265 int semmnu;
2266 int semmsl;
2267 int semopm;
2268 int semume;
2269 int semusz;
2270 int semvmx;
2271 int semaem;
2274 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2275 struct seminfo *host_seminfo)
2277 struct target_seminfo *target_seminfo;
2278 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2279 return -TARGET_EFAULT;
2280 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2281 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2282 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2283 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2284 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2285 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2286 __put_user(host_seminfo->semume, &target_seminfo->semume);
2287 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2288 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2289 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2290 unlock_user_struct(target_seminfo, target_addr, 1);
2291 return 0;
2294 union semun {
2295 int val;
2296 struct semid_ds *buf;
2297 unsigned short *array;
2298 struct seminfo *__buf;
2301 union target_semun {
2302 int val;
2303 abi_ulong buf;
2304 abi_ulong array;
2305 abi_ulong __buf;
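/* union target_semun mirrors the host union semun, but its pointer members are
 * stored as guest addresses (abi_ulong); do_semctl() converts the member
 * selected by the command into a host-side argument before calling semctl(). */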
2308 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2309 abi_ulong target_addr)
2311 int nsems;
2312 unsigned short *array;
2313 union semun semun;
2314 struct semid_ds semid_ds;
2315 int i, ret;
2317 semun.buf = &semid_ds;
2319 ret = semctl(semid, 0, IPC_STAT, semun);
2320 if (ret == -1)
2321 return get_errno(ret);
2323 nsems = semid_ds.sem_nsems;
2325 *host_array = malloc(nsems*sizeof(unsigned short));
2326 array = lock_user(VERIFY_READ, target_addr,
2327 nsems*sizeof(unsigned short), 1);
2328 if (!array)
2329 return -TARGET_EFAULT;
2331 for(i=0; i<nsems; i++) {
2332 __get_user((*host_array)[i], &array[i]);
2334 unlock_user(array, target_addr, 0);
2336 return 0;
2339 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2340 unsigned short **host_array)
2342 int nsems;
2343 unsigned short *array;
2344 union semun semun;
2345 struct semid_ds semid_ds;
2346 int i, ret;
2348 semun.buf = &semid_ds;
2350 ret = semctl(semid, 0, IPC_STAT, semun);
2351 if (ret == -1)
2352 return get_errno(ret);
2354 nsems = semid_ds.sem_nsems;
2356 array = lock_user(VERIFY_WRITE, target_addr,
2357 nsems*sizeof(unsigned short), 0);
2358 if (!array)
2359 return -TARGET_EFAULT;
2361 for(i=0; i<nsems; i++) {
2362 __put_user((*host_array)[i], &array[i]);
2364 free(*host_array);
2365 unlock_user(array, target_addr, 1);
2367 return 0;
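/* do_semctl(): the "cmd &= 0xff" below strips the version bits (e.g. the
 * IPC_64 flag) that guest C libraries may OR into the command word, leaving
 * only the base semctl command. */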
2370 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2371 union target_semun target_su)
2373 union semun arg;
2374 struct semid_ds dsarg;
2375 unsigned short *array = NULL;
2376 struct seminfo seminfo;
2377 abi_long ret = -TARGET_EINVAL;
2378 abi_long err;
2379 cmd &= 0xff;
2381 switch( cmd ) {
2382 case GETVAL:
2383 case SETVAL:
2384 arg.val = tswapl(target_su.val);
2385 ret = get_errno(semctl(semid, semnum, cmd, arg));
2386 target_su.val = tswapl(arg.val);
2387 break;
2388 case GETALL:
2389 case SETALL:
2390 err = target_to_host_semarray(semid, &array, target_su.array);
2391 if (err)
2392 return err;
2393 arg.array = array;
2394 ret = get_errno(semctl(semid, semnum, cmd, arg));
2395 err = host_to_target_semarray(semid, target_su.array, &array);
2396 if (err)
2397 return err;
2398 break;
2399 case IPC_STAT:
2400 case IPC_SET:
2401 case SEM_STAT:
2402 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2403 if (err)
2404 return err;
2405 arg.buf = &dsarg;
2406 ret = get_errno(semctl(semid, semnum, cmd, arg));
2407 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2408 if (err)
2409 return err;
2410 break;
2411 case IPC_INFO:
2412 case SEM_INFO:
2413 arg.__buf = &seminfo;
2414 ret = get_errno(semctl(semid, semnum, cmd, arg));
2415 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2416 if (err)
2417 return err;
2418 break;
2419 case IPC_RMID:
2420 case GETPID:
2421 case GETNCNT:
2422 case GETZCNT:
2423 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2424 break;
2427 return ret;
2430 struct target_sembuf {
2431 unsigned short sem_num;
2432 short sem_op;
2433 short sem_flg;
2436 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2437 abi_ulong target_addr,
2438 unsigned nsops)
2440 struct target_sembuf *target_sembuf;
2441 int i;
2443 target_sembuf = lock_user(VERIFY_READ, target_addr,
2444 nsops*sizeof(struct target_sembuf), 1);
2445 if (!target_sembuf)
2446 return -TARGET_EFAULT;
2448 for(i=0; i<nsops; i++) {
2449 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2450 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2451 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2454 unlock_user(target_sembuf, target_addr, 0);
2456 return 0;
2459 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2461 struct sembuf sops[nsops];
2463 if (target_to_host_sembuf(sops, ptr, nsops))
2464 return -TARGET_EFAULT;
2466 return get_errno(semop(semid, sops, nsops));
2469 struct target_msqid_ds
2471 struct target_ipc_perm msg_perm;
2472 abi_ulong msg_stime;
2473 #if TARGET_ABI_BITS == 32
2474 abi_ulong __unused1;
2475 #endif
2476 abi_ulong msg_rtime;
2477 #if TARGET_ABI_BITS == 32
2478 abi_ulong __unused2;
2479 #endif
2480 abi_ulong msg_ctime;
2481 #if TARGET_ABI_BITS == 32
2482 abi_ulong __unused3;
2483 #endif
2484 abi_ulong __msg_cbytes;
2485 abi_ulong msg_qnum;
2486 abi_ulong msg_qbytes;
2487 abi_ulong msg_lspid;
2488 abi_ulong msg_lrpid;
2489 abi_ulong __unused4;
2490 abi_ulong __unused5;
2493 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2494 abi_ulong target_addr)
2496 struct target_msqid_ds *target_md;
2498 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2499 return -TARGET_EFAULT;
2500 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2501 return -TARGET_EFAULT;
2502 host_md->msg_stime = tswapl(target_md->msg_stime);
2503 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2504 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2505 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2506 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2507 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2508 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2509 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2510 unlock_user_struct(target_md, target_addr, 0);
2511 return 0;
2514 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2515 struct msqid_ds *host_md)
2517 struct target_msqid_ds *target_md;
2519 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2520 return -TARGET_EFAULT;
2521 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2522 return -TARGET_EFAULT;
2523 target_md->msg_stime = tswapl(host_md->msg_stime);
2524 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2525 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2526 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2527 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2528 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2529 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2530 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2531 unlock_user_struct(target_md, target_addr, 1);
2532 return 0;
2535 struct target_msginfo {
2536 int msgpool;
2537 int msgmap;
2538 int msgmax;
2539 int msgmnb;
2540 int msgmni;
2541 int msgssz;
2542 int msgtql;
2543 unsigned short int msgseg;
2546 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2547 struct msginfo *host_msginfo)
2549 struct target_msginfo *target_msginfo;
2550 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2551 return -TARGET_EFAULT;
2552 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2553 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2554 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2555 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2556 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2557 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2558 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2559 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2560 unlock_user_struct(target_msginfo, target_addr, 1);
2561 return 0;
2564 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2566 struct msqid_ds dsarg;
2567 struct msginfo msginfo;
2568 abi_long ret = -TARGET_EINVAL;
2570 cmd &= 0xff;
2572 switch (cmd) {
2573 case IPC_STAT:
2574 case IPC_SET:
2575 case MSG_STAT:
2576 if (target_to_host_msqid_ds(&dsarg,ptr))
2577 return -TARGET_EFAULT;
2578 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2579 if (host_to_target_msqid_ds(ptr,&dsarg))
2580 return -TARGET_EFAULT;
2581 break;
2582 case IPC_RMID:
2583 ret = get_errno(msgctl(msgid, cmd, NULL));
2584 break;
2585 case IPC_INFO:
2586 case MSG_INFO:
2587 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2588 if (host_to_target_msginfo(ptr, &msginfo))
2589 return -TARGET_EFAULT;
2590 break;
2593 return ret;
2596 struct target_msgbuf {
2597 abi_long mtype;
2598 char mtext[1];
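/* struct target_msgbuf is the guest view of struct msgbuf: an mtype field
 * followed by the message text. do_msgsnd()/do_msgrcv() allocate a separate
 * host buffer because the host mtype is a long while the guest mtype is an
 * abi_long, and the two may differ in size and byte order. */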
2601 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2602 unsigned int msgsz, int msgflg)
2604 struct target_msgbuf *target_mb;
2605 struct msgbuf *host_mb;
2606 abi_long ret = 0;
2608 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2609 return -TARGET_EFAULT;
2610 host_mb = malloc(msgsz+sizeof(long));
     if (!host_mb) {
         unlock_user_struct(target_mb, msgp, 0);
         return -TARGET_ENOMEM;
     }
2611 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2612 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2613 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2614 free(host_mb);
2615 unlock_user_struct(target_mb, msgp, 0);
2617 return ret;
2620 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2621 unsigned int msgsz, abi_long msgtyp,
2622 int msgflg)
2624 struct target_msgbuf *target_mb;
2625 char *target_mtext;
2626 struct msgbuf *host_mb;
2627 abi_long ret = 0;
2629 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2630 return -TARGET_EFAULT;
2632 host_mb = malloc(msgsz+sizeof(long));
     if (!host_mb) {
         ret = -TARGET_ENOMEM;
         goto end;
     }
2633 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2635 if (ret > 0) {
2636 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2637 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2638 if (!target_mtext) {
2639 ret = -TARGET_EFAULT;
2640 goto end;
2642 memcpy(target_mb->mtext, host_mb->mtext, ret);
2643 unlock_user(target_mtext, target_mtext_addr, ret);
2646 target_mb->mtype = tswapl(host_mb->mtype);
2647 free(host_mb);
2649 end:
2650 if (target_mb)
2651 unlock_user_struct(target_mb, msgp, 1);
2652 return ret;
2655 struct target_shmid_ds
2657 struct target_ipc_perm shm_perm;
2658 abi_ulong shm_segsz;
2659 abi_ulong shm_atime;
2660 #if TARGET_ABI_BITS == 32
2661 abi_ulong __unused1;
2662 #endif
2663 abi_ulong shm_dtime;
2664 #if TARGET_ABI_BITS == 32
2665 abi_ulong __unused2;
2666 #endif
2667 abi_ulong shm_ctime;
2668 #if TARGET_ABI_BITS == 32
2669 abi_ulong __unused3;
2670 #endif
2671 int shm_cpid;
2672 int shm_lpid;
2673 abi_ulong shm_nattch;
2674 unsigned long int __unused4;
2675 unsigned long int __unused5;
2678 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2679 abi_ulong target_addr)
2681 struct target_shmid_ds *target_sd;
2683 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2684 return -TARGET_EFAULT;
2685 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2686 return -TARGET_EFAULT;
2687 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2688 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2689 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2690 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2691 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2692 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2693 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2694 unlock_user_struct(target_sd, target_addr, 0);
2695 return 0;
2698 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2699 struct shmid_ds *host_sd)
2701 struct target_shmid_ds *target_sd;
2703 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2704 return -TARGET_EFAULT;
2705 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2706 return -TARGET_EFAULT;
2707 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2708 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2709 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2710 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2711 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2712 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2713 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2714 unlock_user_struct(target_sd, target_addr, 1);
2715 return 0;
2718 struct target_shminfo {
2719 abi_ulong shmmax;
2720 abi_ulong shmmin;
2721 abi_ulong shmmni;
2722 abi_ulong shmseg;
2723 abi_ulong shmall;
2726 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2727 struct shminfo *host_shminfo)
2729 struct target_shminfo *target_shminfo;
2730 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2731 return -TARGET_EFAULT;
2732 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2733 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2734 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2735 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2736 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2737 unlock_user_struct(target_shminfo, target_addr, 1);
2738 return 0;
2741 struct target_shm_info {
2742 int used_ids;
2743 abi_ulong shm_tot;
2744 abi_ulong shm_rss;
2745 abi_ulong shm_swp;
2746 abi_ulong swap_attempts;
2747 abi_ulong swap_successes;
2750 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2751 struct shm_info *host_shm_info)
2753 struct target_shm_info *target_shm_info;
2754 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2755 return -TARGET_EFAULT;
2756 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2757 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2758 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2759 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2760 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2761 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2762 unlock_user_struct(target_shm_info, target_addr, 1);
2763 return 0;
2766 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2768 struct shmid_ds dsarg;
2769 struct shminfo shminfo;
2770 struct shm_info shm_info;
2771 abi_long ret = -TARGET_EINVAL;
2773 cmd &= 0xff;
2775 switch(cmd) {
2776 case IPC_STAT:
2777 case IPC_SET:
2778 case SHM_STAT:
2779 if (target_to_host_shmid_ds(&dsarg, buf))
2780 return -TARGET_EFAULT;
2781 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2782 if (host_to_target_shmid_ds(buf, &dsarg))
2783 return -TARGET_EFAULT;
2784 break;
2785 case IPC_INFO:
2786 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2787 if (host_to_target_shminfo(buf, &shminfo))
2788 return -TARGET_EFAULT;
2789 break;
2790 case SHM_INFO:
2791 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2792 if (host_to_target_shm_info(buf, &shm_info))
2793 return -TARGET_EFAULT;
2794 break;
2795 case IPC_RMID:
2796 case SHM_LOCK:
2797 case SHM_UNLOCK:
2798 ret = get_errno(shmctl(shmid, cmd, NULL));
2799 break;
2802 return ret;
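/* do_shmat(): if the guest supplies no address, mmap_find_vma() picks a free
 * region of the guest address space and SHM_REMAP forces the host shmat() to
 * map the segment there. page_set_flags() then registers the new range so
 * QEMU's page tracking sees it as valid (and writable unless SHM_RDONLY). */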
2805 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2807 abi_long raddr;
2808 void *host_raddr;
2809 struct shmid_ds shm_info;
2810 int i,ret;
2812 /* find out the length of the shared memory segment */
2813 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2814 if (is_error(ret)) {
2815 /* can't get length, bail out */
2816 return ret;
2819 mmap_lock();
2821 if (shmaddr)
2822 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2823 else {
2824 abi_ulong mmap_start;
2826 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2828 if (mmap_start == -1) {
2829 errno = ENOMEM;
2830 host_raddr = (void *)-1;
2831 } else
2832 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2835 if (host_raddr == (void *)-1) {
2836 mmap_unlock();
2837 return get_errno((long)host_raddr);
2839 raddr=h2g((unsigned long)host_raddr);
2841 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2842 PAGE_VALID | PAGE_READ |
2843 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2845 for (i = 0; i < N_SHM_REGIONS; i++) {
2846 if (shm_regions[i].start == 0) {
2847 shm_regions[i].start = raddr;
2848 shm_regions[i].size = shm_info.shm_segsz;
2849 break;
2853 mmap_unlock();
2854 return raddr;
2858 static inline abi_long do_shmdt(abi_ulong shmaddr)
2860 int i;
2862 for (i = 0; i < N_SHM_REGIONS; ++i) {
2863 if (shm_regions[i].start == shmaddr) {
2864 shm_regions[i].start = 0;
2865 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
2866 break;
2870 return get_errno(shmdt(g2h(shmaddr)));
2873 #ifdef TARGET_NR_ipc
2874 /* ??? This only works with linear mappings. */
2875 /* do_ipc() must return target values and target errnos. */
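/* do_ipc() demultiplexes the single ipc() syscall used on some targets: the
 * low 16 bits of 'call' select the operation and the high bits carry the
 * version, which only matters for the msgrcv and shmat cases below. */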
2876 static abi_long do_ipc(unsigned int call, int first,
2877 int second, int third,
2878 abi_long ptr, abi_long fifth)
2880 int version;
2881 abi_long ret = 0;
2883 version = call >> 16;
2884 call &= 0xffff;
2886 switch (call) {
2887 case IPCOP_semop:
2888 ret = do_semop(first, ptr, second);
2889 break;
2891 case IPCOP_semget:
2892 ret = get_errno(semget(first, second, third));
2893 break;
2895 case IPCOP_semctl:
2896 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2897 break;
2899 case IPCOP_msgget:
2900 ret = get_errno(msgget(first, second));
2901 break;
2903 case IPCOP_msgsnd:
2904 ret = do_msgsnd(first, ptr, second, third);
2905 break;
2907 case IPCOP_msgctl:
2908 ret = do_msgctl(first, second, ptr);
2909 break;
2911 case IPCOP_msgrcv:
2912 switch (version) {
2913 case 0:
2915 struct target_ipc_kludge {
2916 abi_long msgp;
2917 abi_long msgtyp;
2918 } *tmp;
2920 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2921 ret = -TARGET_EFAULT;
2922 break;
2925 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2927 unlock_user_struct(tmp, ptr, 0);
2928 break;
2930 default:
2931 ret = do_msgrcv(first, ptr, second, fifth, third);
2933 break;
2935 case IPCOP_shmat:
2936 switch (version) {
2937 default:
2939 abi_ulong raddr;
2940 raddr = do_shmat(first, ptr, second);
2941 if (is_error(raddr))
2942 return get_errno(raddr);
2943 if (put_user_ual(raddr, third))
2944 return -TARGET_EFAULT;
2945 break;
2947 case 1:
2948 ret = -TARGET_EINVAL;
2949 break;
2951 break;
2952 case IPCOP_shmdt:
2953 ret = do_shmdt(ptr);
2954 break;
2956 case IPCOP_shmget:
2957 /* IPC_* flag values are the same on all linux platforms */
2958 ret = get_errno(shmget(first, second, third));
2959 break;
2961 /* IPC_* and SHM_* command values are the same on all linux platforms */
2962 case IPCOP_shmctl:
2963 ret = do_shmctl(first, second, third);
2964 break;
2965 default:
2966 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2967 ret = -TARGET_ENOSYS;
2968 break;
2970 return ret;
2972 #endif
2974 /* kernel structure type definitions */
2976 #define STRUCT(name, ...) STRUCT_ ## name,
2977 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2978 enum {
2979 #include "syscall_types.h"
2981 #undef STRUCT
2982 #undef STRUCT_SPECIAL
2984 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
2985 #define STRUCT_SPECIAL(name)
2986 #include "syscall_types.h"
2987 #undef STRUCT
2988 #undef STRUCT_SPECIAL
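/* syscall_types.h is included twice: the first pass turns each STRUCT() entry
 * into a STRUCT_<name> enum value, and the second pass emits the matching
 * struct_<name>_def[] argtype array consumed by the thunk conversion code in
 * do_ioctl(). */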
2990 typedef struct IOCTLEntry IOCTLEntry;
2992 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
2993 int fd, abi_long cmd, abi_long arg);
2995 struct IOCTLEntry {
2996 unsigned int target_cmd;
2997 unsigned int host_cmd;
2998 const char *name;
2999 int access;
3000 do_ioctl_fn *do_ioctl;
3001 const argtype arg_type[5];
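/* Each IOCTLEntry maps a target ioctl request number to the host number,
 * records the data direction (IOC_R/IOC_W/IOC_RW) and the argument layout,
 * and may supply a custom do_ioctl handler for requests that need more than a
 * plain thunk conversion. */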
3004 #define IOC_R 0x0001
3005 #define IOC_W 0x0002
3006 #define IOC_RW (IOC_R | IOC_W)
3008 #define MAX_STRUCT_SIZE 4096
3010 #ifdef CONFIG_FIEMAP
3011 /* So fiemap access checks don't overflow on 32 bit systems.
3012 * This is very slightly smaller than the limit imposed by
3013 * the underlying kernel.
3015 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3016 / sizeof(struct fiemap_extent))
3018 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3019 int fd, abi_long cmd, abi_long arg)
3021 /* The parameter for this ioctl is a struct fiemap followed
3022 * by an array of struct fiemap_extent whose size is set
3023 * in fiemap->fm_extent_count. The array is filled in by the
3024 * ioctl.
3026 int target_size_in, target_size_out;
3027 struct fiemap *fm;
3028 const argtype *arg_type = ie->arg_type;
3029 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3030 void *argptr, *p;
3031 abi_long ret;
3032 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3033 uint32_t outbufsz;
3034 int free_fm = 0;
3036 assert(arg_type[0] == TYPE_PTR);
3037 assert(ie->access == IOC_RW);
3038 arg_type++;
3039 target_size_in = thunk_type_size(arg_type, 0);
3040 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3041 if (!argptr) {
3042 return -TARGET_EFAULT;
3044 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3045 unlock_user(argptr, arg, 0);
3046 fm = (struct fiemap *)buf_temp;
3047 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3048 return -TARGET_EINVAL;
3051 outbufsz = sizeof (*fm) +
3052 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3054 if (outbufsz > MAX_STRUCT_SIZE) {
3055 /* We can't fit all the extents into the fixed size buffer.
3056 * Allocate one that is large enough and use it instead.
3058 fm = malloc(outbufsz);
3059 if (!fm) {
3060 return -TARGET_ENOMEM;
3062 memcpy(fm, buf_temp, sizeof(struct fiemap));
3063 free_fm = 1;
3065 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3066 if (!is_error(ret)) {
3067 target_size_out = target_size_in;
3068 /* An extent_count of 0 means we were only counting the extents
3069 * so there are no structs to copy
3071 if (fm->fm_extent_count != 0) {
3072 target_size_out += fm->fm_mapped_extents * extent_size;
3074 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3075 if (!argptr) {
3076 ret = -TARGET_EFAULT;
3077 } else {
3078 /* Convert the struct fiemap */
3079 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3080 if (fm->fm_extent_count != 0) {
3081 p = argptr + target_size_in;
3082 /* ...and then all the struct fiemap_extents */
3083 for (i = 0; i < fm->fm_mapped_extents; i++) {
3084 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3085 THUNK_TARGET);
3086 p += extent_size;
3089 unlock_user(argptr, arg, target_size_out);
3092 if (free_fm) {
3093 free(fm);
3095 return ret;
3097 #endif
3099 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3100 int fd, abi_long cmd, abi_long arg)
3102 const argtype *arg_type = ie->arg_type;
3103 int target_size;
3104 void *argptr;
3105 int ret;
3106 struct ifconf *host_ifconf;
3107 uint32_t outbufsz;
3108 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3109 int target_ifreq_size;
3110 int nb_ifreq;
3111 int free_buf = 0;
3112 int i;
3113 int target_ifc_len;
3114 abi_long target_ifc_buf;
3115 int host_ifc_len;
3116 char *host_ifc_buf;
3118 assert(arg_type[0] == TYPE_PTR);
3119 assert(ie->access == IOC_RW);
3121 arg_type++;
3122 target_size = thunk_type_size(arg_type, 0);
3124 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3125 if (!argptr)
3126 return -TARGET_EFAULT;
3127 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3128 unlock_user(argptr, arg, 0);
3130 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3131 target_ifc_len = host_ifconf->ifc_len;
3132 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3134 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3135 nb_ifreq = target_ifc_len / target_ifreq_size;
3136 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3138 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3139 if (outbufsz > MAX_STRUCT_SIZE) {
3140 /* We can't fit all the ifreq entries into the fixed size buffer.
3141 * Allocate one that is large enough and use it instead.
3143 host_ifconf = malloc(outbufsz);
3144 if (!host_ifconf) {
3145 return -TARGET_ENOMEM;
3147 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3148 free_buf = 1;
3150 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3152 host_ifconf->ifc_len = host_ifc_len;
3153 host_ifconf->ifc_buf = host_ifc_buf;
3155 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3156 if (!is_error(ret)) {
3157 /* convert host ifc_len to target ifc_len */
3159 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3160 target_ifc_len = nb_ifreq * target_ifreq_size;
3161 host_ifconf->ifc_len = target_ifc_len;
3163 /* restore target ifc_buf */
3165 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3167 /* copy struct ifconf to target user */
3169 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3170 if (!argptr)
3171 return -TARGET_EFAULT;
3172 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3173 unlock_user(argptr, arg, target_size);
3175 /* copy ifreq[] to target user */
3177 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3178 for (i = 0; i < nb_ifreq ; i++) {
3179 thunk_convert(argptr + i * target_ifreq_size,
3180 host_ifc_buf + i * sizeof(struct ifreq),
3181 ifreq_arg_type, THUNK_TARGET);
3183 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3186 if (free_buf) {
3187 free(host_ifconf);
3190 return ret;
3193 static IOCTLEntry ioctl_entries[] = {
3194 #define IOCTL(cmd, access, ...) \
3195 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3196 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3197 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3198 #include "ioctls.h"
3199 { 0, 0, },
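/* The table is populated by including ioctls.h with the IOCTL/IOCTL_SPECIAL
 * macros defined above; the all-zero entry terminates it, and do_ioctl()
 * reports -TARGET_ENOSYS when its scan reaches this sentinel. */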
3202 /* ??? Implement proper locking for ioctls. */
3203 /* do_ioctl() must return target values and target errnos. */
3204 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3206 const IOCTLEntry *ie;
3207 const argtype *arg_type;
3208 abi_long ret;
3209 uint8_t buf_temp[MAX_STRUCT_SIZE];
3210 int target_size;
3211 void *argptr;
3213 ie = ioctl_entries;
3214 for(;;) {
3215 if (ie->target_cmd == 0) {
3216 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3217 return -TARGET_ENOSYS;
3219 if (ie->target_cmd == cmd)
3220 break;
3221 ie++;
3223 arg_type = ie->arg_type;
3224 #if defined(DEBUG)
3225 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3226 #endif
3227 if (ie->do_ioctl) {
3228 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3231 switch(arg_type[0]) {
3232 case TYPE_NULL:
3233 /* no argument */
3234 ret = get_errno(ioctl(fd, ie->host_cmd));
3235 break;
3236 case TYPE_PTRVOID:
3237 case TYPE_INT:
3238 /* int argument */
3239 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3240 break;
3241 case TYPE_PTR:
3242 arg_type++;
3243 target_size = thunk_type_size(arg_type, 0);
3244 switch(ie->access) {
3245 case IOC_R:
3246 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3247 if (!is_error(ret)) {
3248 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3249 if (!argptr)
3250 return -TARGET_EFAULT;
3251 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3252 unlock_user(argptr, arg, target_size);
3254 break;
3255 case IOC_W:
3256 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3257 if (!argptr)
3258 return -TARGET_EFAULT;
3259 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3260 unlock_user(argptr, arg, 0);
3261 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3262 break;
3263 default:
3264 case IOC_RW:
3265 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3266 if (!argptr)
3267 return -TARGET_EFAULT;
3268 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3269 unlock_user(argptr, arg, 0);
3270 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3271 if (!is_error(ret)) {
3272 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3273 if (!argptr)
3274 return -TARGET_EFAULT;
3275 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3276 unlock_user(argptr, arg, target_size);
3278 break;
3280 break;
3281 default:
3282 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3283 (long)cmd, arg_type[0]);
3284 ret = -TARGET_ENOSYS;
3285 break;
3287 return ret;
3290 static const bitmask_transtbl iflag_tbl[] = {
3291 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3292 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3293 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3294 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3295 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3296 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3297 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3298 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3299 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3300 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3301 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3302 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3303 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3304 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3305 { 0, 0, 0, 0 }
3308 static const bitmask_transtbl oflag_tbl[] = {
3309 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3310 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3311 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3312 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3313 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3314 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3315 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3316 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3317 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3318 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3319 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3320 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3321 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3322 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3323 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3324 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3325 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3326 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3327 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3328 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3329 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3330 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3331 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3332 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3333 { 0, 0, 0, 0 }
3336 static const bitmask_transtbl cflag_tbl[] = {
3337 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3338 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3339 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3340 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3341 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3342 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3343 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3344 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3345 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3346 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3347 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3348 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3349 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3350 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3351 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3352 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3353 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3354 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3355 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3356 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3357 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3358 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3359 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3360 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3361 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3362 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3363 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3364 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3365 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3366 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3367 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3368 { 0, 0, 0, 0 }
3371 static const bitmask_transtbl lflag_tbl[] = {
3372 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3373 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3374 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3375 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3376 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3377 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3378 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3379 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3380 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3381 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3382 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3383 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3384 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3385 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3386 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3387 { 0, 0, 0, 0 }
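/* Each bitmask_transtbl row is (target mask, target bits, host mask, host
 * bits); target_to_host_bitmask() and host_to_target_bitmask() use these
 * tables to translate the termios flag words field by field in the two
 * conversion routines below. */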
3390 static void target_to_host_termios (void *dst, const void *src)
3392 struct host_termios *host = dst;
3393 const struct target_termios *target = src;
3395 host->c_iflag =
3396 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3397 host->c_oflag =
3398 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3399 host->c_cflag =
3400 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3401 host->c_lflag =
3402 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3403 host->c_line = target->c_line;
3405 memset(host->c_cc, 0, sizeof(host->c_cc));
3406 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3407 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3408 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3409 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3410 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3411 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3412 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3413 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3414 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3415 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3416 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3417 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3418 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3419 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3420 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3421 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3422 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3425 static void host_to_target_termios (void *dst, const void *src)
3427 struct target_termios *target = dst;
3428 const struct host_termios *host = src;
3430 target->c_iflag =
3431 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3432 target->c_oflag =
3433 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3434 target->c_cflag =
3435 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3436 target->c_lflag =
3437 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3438 target->c_line = host->c_line;
3440 memset(target->c_cc, 0, sizeof(target->c_cc));
3441 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3442 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3443 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3444 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3445 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3446 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3447 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3448 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3449 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3450 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3451 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3452 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3453 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3454 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3455 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3456 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3457 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3460 static const StructEntry struct_termios_def = {
3461 .convert = { host_to_target_termios, target_to_host_termios },
3462 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3463 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3466 static bitmask_transtbl mmap_flags_tbl[] = {
3467 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3468 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3469 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3470 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3471 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3472 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3473 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3474 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3475 { 0, 0, 0, 0 }
3478 #if defined(TARGET_I386)
3480 /* NOTE: there is really one LDT for all the threads */
3481 static uint8_t *ldt_table;
3483 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3485 int size;
3486 void *p;
3488 if (!ldt_table)
3489 return 0;
3490 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3491 if (size > bytecount)
3492 size = bytecount;
3493 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3494 if (!p)
3495 return -TARGET_EFAULT;
3496 /* ??? Should this be byteswapped? */
3497 memcpy(p, ldt_table, size);
3498 unlock_user(p, ptr, size);
3499 return size;
3502 /* XXX: add locking support */
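/* write_ldt() packs the requested base, limit and flag bits into entry_1 and
 * entry_2, the two 32-bit halves of an x86 segment descriptor, following the
 * same layout as the Linux kernel's LDT code, and stores them into the
 * emulated LDT kept in guest memory. */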
3503 static abi_long write_ldt(CPUX86State *env,
3504 abi_ulong ptr, unsigned long bytecount, int oldmode)
3506 struct target_modify_ldt_ldt_s ldt_info;
3507 struct target_modify_ldt_ldt_s *target_ldt_info;
3508 int seg_32bit, contents, read_exec_only, limit_in_pages;
3509 int seg_not_present, useable, lm;
3510 uint32_t *lp, entry_1, entry_2;
3512 if (bytecount != sizeof(ldt_info))
3513 return -TARGET_EINVAL;
3514 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3515 return -TARGET_EFAULT;
3516 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3517 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3518 ldt_info.limit = tswap32(target_ldt_info->limit);
3519 ldt_info.flags = tswap32(target_ldt_info->flags);
3520 unlock_user_struct(target_ldt_info, ptr, 0);
3522 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3523 return -TARGET_EINVAL;
3524 seg_32bit = ldt_info.flags & 1;
3525 contents = (ldt_info.flags >> 1) & 3;
3526 read_exec_only = (ldt_info.flags >> 3) & 1;
3527 limit_in_pages = (ldt_info.flags >> 4) & 1;
3528 seg_not_present = (ldt_info.flags >> 5) & 1;
3529 useable = (ldt_info.flags >> 6) & 1;
3530 #ifdef TARGET_ABI32
3531 lm = 0;
3532 #else
3533 lm = (ldt_info.flags >> 7) & 1;
3534 #endif
3535 if (contents == 3) {
3536 if (oldmode)
3537 return -TARGET_EINVAL;
3538 if (seg_not_present == 0)
3539 return -TARGET_EINVAL;
3541 /* allocate the LDT */
3542 if (!ldt_table) {
3543 env->ldt.base = target_mmap(0,
3544 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3545 PROT_READ|PROT_WRITE,
3546 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3547 if (env->ldt.base == -1)
3548 return -TARGET_ENOMEM;
3549 memset(g2h(env->ldt.base), 0,
3550 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3551 env->ldt.limit = 0xffff;
3552 ldt_table = g2h(env->ldt.base);
3555 /* NOTE: same code as Linux kernel */
3556 /* Allow LDTs to be cleared by the user. */
3557 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3558 if (oldmode ||
3559 (contents == 0 &&
3560 read_exec_only == 1 &&
3561 seg_32bit == 0 &&
3562 limit_in_pages == 0 &&
3563 seg_not_present == 1 &&
3564 useable == 0 )) {
3565 entry_1 = 0;
3566 entry_2 = 0;
3567 goto install;
3571 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3572 (ldt_info.limit & 0x0ffff);
3573 entry_2 = (ldt_info.base_addr & 0xff000000) |
3574 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3575 (ldt_info.limit & 0xf0000) |
3576 ((read_exec_only ^ 1) << 9) |
3577 (contents << 10) |
3578 ((seg_not_present ^ 1) << 15) |
3579 (seg_32bit << 22) |
3580 (limit_in_pages << 23) |
3581 (lm << 21) |
3582 0x7000;
3583 if (!oldmode)
3584 entry_2 |= (useable << 20);
3586 /* Install the new entry ... */
3587 install:
3588 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3589 lp[0] = tswap32(entry_1);
3590 lp[1] = tswap32(entry_2);
3591 return 0;
3594 /* specific and weird i386 syscalls */
3595 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3596 unsigned long bytecount)
3598 abi_long ret;
3600 switch (func) {
3601 case 0:
3602 ret = read_ldt(ptr, bytecount);
3603 break;
3604 case 1:
3605 ret = write_ldt(env, ptr, bytecount, 1);
3606 break;
3607 case 0x11:
3608 ret = write_ldt(env, ptr, bytecount, 0);
3609 break;
3610 default:
3611 ret = -TARGET_ENOSYS;
3612 break;
3614 return ret;
3617 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3618 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3620 uint64_t *gdt_table = g2h(env->gdt.base);
3621 struct target_modify_ldt_ldt_s ldt_info;
3622 struct target_modify_ldt_ldt_s *target_ldt_info;
3623 int seg_32bit, contents, read_exec_only, limit_in_pages;
3624 int seg_not_present, useable, lm;
3625 uint32_t *lp, entry_1, entry_2;
3626 int i;
3628 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3629 if (!target_ldt_info)
3630 return -TARGET_EFAULT;
3631 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3632 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3633 ldt_info.limit = tswap32(target_ldt_info->limit);
3634 ldt_info.flags = tswap32(target_ldt_info->flags);
3635 if (ldt_info.entry_number == -1) {
3636 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3637 if (gdt_table[i] == 0) {
3638 ldt_info.entry_number = i;
3639 target_ldt_info->entry_number = tswap32(i);
3640 break;
3644 unlock_user_struct(target_ldt_info, ptr, 1);
3646 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3647 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3648 return -TARGET_EINVAL;
3649 seg_32bit = ldt_info.flags & 1;
3650 contents = (ldt_info.flags >> 1) & 3;
3651 read_exec_only = (ldt_info.flags >> 3) & 1;
3652 limit_in_pages = (ldt_info.flags >> 4) & 1;
3653 seg_not_present = (ldt_info.flags >> 5) & 1;
3654 useable = (ldt_info.flags >> 6) & 1;
3655 #ifdef TARGET_ABI32
3656 lm = 0;
3657 #else
3658 lm = (ldt_info.flags >> 7) & 1;
3659 #endif
3661 if (contents == 3) {
3662 if (seg_not_present == 0)
3663 return -TARGET_EINVAL;
3666 /* NOTE: same code as Linux kernel */
3667 /* Allow LDTs to be cleared by the user. */
3668 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3669 if ((contents == 0 &&
3670 read_exec_only == 1 &&
3671 seg_32bit == 0 &&
3672 limit_in_pages == 0 &&
3673 seg_not_present == 1 &&
3674 useable == 0 )) {
3675 entry_1 = 0;
3676 entry_2 = 0;
3677 goto install;
3681 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3682 (ldt_info.limit & 0x0ffff);
3683 entry_2 = (ldt_info.base_addr & 0xff000000) |
3684 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3685 (ldt_info.limit & 0xf0000) |
3686 ((read_exec_only ^ 1) << 9) |
3687 (contents << 10) |
3688 ((seg_not_present ^ 1) << 15) |
3689 (seg_32bit << 22) |
3690 (limit_in_pages << 23) |
3691 (useable << 20) |
3692 (lm << 21) |
3693 0x7000;
3695 /* Install the new entry ... */
3696 install:
3697 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3698 lp[0] = tswap32(entry_1);
3699 lp[1] = tswap32(entry_2);
3700 return 0;
3703 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3705 struct target_modify_ldt_ldt_s *target_ldt_info;
3706 uint64_t *gdt_table = g2h(env->gdt.base);
3707 uint32_t base_addr, limit, flags;
3708 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3709 int seg_not_present, useable, lm;
3710 uint32_t *lp, entry_1, entry_2;
3712 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3713 if (!target_ldt_info)
3714 return -TARGET_EFAULT;
3715 idx = tswap32(target_ldt_info->entry_number);
3716 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3717 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3718 unlock_user_struct(target_ldt_info, ptr, 1);
3719 return -TARGET_EINVAL;
3721 lp = (uint32_t *)(gdt_table + idx);
3722 entry_1 = tswap32(lp[0]);
3723 entry_2 = tswap32(lp[1]);
3725 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3726 contents = (entry_2 >> 10) & 3;
3727 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3728 seg_32bit = (entry_2 >> 22) & 1;
3729 limit_in_pages = (entry_2 >> 23) & 1;
3730 useable = (entry_2 >> 20) & 1;
3731 #ifdef TARGET_ABI32
3732 lm = 0;
3733 #else
3734 lm = (entry_2 >> 21) & 1;
3735 #endif
3736 flags = (seg_32bit << 0) | (contents << 1) |
3737 (read_exec_only << 3) | (limit_in_pages << 4) |
3738 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3739 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3740 base_addr = (entry_1 >> 16) |
3741 (entry_2 & 0xff000000) |
3742 ((entry_2 & 0xff) << 16);
3743 target_ldt_info->base_addr = tswapl(base_addr);
3744 target_ldt_info->limit = tswap32(limit);
3745 target_ldt_info->flags = tswap32(flags);
3746 unlock_user_struct(target_ldt_info, ptr, 1);
3747 return 0;
3749 #endif /* TARGET_I386 && TARGET_ABI32 */
3751 #ifndef TARGET_ABI32
3752 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3754 abi_long ret = 0;
3755 abi_ulong val;
3756 int idx;
3758 switch(code) {
3759 case TARGET_ARCH_SET_GS:
3760 case TARGET_ARCH_SET_FS:
3761 if (code == TARGET_ARCH_SET_GS)
3762 idx = R_GS;
3763 else
3764 idx = R_FS;
3765 cpu_x86_load_seg(env, idx, 0);
3766 env->segs[idx].base = addr;
3767 break;
3768 case TARGET_ARCH_GET_GS:
3769 case TARGET_ARCH_GET_FS:
3770 if (code == TARGET_ARCH_GET_GS)
3771 idx = R_GS;
3772 else
3773 idx = R_FS;
3774 val = env->segs[idx].base;
3775 if (put_user(val, addr, abi_ulong))
3776 return -TARGET_EFAULT;
3777 break;
3778 default:
3779 ret = -TARGET_EINVAL;
3780 break;
3782 return ret;
3784 #endif
3786 #endif /* defined(TARGET_I386) */
3788 #define NEW_STACK_SIZE 0x40000
3790 #if defined(CONFIG_USE_NPTL)
3792 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3793 typedef struct {
3794 CPUState *env;
3795 pthread_mutex_t mutex;
3796 pthread_cond_t cond;
3797 pthread_t thread;
3798 uint32_t tid;
3799 abi_ulong child_tidptr;
3800 abi_ulong parent_tidptr;
3801 sigset_t sigmask;
3802 } new_thread_info;
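/* new_thread_info carries everything the parent hands to the child thread in
 * clone_func(): the new CPU state, the tid pointers to fill in, the signal
 * mask to restore, and a mutex/condvar pair used to signal the parent once the
 * child has finished its own setup. */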
3804 static void *clone_func(void *arg)
3806 new_thread_info *info = arg;
3807 CPUState *env;
3808 TaskState *ts;
3810 env = info->env;
3811 thread_env = env;
3812 ts = (TaskState *)thread_env->opaque;
3813 info->tid = gettid();
3814 env->host_tid = info->tid;
3815 task_settid(ts);
3816 if (info->child_tidptr)
3817 put_user_u32(info->tid, info->child_tidptr);
3818 if (info->parent_tidptr)
3819 put_user_u32(info->tid, info->parent_tidptr);
3820 /* Enable signals. */
3821 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3822 /* Signal to the parent that we're ready. */
3823 pthread_mutex_lock(&info->mutex);
3824 pthread_cond_broadcast(&info->cond);
3825 pthread_mutex_unlock(&info->mutex);
3826 /* Wait until the parent has finished initializing the TLS state. */
3827 pthread_mutex_lock(&clone_lock);
3828 pthread_mutex_unlock(&clone_lock);
3829 cpu_loop(env);
3830 /* never exits */
3831 return NULL;
3833 #else
3835 static int clone_func(void *arg)
3837 CPUState *env = arg;
3838 cpu_loop(env);
3839 /* never exits */
3840 return 0;
3842 #endif
3844 /* do_fork() must return host values and target errnos (unlike most
3845 do_*() functions). */
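/* When CLONE_VM is set, the clone is implemented as a host pthread sharing the
 * emulated address space (NPTL case) or a raw clone() on the non-NPTL build;
 * without CLONE_VM the request is treated as a fork() of the whole process. */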
3846 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3847 abi_ulong parent_tidptr, target_ulong newtls,
3848 abi_ulong child_tidptr)
3850 int ret;
3851 TaskState *ts;
3852 CPUState *new_env;
3853 #if defined(CONFIG_USE_NPTL)
3854 unsigned int nptl_flags;
3855 sigset_t sigmask;
3856 #else
3857 uint8_t *new_stack;
3858 #endif
3860 /* Emulate vfork() with fork() */
3861 if (flags & CLONE_VFORK)
3862 flags &= ~(CLONE_VFORK | CLONE_VM);
3864 if (flags & CLONE_VM) {
3865 TaskState *parent_ts = (TaskState *)env->opaque;
3866 #if defined(CONFIG_USE_NPTL)
3867 new_thread_info info;
3868 pthread_attr_t attr;
3869 #endif
3870 ts = qemu_mallocz(sizeof(TaskState));
3871 init_task_state(ts);
3872 /* we create a new CPU instance. */
3873 new_env = cpu_copy(env);
3874 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3875 cpu_reset(new_env);
3876 #endif
3877 /* Init regs that differ from the parent. */
3878 cpu_clone_regs(new_env, newsp);
3879 new_env->opaque = ts;
3880 ts->bprm = parent_ts->bprm;
3881 ts->info = parent_ts->info;
3882 #if defined(CONFIG_USE_NPTL)
3883 nptl_flags = flags;
3884 flags &= ~CLONE_NPTL_FLAGS2;
3886 if (nptl_flags & CLONE_CHILD_CLEARTID) {
3887 ts->child_tidptr = child_tidptr;
3890 if (nptl_flags & CLONE_SETTLS)
3891 cpu_set_tls (new_env, newtls);
3893 /* Grab a mutex so that thread setup appears atomic. */
3894 pthread_mutex_lock(&clone_lock);
3896 memset(&info, 0, sizeof(info));
3897 pthread_mutex_init(&info.mutex, NULL);
3898 pthread_mutex_lock(&info.mutex);
3899 pthread_cond_init(&info.cond, NULL);
3900 info.env = new_env;
3901 if (nptl_flags & CLONE_CHILD_SETTID)
3902 info.child_tidptr = child_tidptr;
3903 if (nptl_flags & CLONE_PARENT_SETTID)
3904 info.parent_tidptr = parent_tidptr;
3906 ret = pthread_attr_init(&attr);
3907 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
3908 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
3909 /* It is not safe to deliver signals until the child has finished
3910 initializing, so temporarily block all signals. */
3911 sigfillset(&sigmask);
3912 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3914 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3915 /* TODO: Free new CPU state if thread creation failed. */
3917 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3918 pthread_attr_destroy(&attr);
3919 if (ret == 0) {
3920 /* Wait for the child to initialize. */
3921 pthread_cond_wait(&info.cond, &info.mutex);
3922 ret = info.tid;
3923 if (flags & CLONE_PARENT_SETTID)
3924 put_user_u32(ret, parent_tidptr);
3925 } else {
3926 ret = -1;
3928 pthread_mutex_unlock(&info.mutex);
3929 pthread_cond_destroy(&info.cond);
3930 pthread_mutex_destroy(&info.mutex);
3931 pthread_mutex_unlock(&clone_lock);
3932 #else
3933 if (flags & CLONE_NPTL_FLAGS2)
3934 return -EINVAL;
3935 /* This is probably going to die very quickly, but do it anyway. */
3936 new_stack = qemu_mallocz (NEW_STACK_SIZE);
3937 #ifdef __ia64__
3938 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
3939 #else
3940 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3941 #endif
3942 #endif
3943 } else {
3944 /* if there is no CLONE_VM, we consider it a fork */
3945 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3946 return -EINVAL;
3947 fork_start();
3948 ret = fork();
3949 if (ret == 0) {
3950 /* Child Process. */
3951 cpu_clone_regs(env, newsp);
3952 fork_end(1);
3953 #if defined(CONFIG_USE_NPTL)
3954 /* There is a race condition here. The parent process could
3955 theoretically read the TID in the child process before the child
3956 tid is set. This would require using either ptrace
3957 (not implemented) or having *_tidptr point at a shared memory
3958 mapping. We can't repeat the spinlock hack used above because
3959 the child process gets its own copy of the lock. */
3960 if (flags & CLONE_CHILD_SETTID)
3961 put_user_u32(gettid(), child_tidptr);
3962 if (flags & CLONE_PARENT_SETTID)
3963 put_user_u32(gettid(), parent_tidptr);
3964 ts = (TaskState *)env->opaque;
3965 if (flags & CLONE_SETTLS)
3966 cpu_set_tls (env, newtls);
3967 if (flags & CLONE_CHILD_CLEARTID)
3968 ts->child_tidptr = child_tidptr;
3969 #endif
3970 } else {
3971 fork_end(0);
3974 return ret;
3977 /* Warning: does not handle Linux-specific flags... */
3978 static int target_to_host_fcntl_cmd(int cmd)
3980 switch(cmd) {
3981 case TARGET_F_DUPFD:
3982 case TARGET_F_GETFD:
3983 case TARGET_F_SETFD:
3984 case TARGET_F_GETFL:
3985 case TARGET_F_SETFL:
3986 return cmd;
3987 case TARGET_F_GETLK:
3988 return F_GETLK;
3989 case TARGET_F_SETLK:
3990 return F_SETLK;
3991 case TARGET_F_SETLKW:
3992 return F_SETLKW;
3993 case TARGET_F_GETOWN:
3994 return F_GETOWN;
3995 case TARGET_F_SETOWN:
3996 return F_SETOWN;
3997 case TARGET_F_GETSIG:
3998 return F_GETSIG;
3999 case TARGET_F_SETSIG:
4000 return F_SETSIG;
4001 #if TARGET_ABI_BITS == 32
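/* The *LK64 commands only have distinct numbers on 32-bit ABIs; on 64-bit
   targets the plain F_GETLK/F_SETLK variants already use 64-bit offsets. */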
4002 case TARGET_F_GETLK64:
4003 return F_GETLK64;
4004 case TARGET_F_SETLK64:
4005 return F_SETLK64;
4006 case TARGET_F_SETLKW64:
4007 return F_SETLKW64;
4008 #endif
4009 case TARGET_F_SETLEASE:
4010 return F_SETLEASE;
4011 case TARGET_F_GETLEASE:
4012 return F_GETLEASE;
4013 #ifdef F_DUPFD_CLOEXEC
4014 case TARGET_F_DUPFD_CLOEXEC:
4015 return F_DUPFD_CLOEXEC;
4016 #endif
4017 case TARGET_F_NOTIFY:
4018 return F_NOTIFY;
4019 default:
4020 return -TARGET_EINVAL;
4022 return -TARGET_EINVAL;
4025 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4027 struct flock fl;
4028 struct target_flock *target_fl;
4029 struct flock64 fl64;
4030 struct target_flock64 *target_fl64;
4031 abi_long ret;
4032 int host_cmd = target_to_host_fcntl_cmd(cmd);
4034 if (host_cmd == -TARGET_EINVAL)
4035 return host_cmd;
4037 switch(cmd) {
4038 case TARGET_F_GETLK:
4039 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4040 return -TARGET_EFAULT;
4041 fl.l_type = tswap16(target_fl->l_type);
4042 fl.l_whence = tswap16(target_fl->l_whence);
4043 fl.l_start = tswapl(target_fl->l_start);
4044 fl.l_len = tswapl(target_fl->l_len);
4045 fl.l_pid = tswap32(target_fl->l_pid);
4046 unlock_user_struct(target_fl, arg, 0);
4047 ret = get_errno(fcntl(fd, host_cmd, &fl));
4048 if (ret == 0) {
4049 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4050 return -TARGET_EFAULT;
4051 target_fl->l_type = tswap16(fl.l_type);
4052 target_fl->l_whence = tswap16(fl.l_whence);
4053 target_fl->l_start = tswapl(fl.l_start);
4054 target_fl->l_len = tswapl(fl.l_len);
4055 target_fl->l_pid = tswap32(fl.l_pid);
4056 unlock_user_struct(target_fl, arg, 1);
4058 break;
4060 case TARGET_F_SETLK:
4061 case TARGET_F_SETLKW:
4062 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4063 return -TARGET_EFAULT;
4064 fl.l_type = tswap16(target_fl->l_type);
4065 fl.l_whence = tswap16(target_fl->l_whence);
4066 fl.l_start = tswapl(target_fl->l_start);
4067 fl.l_len = tswapl(target_fl->l_len);
4068 fl.l_pid = tswap32(target_fl->l_pid);
4069 unlock_user_struct(target_fl, arg, 0);
4070 ret = get_errno(fcntl(fd, host_cmd, &fl));
4071 break;
4073 case TARGET_F_GETLK64:
4074 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4075 return -TARGET_EFAULT;
4076 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4077 fl64.l_whence = tswap16(target_fl64->l_whence);
4078 fl64.l_start = tswapl(target_fl64->l_start);
4079 fl64.l_len = tswapl(target_fl64->l_len);
4080 fl64.l_pid = tswap32(target_fl64->l_pid);
4081 unlock_user_struct(target_fl64, arg, 0);
4082 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4083 if (ret == 0) {
4084 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4085 return -TARGET_EFAULT;
4086 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4087 target_fl64->l_whence = tswap16(fl64.l_whence);
4088 target_fl64->l_start = tswapl(fl64.l_start);
4089 target_fl64->l_len = tswapl(fl64.l_len);
4090 target_fl64->l_pid = tswap32(fl64.l_pid);
4091 unlock_user_struct(target_fl64, arg, 1);
4093 break;
4094 case TARGET_F_SETLK64:
4095 case TARGET_F_SETLKW64:
4096 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4097 return -TARGET_EFAULT;
4098 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4099 fl64.l_whence = tswap16(target_fl64->l_whence);
4100 fl64.l_start = tswapl(target_fl64->l_start);
4101 fl64.l_len = tswapl(target_fl64->l_len);
4102 fl64.l_pid = tswap32(target_fl64->l_pid);
4103 unlock_user_struct(target_fl64, arg, 0);
4104 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4105 break;
4107 case TARGET_F_GETFL:
4108 ret = get_errno(fcntl(fd, host_cmd, arg));
4109 if (ret >= 0) {
4110 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4112 break;
4114 case TARGET_F_SETFL:
4115 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4116 break;
4118 case TARGET_F_SETOWN:
4119 case TARGET_F_GETOWN:
4120 case TARGET_F_SETSIG:
4121 case TARGET_F_GETSIG:
4122 case TARGET_F_SETLEASE:
4123 case TARGET_F_GETLEASE:
4124 ret = get_errno(fcntl(fd, host_cmd, arg));
4125 break;
4127 default:
4128 ret = get_errno(fcntl(fd, cmd, arg));
4129 break;
4131 return ret;
4134 #ifdef USE_UID16
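/* Helpers for targets whose uid/gid syscalls use legacy 16-bit IDs: values
   that do not fit are clamped to 65534, the conventional "overflow" uid/gid,
   and a 16-bit -1 is widened back to -1. */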
4136 static inline int high2lowuid(int uid)
4138 if (uid > 65535)
4139 return 65534;
4140 else
4141 return uid;
4144 static inline int high2lowgid(int gid)
4146 if (gid > 65535)
4147 return 65534;
4148 else
4149 return gid;
4152 static inline int low2highuid(int uid)
4154 if ((int16_t)uid == -1)
4155 return -1;
4156 else
4157 return uid;
4160 static inline int low2highgid(int gid)
4162 if ((int16_t)gid == -1)
4163 return -1;
4164 else
4165 return gid;
4167 static inline int tswapid(int id)
4169 return tswap16(id);
4171 #else /* !USE_UID16 */
4172 static inline int high2lowuid(int uid)
4174 return uid;
4176 static inline int high2lowgid(int gid)
4178 return gid;
4180 static inline int low2highuid(int uid)
4182 return uid;
4184 static inline int low2highgid(int gid)
4186 return gid;
4188 static inline int tswapid(int id)
4190 return tswap32(id);
4192 #endif /* USE_UID16 */
4194 void syscall_init(void)
4196 IOCTLEntry *ie;
4197 const argtype *arg_type;
4198 int size;
4199 int i;
4201 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4202 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4203 #include "syscall_types.h"
4204 #undef STRUCT
4205 #undef STRUCT_SPECIAL
4207 /* We patch the ioctl size if necessary. We rely on the fact that
4208 no ioctl has all bits set to '1' in the size field. */
4209 ie = ioctl_entries;
4210 while (ie->target_cmd != 0) {
4211 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4212 TARGET_IOC_SIZEMASK) {
4213 arg_type = ie->arg_type;
4214 if (arg_type[0] != TYPE_PTR) {
4215 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4216 ie->target_cmd);
4217 exit(1);
4219 arg_type++;
4220 size = thunk_type_size(arg_type, 0);
4221 ie->target_cmd = (ie->target_cmd &
4222 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4223 (size << TARGET_IOC_SIZESHIFT);
4226 /* Build target_to_host_errno_table[] table from
4227 * host_to_target_errno_table[]. */
4228 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4229 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4231 /* automatic consistency check if same arch */
4232 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4233 (defined(__x86_64__) && defined(TARGET_X86_64))
4234 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4235 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4236 ie->name, ie->target_cmd, ie->host_cmd);
4238 #endif
4239 ie++;
4243 #if TARGET_ABI_BITS == 32
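/* 32-bit ABIs pass 64-bit file offsets as two 32-bit halves; which half
   carries the high word depends on the target's endianness. */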
4244 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4246 #ifdef TARGET_WORDS_BIGENDIAN
4247 return ((uint64_t)word0 << 32) | word1;
4248 #else
4249 return ((uint64_t)word1 << 32) | word0;
4250 #endif
4252 #else /* TARGET_ABI_BITS == 32 */
4253 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4255 return word0;
4257 #endif /* TARGET_ABI_BITS != 32 */
4259 #ifdef TARGET_NR_truncate64
4260 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4261 abi_long arg2,
4262 abi_long arg3,
4263 abi_long arg4)
4265 #ifdef TARGET_ARM
4266 if (((CPUARMState *)cpu_env)->eabi)
4268 arg2 = arg3;
4269 arg3 = arg4;
4271 #endif
4272 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4274 #endif
4276 #ifdef TARGET_NR_ftruncate64
4277 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4278 abi_long arg2,
4279 abi_long arg3,
4280 abi_long arg4)
4282 #ifdef TARGET_ARM
4283 if (((CPUARMState *)cpu_env)->eabi)
4285 arg2 = arg3;
4286 arg3 = arg4;
4288 #endif
4289 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4291 #endif
4293 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4294 abi_ulong target_addr)
4296 struct target_timespec *target_ts;
4298 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4299 return -TARGET_EFAULT;
4300 host_ts->tv_sec = tswapl(target_ts->tv_sec);
4301 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
4302 unlock_user_struct(target_ts, target_addr, 0);
4303 return 0;
4306 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4307 struct timespec *host_ts)
4309 struct target_timespec *target_ts;
4311 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4312 return -TARGET_EFAULT;
4313 target_ts->tv_sec = tswapl(host_ts->tv_sec);
4314 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
4315 unlock_user_struct(target_ts, target_addr, 1);
4316 return 0;
4319 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4320 static inline abi_long host_to_target_stat64(void *cpu_env,
4321 abi_ulong target_addr,
4322 struct stat *host_st)
4324 #ifdef TARGET_ARM
4325 if (((CPUARMState *)cpu_env)->eabi) {
4326 struct target_eabi_stat64 *target_st;
4328 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4329 return -TARGET_EFAULT;
4330 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4331 __put_user(host_st->st_dev, &target_st->st_dev);
4332 __put_user(host_st->st_ino, &target_st->st_ino);
4333 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4334 __put_user(host_st->st_ino, &target_st->__st_ino);
4335 #endif
4336 __put_user(host_st->st_mode, &target_st->st_mode);
4337 __put_user(host_st->st_nlink, &target_st->st_nlink);
4338 __put_user(host_st->st_uid, &target_st->st_uid);
4339 __put_user(host_st->st_gid, &target_st->st_gid);
4340 __put_user(host_st->st_rdev, &target_st->st_rdev);
4341 __put_user(host_st->st_size, &target_st->st_size);
4342 __put_user(host_st->st_blksize, &target_st->st_blksize);
4343 __put_user(host_st->st_blocks, &target_st->st_blocks);
4344 __put_user(host_st->st_atime, &target_st->target_st_atime);
4345 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4346 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4347 unlock_user_struct(target_st, target_addr, 1);
4348 } else
4349 #endif
4351 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4352 struct target_stat *target_st;
4353 #else
4354 struct target_stat64 *target_st;
4355 #endif
4357 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4358 return -TARGET_EFAULT;
4359 memset(target_st, 0, sizeof(*target_st));
4360 __put_user(host_st->st_dev, &target_st->st_dev);
4361 __put_user(host_st->st_ino, &target_st->st_ino);
4362 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4363 __put_user(host_st->st_ino, &target_st->__st_ino);
4364 #endif
4365 __put_user(host_st->st_mode, &target_st->st_mode);
4366 __put_user(host_st->st_nlink, &target_st->st_nlink);
4367 __put_user(host_st->st_uid, &target_st->st_uid);
4368 __put_user(host_st->st_gid, &target_st->st_gid);
4369 __put_user(host_st->st_rdev, &target_st->st_rdev);
4370 /* XXX: better use of kernel struct */
4371 __put_user(host_st->st_size, &target_st->st_size);
4372 __put_user(host_st->st_blksize, &target_st->st_blksize);
4373 __put_user(host_st->st_blocks, &target_st->st_blocks);
4374 __put_user(host_st->st_atime, &target_st->target_st_atime);
4375 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4376 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4377 unlock_user_struct(target_st, target_addr, 1);
4380 return 0;
4382 #endif
4384 #if defined(CONFIG_USE_NPTL)
4385 /* ??? Using host futex calls even when target atomic operations
4386 are not really atomic probably breaks things. However, implementing
4387 futexes locally would make futexes shared between multiple processes
4388 tricky. That said, they're probably useless anyway because guest atomic
4389 operations won't work either. */
4390 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4391 target_ulong uaddr2, int val3)
4393 struct timespec ts, *pts;
4394 int base_op;
4396 /* ??? We assume FUTEX_* constants are the same on both host
4397 and target. */
4398 #ifdef FUTEX_CMD_MASK
4399 base_op = op & FUTEX_CMD_MASK;
4400 #else
4401 base_op = op;
4402 #endif
4403 switch (base_op) {
4404 case FUTEX_WAIT:
4405 if (timeout) {
4406 pts = &ts;
4407 target_to_host_timespec(pts, timeout);
4408 } else {
4409 pts = NULL;
4411 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4412 pts, NULL, 0));
4413 case FUTEX_WAKE:
4414 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4415 case FUTEX_FD:
4416 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4417 case FUTEX_REQUEUE:
4418 case FUTEX_CMP_REQUEUE:
4419 case FUTEX_WAKE_OP:
4420 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4421 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4422 But the prototype takes a `struct timespec *'; insert casts
4423 to satisfy the compiler. We do not need to tswap TIMEOUT
4424 since it's not compared to guest memory. */
4425 pts = (struct timespec *)(uintptr_t) timeout;
4426 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4427 g2h(uaddr2),
4428 (base_op == FUTEX_CMP_REQUEUE
4429 ? tswap32(val3)
4430 : val3)));
4431 default:
4432 return -TARGET_ENOSYS;
4435 #endif
4437 /* Map host to target signal numbers for the wait family of syscalls.
4438 Assume all other status bits are the same. */
4439 static int host_to_target_waitstatus(int status)
4441 if (WIFSIGNALED(status)) {
4442 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4444 if (WIFSTOPPED(status)) {
4445 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4446 | (status & 0xff);
4448 return status;
4451 int get_osversion(void)
4453 static int osversion;
4454 struct new_utsname buf;
4455 const char *s;
4456 int i, n, tmp;
4457 if (osversion)
4458 return osversion;
4459 if (qemu_uname_release && *qemu_uname_release) {
4460 s = qemu_uname_release;
4461 } else {
4462 if (sys_uname(&buf))
4463 return 0;
4464 s = buf.release;
4466 tmp = 0;
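/* Pack up to three dot-separated components of the release string into one
   byte each, KERNEL_VERSION()-style (e.g. "2.6.32" becomes 0x020620). */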
4467 for (i = 0; i < 3; i++) {
4468 n = 0;
4469 while (*s >= '0' && *s <= '9') {
4470 n *= 10;
4471 n += *s - '0';
4472 s++;
4474 tmp = (tmp << 8) + n;
4475 if (*s == '.')
4476 s++;
4478 osversion = tmp;
4479 return osversion;
4482 /* do_syscall() should always have a single exit point at the end so
4483 that actions, such as logging of syscall results, can be performed.
4484 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4485 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4486 abi_long arg2, abi_long arg3, abi_long arg4,
4487 abi_long arg5, abi_long arg6)
4489 abi_long ret;
4490 struct stat st;
4491 struct statfs stfs;
4492 void *p;
4494 #ifdef DEBUG
4495 gemu_log("syscall %d", num);
4496 #endif
4497 if(do_strace)
4498 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4500 switch(num) {
4501 case TARGET_NR_exit:
4502 #ifdef CONFIG_USE_NPTL
4503 /* In old applications this may be used to implement _exit(2).
4504 However, in threaded applications it is used for thread termination,
4505 and _exit_group is used for application termination.
4506 Do thread termination if we have more than one thread. */
4507 /* FIXME: This probably breaks if a signal arrives. We should probably
4508 be disabling signals. */
4509 if (first_cpu->next_cpu) {
4510 TaskState *ts;
4511 CPUState **lastp;
4512 CPUState *p;
4514 cpu_list_lock();
4515 lastp = &first_cpu;
4516 p = first_cpu;
4517 while (p && p != (CPUState *)cpu_env) {
4518 lastp = &p->next_cpu;
4519 p = p->next_cpu;
4521 /* If we didn't find the CPU for this thread then something is
4522 horribly wrong. */
4523 if (!p)
4524 abort();
4525 /* Remove the CPU from the list. */
4526 *lastp = p->next_cpu;
4527 cpu_list_unlock();
4528 ts = ((CPUState *)cpu_env)->opaque;
4529 if (ts->child_tidptr) {
4530 put_user_u32(0, ts->child_tidptr);
4531 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4532 NULL, NULL, 0);
4534 thread_env = NULL;
4535 qemu_free(cpu_env);
4536 qemu_free(ts);
4537 pthread_exit(NULL);
4539 #endif
4540 #ifdef TARGET_GPROF
4541 _mcleanup();
4542 #endif
4543 gdb_exit(cpu_env, arg1);
4544 _exit(arg1);
4545 ret = 0; /* avoid warning */
4546 break;
4547 case TARGET_NR_read:
4548 if (arg3 == 0)
4549 ret = 0;
4550 else {
4551 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4552 goto efault;
4553 ret = get_errno(read(arg1, p, arg3));
4554 unlock_user(p, arg2, ret);
4556 break;
4557 case TARGET_NR_write:
4558 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4559 goto efault;
4560 ret = get_errno(write(arg1, p, arg3));
4561 unlock_user(p, arg2, 0);
4562 break;
4563 case TARGET_NR_open:
4564 if (!(p = lock_user_string(arg1)))
4565 goto efault;
4566 ret = get_errno(open(path(p),
4567 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4568 arg3));
4569 unlock_user(p, arg1, 0);
4570 break;
4571 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4572 case TARGET_NR_openat:
4573 if (!(p = lock_user_string(arg2)))
4574 goto efault;
4575 ret = get_errno(sys_openat(arg1,
4576 path(p),
4577 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4578 arg4));
4579 unlock_user(p, arg2, 0);
4580 break;
4581 #endif
4582 case TARGET_NR_close:
4583 ret = get_errno(close(arg1));
4584 break;
4585 case TARGET_NR_brk:
4586 ret = do_brk(arg1);
4587 break;
4588 case TARGET_NR_fork:
4589 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4590 break;
4591 #ifdef TARGET_NR_waitpid
4592 case TARGET_NR_waitpid:
4594 int status;
4595 ret = get_errno(waitpid(arg1, &status, arg3));
4596 if (!is_error(ret) && arg2
4597 && put_user_s32(host_to_target_waitstatus(status), arg2))
4598 goto efault;
4600 break;
4601 #endif
4602 #ifdef TARGET_NR_waitid
4603 case TARGET_NR_waitid:
4605 siginfo_t info;
4606 info.si_pid = 0;
4607 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4608 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4609 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4610 goto efault;
4611 host_to_target_siginfo(p, &info);
4612 unlock_user(p, arg3, sizeof(target_siginfo_t));
4615 break;
4616 #endif
4617 #ifdef TARGET_NR_creat /* not on alpha */
4618 case TARGET_NR_creat:
4619 if (!(p = lock_user_string(arg1)))
4620 goto efault;
4621 ret = get_errno(creat(p, arg2));
4622 unlock_user(p, arg1, 0);
4623 break;
4624 #endif
4625 case TARGET_NR_link:
4627 void * p2;
4628 p = lock_user_string(arg1);
4629 p2 = lock_user_string(arg2);
4630 if (!p || !p2)
4631 ret = -TARGET_EFAULT;
4632 else
4633 ret = get_errno(link(p, p2));
4634 unlock_user(p2, arg2, 0);
4635 unlock_user(p, arg1, 0);
4637 break;
4638 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4639 case TARGET_NR_linkat:
4641 void * p2 = NULL;
4642 if (!arg2 || !arg4)
4643 goto efault;
4644 p = lock_user_string(arg2);
4645 p2 = lock_user_string(arg4);
4646 if (!p || !p2)
4647 ret = -TARGET_EFAULT;
4648 else
4649 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4650 unlock_user(p, arg2, 0);
4651 unlock_user(p2, arg4, 0);
4653 break;
4654 #endif
4655 case TARGET_NR_unlink:
4656 if (!(p = lock_user_string(arg1)))
4657 goto efault;
4658 ret = get_errno(unlink(p));
4659 unlock_user(p, arg1, 0);
4660 break;
4661 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4662 case TARGET_NR_unlinkat:
4663 if (!(p = lock_user_string(arg2)))
4664 goto efault;
4665 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4666 unlock_user(p, arg2, 0);
4667 break;
4668 #endif
4669 case TARGET_NR_execve:
4671 char **argp, **envp;
4672 int argc, envc;
4673 abi_ulong gp;
4674 abi_ulong guest_argp;
4675 abi_ulong guest_envp;
4676 abi_ulong addr;
4677 char **q;
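/* Count the guest argv/envp pointer arrays, then build NULL-terminated host
   arrays of locked guest strings to hand to the host execve(). */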
4679 argc = 0;
4680 guest_argp = arg2;
4681 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4682 if (get_user_ual(addr, gp))
4683 goto efault;
4684 if (!addr)
4685 break;
4686 argc++;
4688 envc = 0;
4689 guest_envp = arg3;
4690 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4691 if (get_user_ual(addr, gp))
4692 goto efault;
4693 if (!addr)
4694 break;
4695 envc++;
4698 argp = alloca((argc + 1) * sizeof(void *));
4699 envp = alloca((envc + 1) * sizeof(void *));
4701 for (gp = guest_argp, q = argp; gp;
4702 gp += sizeof(abi_ulong), q++) {
4703 if (get_user_ual(addr, gp))
4704 goto execve_efault;
4705 if (!addr)
4706 break;
4707 if (!(*q = lock_user_string(addr)))
4708 goto execve_efault;
4710 *q = NULL;
4712 for (gp = guest_envp, q = envp; gp;
4713 gp += sizeof(abi_ulong), q++) {
4714 if (get_user_ual(addr, gp))
4715 goto execve_efault;
4716 if (!addr)
4717 break;
4718 if (!(*q = lock_user_string(addr)))
4719 goto execve_efault;
4721 *q = NULL;
4723 if (!(p = lock_user_string(arg1)))
4724 goto execve_efault;
4725 ret = get_errno(execve(p, argp, envp));
4726 unlock_user(p, arg1, 0);
4728 goto execve_end;
4730 execve_efault:
4731 ret = -TARGET_EFAULT;
4733 execve_end:
4734 for (gp = guest_argp, q = argp; *q;
4735 gp += sizeof(abi_ulong), q++) {
4736 if (get_user_ual(addr, gp)
4737 || !addr)
4738 break;
4739 unlock_user(*q, addr, 0);
4741 for (gp = guest_envp, q = envp; *q;
4742 gp += sizeof(abi_ulong), q++) {
4743 if (get_user_ual(addr, gp)
4744 || !addr)
4745 break;
4746 unlock_user(*q, addr, 0);
4749 break;
4750 case TARGET_NR_chdir:
4751 if (!(p = lock_user_string(arg1)))
4752 goto efault;
4753 ret = get_errno(chdir(p));
4754 unlock_user(p, arg1, 0);
4755 break;
4756 #ifdef TARGET_NR_time
4757 case TARGET_NR_time:
4759 time_t host_time;
4760 ret = get_errno(time(&host_time));
4761 if (!is_error(ret)
4762 && arg1
4763 && put_user_sal(host_time, arg1))
4764 goto efault;
4766 break;
4767 #endif
4768 case TARGET_NR_mknod:
4769 if (!(p = lock_user_string(arg1)))
4770 goto efault;
4771 ret = get_errno(mknod(p, arg2, arg3));
4772 unlock_user(p, arg1, 0);
4773 break;
4774 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4775 case TARGET_NR_mknodat:
4776 if (!(p = lock_user_string(arg2)))
4777 goto efault;
4778 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4779 unlock_user(p, arg2, 0);
4780 break;
4781 #endif
4782 case TARGET_NR_chmod:
4783 if (!(p = lock_user_string(arg1)))
4784 goto efault;
4785 ret = get_errno(chmod(p, arg2));
4786 unlock_user(p, arg1, 0);
4787 break;
4788 #ifdef TARGET_NR_break
4789 case TARGET_NR_break:
4790 goto unimplemented;
4791 #endif
4792 #ifdef TARGET_NR_oldstat
4793 case TARGET_NR_oldstat:
4794 goto unimplemented;
4795 #endif
4796 case TARGET_NR_lseek:
4797 ret = get_errno(lseek(arg1, arg2, arg3));
4798 break;
4799 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4800 /* Alpha specific */
4801 case TARGET_NR_getxpid:
4802 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
4803 ret = get_errno(getpid());
4804 break;
4805 #endif
4806 #ifdef TARGET_NR_getpid
4807 case TARGET_NR_getpid:
4808 ret = get_errno(getpid());
4809 break;
4810 #endif
4811 case TARGET_NR_mount:
4813 /* need to look at the data field */
4814 void *p2, *p3;
4815 p = lock_user_string(arg1);
4816 p2 = lock_user_string(arg2);
4817 p3 = lock_user_string(arg3);
4818 if (!p || !p2 || !p3)
4819 ret = -TARGET_EFAULT;
4820 else {
4821 /* FIXME - arg5 should be locked, but it isn't clear how to
4822 * do that since it's not guaranteed to be a NULL-terminated
4823 * string. */
4825 if ( ! arg5 )
4826 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
4827 else
4828 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4830 unlock_user(p, arg1, 0);
4831 unlock_user(p2, arg2, 0);
4832 unlock_user(p3, arg3, 0);
4833 break;
4835 #ifdef TARGET_NR_umount
4836 case TARGET_NR_umount:
4837 if (!(p = lock_user_string(arg1)))
4838 goto efault;
4839 ret = get_errno(umount(p));
4840 unlock_user(p, arg1, 0);
4841 break;
4842 #endif
4843 #ifdef TARGET_NR_stime /* not on alpha */
4844 case TARGET_NR_stime:
4846 time_t host_time;
4847 if (get_user_sal(host_time, arg1))
4848 goto efault;
4849 ret = get_errno(stime(&host_time));
4851 break;
4852 #endif
4853 case TARGET_NR_ptrace:
4854 goto unimplemented;
4855 #ifdef TARGET_NR_alarm /* not on alpha */
4856 case TARGET_NR_alarm:
4857 ret = alarm(arg1);
4858 break;
4859 #endif
4860 #ifdef TARGET_NR_oldfstat
4861 case TARGET_NR_oldfstat:
4862 goto unimplemented;
4863 #endif
4864 #ifdef TARGET_NR_pause /* not on alpha */
4865 case TARGET_NR_pause:
4866 ret = get_errno(pause());
4867 break;
4868 #endif
4869 #ifdef TARGET_NR_utime
4870 case TARGET_NR_utime:
4872 struct utimbuf tbuf, *host_tbuf;
4873 struct target_utimbuf *target_tbuf;
4874 if (arg2) {
4875 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4876 goto efault;
4877 tbuf.actime = tswapl(target_tbuf->actime);
4878 tbuf.modtime = tswapl(target_tbuf->modtime);
4879 unlock_user_struct(target_tbuf, arg2, 0);
4880 host_tbuf = &tbuf;
4881 } else {
4882 host_tbuf = NULL;
4884 if (!(p = lock_user_string(arg1)))
4885 goto efault;
4886 ret = get_errno(utime(p, host_tbuf));
4887 unlock_user(p, arg1, 0);
4889 break;
4890 #endif
4891 case TARGET_NR_utimes:
4893 struct timeval *tvp, tv[2];
4894 if (arg2) {
4895 if (copy_from_user_timeval(&tv[0], arg2)
4896 || copy_from_user_timeval(&tv[1],
4897 arg2 + sizeof(struct target_timeval)))
4898 goto efault;
4899 tvp = tv;
4900 } else {
4901 tvp = NULL;
4903 if (!(p = lock_user_string(arg1)))
4904 goto efault;
4905 ret = get_errno(utimes(p, tvp));
4906 unlock_user(p, arg1, 0);
4908 break;
4909 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4910 case TARGET_NR_futimesat:
4912 struct timeval *tvp, tv[2];
4913 if (arg3) {
4914 if (copy_from_user_timeval(&tv[0], arg3)
4915 || copy_from_user_timeval(&tv[1],
4916 arg3 + sizeof(struct target_timeval)))
4917 goto efault;
4918 tvp = tv;
4919 } else {
4920 tvp = NULL;
4922 if (!(p = lock_user_string(arg2)))
4923 goto efault;
4924 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4925 unlock_user(p, arg2, 0);
4927 break;
4928 #endif
4929 #ifdef TARGET_NR_stty
4930 case TARGET_NR_stty:
4931 goto unimplemented;
4932 #endif
4933 #ifdef TARGET_NR_gtty
4934 case TARGET_NR_gtty:
4935 goto unimplemented;
4936 #endif
4937 case TARGET_NR_access:
4938 if (!(p = lock_user_string(arg1)))
4939 goto efault;
4940 ret = get_errno(access(path(p), arg2));
4941 unlock_user(p, arg1, 0);
4942 break;
4943 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4944 case TARGET_NR_faccessat:
4945 if (!(p = lock_user_string(arg2)))
4946 goto efault;
4947 ret = get_errno(sys_faccessat(arg1, p, arg3));
4948 unlock_user(p, arg2, 0);
4949 break;
4950 #endif
4951 #ifdef TARGET_NR_nice /* not on alpha */
4952 case TARGET_NR_nice:
4953 ret = get_errno(nice(arg1));
4954 break;
4955 #endif
4956 #ifdef TARGET_NR_ftime
4957 case TARGET_NR_ftime:
4958 goto unimplemented;
4959 #endif
4960 case TARGET_NR_sync:
4961 sync();
4962 ret = 0;
4963 break;
4964 case TARGET_NR_kill:
4965 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
4966 break;
4967 case TARGET_NR_rename:
4969 void *p2;
4970 p = lock_user_string(arg1);
4971 p2 = lock_user_string(arg2);
4972 if (!p || !p2)
4973 ret = -TARGET_EFAULT;
4974 else
4975 ret = get_errno(rename(p, p2));
4976 unlock_user(p2, arg2, 0);
4977 unlock_user(p, arg1, 0);
4979 break;
4980 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4981 case TARGET_NR_renameat:
4983 void *p2;
4984 p = lock_user_string(arg2);
4985 p2 = lock_user_string(arg4);
4986 if (!p || !p2)
4987 ret = -TARGET_EFAULT;
4988 else
4989 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
4990 unlock_user(p2, arg4, 0);
4991 unlock_user(p, arg2, 0);
4993 break;
4994 #endif
4995 case TARGET_NR_mkdir:
4996 if (!(p = lock_user_string(arg1)))
4997 goto efault;
4998 ret = get_errno(mkdir(p, arg2));
4999 unlock_user(p, arg1, 0);
5000 break;
5001 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5002 case TARGET_NR_mkdirat:
5003 if (!(p = lock_user_string(arg2)))
5004 goto efault;
5005 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5006 unlock_user(p, arg2, 0);
5007 break;
5008 #endif
5009 case TARGET_NR_rmdir:
5010 if (!(p = lock_user_string(arg1)))
5011 goto efault;
5012 ret = get_errno(rmdir(p));
5013 unlock_user(p, arg1, 0);
5014 break;
5015 case TARGET_NR_dup:
5016 ret = get_errno(dup(arg1));
5017 break;
5018 case TARGET_NR_pipe:
5019 ret = do_pipe(cpu_env, arg1, 0, 0);
5020 break;
5021 #ifdef TARGET_NR_pipe2
5022 case TARGET_NR_pipe2:
5023 ret = do_pipe(cpu_env, arg1, arg2, 1);
5024 break;
5025 #endif
5026 case TARGET_NR_times:
5028 struct target_tms *tmsp;
5029 struct tms tms;
5030 ret = get_errno(times(&tms));
5031 if (arg1) {
5032 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5033 if (!tmsp)
5034 goto efault;
5035 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
5036 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
5037 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
5038 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
5040 if (!is_error(ret))
5041 ret = host_to_target_clock_t(ret);
5043 break;
5044 #ifdef TARGET_NR_prof
5045 case TARGET_NR_prof:
5046 goto unimplemented;
5047 #endif
5048 #ifdef TARGET_NR_signal
5049 case TARGET_NR_signal:
5050 goto unimplemented;
5051 #endif
5052 case TARGET_NR_acct:
5053 if (arg1 == 0) {
5054 ret = get_errno(acct(NULL));
5055 } else {
5056 if (!(p = lock_user_string(arg1)))
5057 goto efault;
5058 ret = get_errno(acct(path(p)));
5059 unlock_user(p, arg1, 0);
5061 break;
5062 #ifdef TARGET_NR_umount2 /* not on alpha */
5063 case TARGET_NR_umount2:
5064 if (!(p = lock_user_string(arg1)))
5065 goto efault;
5066 ret = get_errno(umount2(p, arg2));
5067 unlock_user(p, arg1, 0);
5068 break;
5069 #endif
5070 #ifdef TARGET_NR_lock
5071 case TARGET_NR_lock:
5072 goto unimplemented;
5073 #endif
5074 case TARGET_NR_ioctl:
5075 ret = do_ioctl(arg1, arg2, arg3);
5076 break;
5077 case TARGET_NR_fcntl:
5078 ret = do_fcntl(arg1, arg2, arg3);
5079 break;
5080 #ifdef TARGET_NR_mpx
5081 case TARGET_NR_mpx:
5082 goto unimplemented;
5083 #endif
5084 case TARGET_NR_setpgid:
5085 ret = get_errno(setpgid(arg1, arg2));
5086 break;
5087 #ifdef TARGET_NR_ulimit
5088 case TARGET_NR_ulimit:
5089 goto unimplemented;
5090 #endif
5091 #ifdef TARGET_NR_oldolduname
5092 case TARGET_NR_oldolduname:
5093 goto unimplemented;
5094 #endif
5095 case TARGET_NR_umask:
5096 ret = get_errno(umask(arg1));
5097 break;
5098 case TARGET_NR_chroot:
5099 if (!(p = lock_user_string(arg1)))
5100 goto efault;
5101 ret = get_errno(chroot(p));
5102 unlock_user(p, arg1, 0);
5103 break;
5104 case TARGET_NR_ustat:
5105 goto unimplemented;
5106 case TARGET_NR_dup2:
5107 ret = get_errno(dup2(arg1, arg2));
5108 break;
5109 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5110 case TARGET_NR_dup3:
5111 ret = get_errno(dup3(arg1, arg2, arg3));
5112 break;
5113 #endif
5114 #ifdef TARGET_NR_getppid /* not on alpha */
5115 case TARGET_NR_getppid:
5116 ret = get_errno(getppid());
5117 break;
5118 #endif
5119 case TARGET_NR_getpgrp:
5120 ret = get_errno(getpgrp());
5121 break;
5122 case TARGET_NR_setsid:
5123 ret = get_errno(setsid());
5124 break;
5125 #ifdef TARGET_NR_sigaction
5126 case TARGET_NR_sigaction:
5128 #if defined(TARGET_ALPHA)
5129 struct target_sigaction act, oact, *pact = 0;
5130 struct target_old_sigaction *old_act;
5131 if (arg2) {
5132 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5133 goto efault;
5134 act._sa_handler = old_act->_sa_handler;
5135 target_siginitset(&act.sa_mask, old_act->sa_mask);
5136 act.sa_flags = old_act->sa_flags;
5137 act.sa_restorer = 0;
5138 unlock_user_struct(old_act, arg2, 0);
5139 pact = &act;
5141 ret = get_errno(do_sigaction(arg1, pact, &oact));
5142 if (!is_error(ret) && arg3) {
5143 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5144 goto efault;
5145 old_act->_sa_handler = oact._sa_handler;
5146 old_act->sa_mask = oact.sa_mask.sig[0];
5147 old_act->sa_flags = oact.sa_flags;
5148 unlock_user_struct(old_act, arg3, 1);
5150 #elif defined(TARGET_MIPS)
5151 struct target_sigaction act, oact, *pact, *old_act;
5153 if (arg2) {
5154 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5155 goto efault;
5156 act._sa_handler = old_act->_sa_handler;
5157 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5158 act.sa_flags = old_act->sa_flags;
5159 unlock_user_struct(old_act, arg2, 0);
5160 pact = &act;
5161 } else {
5162 pact = NULL;
5165 ret = get_errno(do_sigaction(arg1, pact, &oact));
5167 if (!is_error(ret) && arg3) {
5168 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5169 goto efault;
5170 old_act->_sa_handler = oact._sa_handler;
5171 old_act->sa_flags = oact.sa_flags;
5172 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5173 old_act->sa_mask.sig[1] = 0;
5174 old_act->sa_mask.sig[2] = 0;
5175 old_act->sa_mask.sig[3] = 0;
5176 unlock_user_struct(old_act, arg3, 1);
5178 #else
5179 struct target_old_sigaction *old_act;
5180 struct target_sigaction act, oact, *pact;
5181 if (arg2) {
5182 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5183 goto efault;
5184 act._sa_handler = old_act->_sa_handler;
5185 target_siginitset(&act.sa_mask, old_act->sa_mask);
5186 act.sa_flags = old_act->sa_flags;
5187 act.sa_restorer = old_act->sa_restorer;
5188 unlock_user_struct(old_act, arg2, 0);
5189 pact = &act;
5190 } else {
5191 pact = NULL;
5193 ret = get_errno(do_sigaction(arg1, pact, &oact));
5194 if (!is_error(ret) && arg3) {
5195 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5196 goto efault;
5197 old_act->_sa_handler = oact._sa_handler;
5198 old_act->sa_mask = oact.sa_mask.sig[0];
5199 old_act->sa_flags = oact.sa_flags;
5200 old_act->sa_restorer = oact.sa_restorer;
5201 unlock_user_struct(old_act, arg3, 1);
5203 #endif
5205 break;
5206 #endif
5207 case TARGET_NR_rt_sigaction:
5209 #if defined(TARGET_ALPHA)
5210 struct target_sigaction act, oact, *pact = 0;
5211 struct target_rt_sigaction *rt_act;
5212 /* ??? arg4 == sizeof(sigset_t). */
5213 if (arg2) {
5214 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5215 goto efault;
5216 act._sa_handler = rt_act->_sa_handler;
5217 act.sa_mask = rt_act->sa_mask;
5218 act.sa_flags = rt_act->sa_flags;
5219 act.sa_restorer = arg5;
5220 unlock_user_struct(rt_act, arg2, 0);
5221 pact = &act;
5223 ret = get_errno(do_sigaction(arg1, pact, &oact));
5224 if (!is_error(ret) && arg3) {
5225 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5226 goto efault;
5227 rt_act->_sa_handler = oact._sa_handler;
5228 rt_act->sa_mask = oact.sa_mask;
5229 rt_act->sa_flags = oact.sa_flags;
5230 unlock_user_struct(rt_act, arg3, 1);
5232 #else
5233 struct target_sigaction *act;
5234 struct target_sigaction *oact;
5236 if (arg2) {
5237 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5238 goto efault;
5239 } else
5240 act = NULL;
5241 if (arg3) {
5242 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5243 ret = -TARGET_EFAULT;
5244 goto rt_sigaction_fail;
5246 } else
5247 oact = NULL;
5248 ret = get_errno(do_sigaction(arg1, act, oact));
5249 rt_sigaction_fail:
5250 if (act)
5251 unlock_user_struct(act, arg2, 0);
5252 if (oact)
5253 unlock_user_struct(oact, arg3, 1);
5254 #endif
5256 break;
5257 #ifdef TARGET_NR_sgetmask /* not on alpha */
5258 case TARGET_NR_sgetmask:
5260 sigset_t cur_set;
5261 abi_ulong target_set;
5262 sigprocmask(0, NULL, &cur_set);
5263 host_to_target_old_sigset(&target_set, &cur_set);
5264 ret = target_set;
5266 break;
5267 #endif
5268 #ifdef TARGET_NR_ssetmask /* not on alpha */
5269 case TARGET_NR_ssetmask:
5271 sigset_t set, oset, cur_set;
5272 abi_ulong target_set = arg1;
5273 sigprocmask(0, NULL, &cur_set);
5274 target_to_host_old_sigset(&set, &target_set);
5275 sigorset(&set, &set, &cur_set);
5276 sigprocmask(SIG_SETMASK, &set, &oset);
5277 host_to_target_old_sigset(&target_set, &oset);
5278 ret = target_set;
5280 break;
5281 #endif
5282 #ifdef TARGET_NR_sigprocmask
5283 case TARGET_NR_sigprocmask:
5285 #if defined(TARGET_ALPHA)
5286 sigset_t set, oldset;
5287 abi_ulong mask;
5288 int how;
5290 switch (arg1) {
5291 case TARGET_SIG_BLOCK:
5292 how = SIG_BLOCK;
5293 break;
5294 case TARGET_SIG_UNBLOCK:
5295 how = SIG_UNBLOCK;
5296 break;
5297 case TARGET_SIG_SETMASK:
5298 how = SIG_SETMASK;
5299 break;
5300 default:
5301 ret = -TARGET_EINVAL;
5302 goto fail;
5304 mask = arg2;
5305 target_to_host_old_sigset(&set, &mask);
5307 ret = get_errno(sigprocmask(how, &set, &oldset));
5309 if (!is_error(ret)) {
5310 host_to_target_old_sigset(&mask, &oldset);
5311 ret = mask;
5312 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5314 #else
5315 sigset_t set, oldset, *set_ptr;
5316 int how;
5318 if (arg2) {
5319 switch (arg1) {
5320 case TARGET_SIG_BLOCK:
5321 how = SIG_BLOCK;
5322 break;
5323 case TARGET_SIG_UNBLOCK:
5324 how = SIG_UNBLOCK;
5325 break;
5326 case TARGET_SIG_SETMASK:
5327 how = SIG_SETMASK;
5328 break;
5329 default:
5330 ret = -TARGET_EINVAL;
5331 goto fail;
5333 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5334 goto efault;
5335 target_to_host_old_sigset(&set, p);
5336 unlock_user(p, arg2, 0);
5337 set_ptr = &set;
5338 } else {
5339 how = 0;
5340 set_ptr = NULL;
5342 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5343 if (!is_error(ret) && arg3) {
5344 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5345 goto efault;
5346 host_to_target_old_sigset(p, &oldset);
5347 unlock_user(p, arg3, sizeof(target_sigset_t));
5349 #endif
5351 break;
5352 #endif
5353 case TARGET_NR_rt_sigprocmask:
5355 int how = arg1;
5356 sigset_t set, oldset, *set_ptr;
5358 if (arg2) {
5359 switch(how) {
5360 case TARGET_SIG_BLOCK:
5361 how = SIG_BLOCK;
5362 break;
5363 case TARGET_SIG_UNBLOCK:
5364 how = SIG_UNBLOCK;
5365 break;
5366 case TARGET_SIG_SETMASK:
5367 how = SIG_SETMASK;
5368 break;
5369 default:
5370 ret = -TARGET_EINVAL;
5371 goto fail;
5373 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5374 goto efault;
5375 target_to_host_sigset(&set, p);
5376 unlock_user(p, arg2, 0);
5377 set_ptr = &set;
5378 } else {
5379 how = 0;
5380 set_ptr = NULL;
5382 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5383 if (!is_error(ret) && arg3) {
5384 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5385 goto efault;
5386 host_to_target_sigset(p, &oldset);
5387 unlock_user(p, arg3, sizeof(target_sigset_t));
5390 break;
5391 #ifdef TARGET_NR_sigpending
5392 case TARGET_NR_sigpending:
5394 sigset_t set;
5395 ret = get_errno(sigpending(&set));
5396 if (!is_error(ret)) {
5397 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5398 goto efault;
5399 host_to_target_old_sigset(p, &set);
5400 unlock_user(p, arg1, sizeof(target_sigset_t));
5403 break;
5404 #endif
5405 case TARGET_NR_rt_sigpending:
5407 sigset_t set;
5408 ret = get_errno(sigpending(&set));
5409 if (!is_error(ret)) {
5410 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5411 goto efault;
5412 host_to_target_sigset(p, &set);
5413 unlock_user(p, arg1, sizeof(target_sigset_t));
5416 break;
5417 #ifdef TARGET_NR_sigsuspend
5418 case TARGET_NR_sigsuspend:
5420 sigset_t set;
5421 #if defined(TARGET_ALPHA)
5422 abi_ulong mask = arg1;
5423 target_to_host_old_sigset(&set, &mask);
5424 #else
5425 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5426 goto efault;
5427 target_to_host_old_sigset(&set, p);
5428 unlock_user(p, arg1, 0);
5429 #endif
5430 ret = get_errno(sigsuspend(&set));
5432 break;
5433 #endif
5434 case TARGET_NR_rt_sigsuspend:
5436 sigset_t set;
5437 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5438 goto efault;
5439 target_to_host_sigset(&set, p);
5440 unlock_user(p, arg1, 0);
5441 ret = get_errno(sigsuspend(&set));
5443 break;
5444 case TARGET_NR_rt_sigtimedwait:
5446 sigset_t set;
5447 struct timespec uts, *puts;
5448 siginfo_t uinfo;
5450 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5451 goto efault;
5452 target_to_host_sigset(&set, p);
5453 unlock_user(p, arg1, 0);
5454 if (arg3) {
5455 puts = &uts;
5456 target_to_host_timespec(puts, arg3);
5457 } else {
5458 puts = NULL;
5460 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5461 if (!is_error(ret) && arg2) {
5462 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5463 goto efault;
5464 host_to_target_siginfo(p, &uinfo);
5465 unlock_user(p, arg2, sizeof(target_siginfo_t));
5468 break;
5469 case TARGET_NR_rt_sigqueueinfo:
5471 siginfo_t uinfo;
5472 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
5473 goto efault;
5474 target_to_host_siginfo(&uinfo, p);
5475 unlock_user(p, arg3, 0);
5476 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5478 break;
5479 #ifdef TARGET_NR_sigreturn
5480 case TARGET_NR_sigreturn:
5481 /* NOTE: ret is eax, so no transcoding needs to be done */
5482 ret = do_sigreturn(cpu_env);
5483 break;
5484 #endif
5485 case TARGET_NR_rt_sigreturn:
5486 /* NOTE: ret is eax, so no transcoding needs to be done */
5487 ret = do_rt_sigreturn(cpu_env);
5488 break;
5489 case TARGET_NR_sethostname:
5490 if (!(p = lock_user_string(arg1)))
5491 goto efault;
5492 ret = get_errno(sethostname(p, arg2));
5493 unlock_user(p, arg1, 0);
5494 break;
5495 case TARGET_NR_setrlimit:
5497 int resource = arg1;
5498 struct target_rlimit *target_rlim;
5499 struct rlimit rlim;
5500 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5501 goto efault;
5502 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5503 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5504 unlock_user_struct(target_rlim, arg2, 0);
5505 ret = get_errno(setrlimit(resource, &rlim));
5507 break;
5508 case TARGET_NR_getrlimit:
5510 int resource = arg1;
5511 struct target_rlimit *target_rlim;
5512 struct rlimit rlim;
5514 ret = get_errno(getrlimit(resource, &rlim));
5515 if (!is_error(ret)) {
5516 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5517 goto efault;
5518 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5519 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5520 unlock_user_struct(target_rlim, arg2, 1);
5523 break;
5524 case TARGET_NR_getrusage:
5526 struct rusage rusage;
5527 ret = get_errno(getrusage(arg1, &rusage));
5528 if (!is_error(ret)) {
5529 host_to_target_rusage(arg2, &rusage);
5532 break;
5533 case TARGET_NR_gettimeofday:
5535 struct timeval tv;
5536 ret = get_errno(gettimeofday(&tv, NULL));
5537 if (!is_error(ret)) {
5538 if (copy_to_user_timeval(arg1, &tv))
5539 goto efault;
5542 break;
5543 case TARGET_NR_settimeofday:
5545 struct timeval tv;
5546 if (copy_from_user_timeval(&tv, arg1))
5547 goto efault;
5548 ret = get_errno(settimeofday(&tv, NULL));
5550 break;
5551 #ifdef TARGET_NR_select
5552 case TARGET_NR_select:
5554 struct target_sel_arg_struct *sel;
5555 abi_ulong inp, outp, exp, tvp;
5556 long nsel;
5558 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5559 goto efault;
5560 nsel = tswapl(sel->n);
5561 inp = tswapl(sel->inp);
5562 outp = tswapl(sel->outp);
5563 exp = tswapl(sel->exp);
5564 tvp = tswapl(sel->tvp);
5565 unlock_user_struct(sel, arg1, 0);
5566 ret = do_select(nsel, inp, outp, exp, tvp);
5568 break;
5569 #endif
5570 #ifdef TARGET_NR_pselect6
5571 case TARGET_NR_pselect6:
5572 goto unimplemented_nowarn;
5573 #endif
5574 case TARGET_NR_symlink:
5576 void *p2;
5577 p = lock_user_string(arg1);
5578 p2 = lock_user_string(arg2);
5579 if (!p || !p2)
5580 ret = -TARGET_EFAULT;
5581 else
5582 ret = get_errno(symlink(p, p2));
5583 unlock_user(p2, arg2, 0);
5584 unlock_user(p, arg1, 0);
5586 break;
5587 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5588 case TARGET_NR_symlinkat:
5590 void *p2;
5591 p = lock_user_string(arg1);
5592 p2 = lock_user_string(arg3);
5593 if (!p || !p2)
5594 ret = -TARGET_EFAULT;
5595 else
5596 ret = get_errno(sys_symlinkat(p, arg2, p2));
5597 unlock_user(p2, arg3, 0);
5598 unlock_user(p, arg1, 0);
5600 break;
5601 #endif
5602 #ifdef TARGET_NR_oldlstat
5603 case TARGET_NR_oldlstat:
5604 goto unimplemented;
5605 #endif
5606 case TARGET_NR_readlink:
5608 void *p2, *temp;
5609 p = lock_user_string(arg1);
5610 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5611 if (!p || !p2)
5612 ret = -TARGET_EFAULT;
5613 else {
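/* Under user-mode emulation /proc/self/exe points at the qemu binary itself,
   so answer with the path of the guest executable instead. */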
5614 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5615 char real[PATH_MAX];
5616 temp = realpath(exec_path,real);
5617 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
5618 snprintf((char *)p2, arg3, "%s", real);
5620 else
5621 ret = get_errno(readlink(path(p), p2, arg3));
5623 unlock_user(p2, arg2, ret);
5624 unlock_user(p, arg1, 0);
5626 break;
5627 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5628 case TARGET_NR_readlinkat:
5630 void *p2;
5631 p = lock_user_string(arg2);
5632 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5633 if (!p || !p2)
5634 ret = -TARGET_EFAULT;
5635 else
5636 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5637 unlock_user(p2, arg3, ret);
5638 unlock_user(p, arg2, 0);
5640 break;
5641 #endif
5642 #ifdef TARGET_NR_uselib
5643 case TARGET_NR_uselib:
5644 goto unimplemented;
5645 #endif
5646 #ifdef TARGET_NR_swapon
5647 case TARGET_NR_swapon:
5648 if (!(p = lock_user_string(arg1)))
5649 goto efault;
5650 ret = get_errno(swapon(p, arg2));
5651 unlock_user(p, arg1, 0);
5652 break;
5653 #endif
5654 case TARGET_NR_reboot:
5655 goto unimplemented;
5656 #ifdef TARGET_NR_readdir
5657 case TARGET_NR_readdir:
5658 goto unimplemented;
5659 #endif
5660 #ifdef TARGET_NR_mmap
5661 case TARGET_NR_mmap:
5662 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE)
5664 abi_ulong *v;
5665 abi_ulong v1, v2, v3, v4, v5, v6;
5666 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5667 goto efault;
5668 v1 = tswapl(v[0]);
5669 v2 = tswapl(v[1]);
5670 v3 = tswapl(v[2]);
5671 v4 = tswapl(v[3]);
5672 v5 = tswapl(v[4]);
5673 v6 = tswapl(v[5]);
5674 unlock_user(v, arg1, 0);
5675 ret = get_errno(target_mmap(v1, v2, v3,
5676 target_to_host_bitmask(v4, mmap_flags_tbl),
5677 v5, v6));
5679 #else
5680 ret = get_errno(target_mmap(arg1, arg2, arg3,
5681 target_to_host_bitmask(arg4, mmap_flags_tbl),
5682 arg5,
5683 arg6));
5684 #endif
5685 break;
5686 #endif
5687 #ifdef TARGET_NR_mmap2
5688 case TARGET_NR_mmap2:
5689 #ifndef MMAP_SHIFT
5690 #define MMAP_SHIFT 12
5691 #endif
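/* mmap2 passes the file offset in units of (1 << MMAP_SHIFT)-byte pages,
   so scale it back into a byte offset for target_mmap(). */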
5692 ret = get_errno(target_mmap(arg1, arg2, arg3,
5693 target_to_host_bitmask(arg4, mmap_flags_tbl),
5694 arg5,
5695 arg6 << MMAP_SHIFT));
5696 break;
5697 #endif
5698 case TARGET_NR_munmap:
5699 ret = get_errno(target_munmap(arg1, arg2));
5700 break;
5701 case TARGET_NR_mprotect:
5703 TaskState *ts = ((CPUState *)cpu_env)->opaque;
5704 /* Special hack to detect libc making the stack executable. */
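/* PROT_GROWSDOWN would make the kernel apply the change down through the
   host stack mapping; approximate it by widening the range down to the
   guest's stack limit ourselves. */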
5705 if ((arg3 & PROT_GROWSDOWN)
5706 && arg1 >= ts->info->stack_limit
5707 && arg1 <= ts->info->start_stack) {
5708 arg3 &= ~PROT_GROWSDOWN;
5709 arg2 = arg2 + arg1 - ts->info->stack_limit;
5710 arg1 = ts->info->stack_limit;
5713 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5714 break;
5715 #ifdef TARGET_NR_mremap
5716 case TARGET_NR_mremap:
5717 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5718 break;
5719 #endif
5720 /* ??? msync/mlock/munlock are broken for softmmu. */
5721 #ifdef TARGET_NR_msync
5722 case TARGET_NR_msync:
5723 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5724 break;
5725 #endif
5726 #ifdef TARGET_NR_mlock
5727 case TARGET_NR_mlock:
5728 ret = get_errno(mlock(g2h(arg1), arg2));
5729 break;
5730 #endif
5731 #ifdef TARGET_NR_munlock
5732 case TARGET_NR_munlock:
5733 ret = get_errno(munlock(g2h(arg1), arg2));
5734 break;
5735 #endif
5736 #ifdef TARGET_NR_mlockall
5737 case TARGET_NR_mlockall:
5738 ret = get_errno(mlockall(arg1));
5739 break;
5740 #endif
5741 #ifdef TARGET_NR_munlockall
5742 case TARGET_NR_munlockall:
5743 ret = get_errno(munlockall());
5744 break;
5745 #endif
5746 case TARGET_NR_truncate:
5747 if (!(p = lock_user_string(arg1)))
5748 goto efault;
5749 ret = get_errno(truncate(p, arg2));
5750 unlock_user(p, arg1, 0);
5751 break;
5752 case TARGET_NR_ftruncate:
5753 ret = get_errno(ftruncate(arg1, arg2));
5754 break;
5755 case TARGET_NR_fchmod:
5756 ret = get_errno(fchmod(arg1, arg2));
5757 break;
5758 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5759 case TARGET_NR_fchmodat:
5760 if (!(p = lock_user_string(arg2)))
5761 goto efault;
5762 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5763 unlock_user(p, arg2, 0);
5764 break;
5765 #endif
5766 case TARGET_NR_getpriority:
5767 /* libc does special remapping of the return value of
5768 * sys_getpriority() so it's just easiest to call
5769 * sys_getpriority() directly rather than through libc. */
5770 ret = get_errno(sys_getpriority(arg1, arg2));
5771 break;
5772 case TARGET_NR_setpriority:
5773 ret = get_errno(setpriority(arg1, arg2, arg3));
5774 break;
5775 #ifdef TARGET_NR_profil
5776 case TARGET_NR_profil:
5777 goto unimplemented;
5778 #endif
5779 case TARGET_NR_statfs:
5780 if (!(p = lock_user_string(arg1)))
5781 goto efault;
5782 ret = get_errno(statfs(path(p), &stfs));
5783 unlock_user(p, arg1, 0);
5784 convert_statfs:
5785 if (!is_error(ret)) {
5786 struct target_statfs *target_stfs;
5788 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5789 goto efault;
5790 __put_user(stfs.f_type, &target_stfs->f_type);
5791 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5792 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5793 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5794 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5795 __put_user(stfs.f_files, &target_stfs->f_files);
5796 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5797 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5798 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5799 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5800 unlock_user_struct(target_stfs, arg2, 1);
5802 break;
5803 case TARGET_NR_fstatfs:
5804 ret = get_errno(fstatfs(arg1, &stfs));
5805 goto convert_statfs;
5806 #ifdef TARGET_NR_statfs64
5807 case TARGET_NR_statfs64:
5808 if (!(p = lock_user_string(arg1)))
5809 goto efault;
5810 ret = get_errno(statfs(path(p), &stfs));
5811 unlock_user(p, arg1, 0);
5812 convert_statfs64:
5813 if (!is_error(ret)) {
5814 struct target_statfs64 *target_stfs;
5816 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5817 goto efault;
5818 __put_user(stfs.f_type, &target_stfs->f_type);
5819 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5820 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5821 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5822 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5823 __put_user(stfs.f_files, &target_stfs->f_files);
5824 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5825 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5826 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5827 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5828 unlock_user_struct(target_stfs, arg3, 1);
5830 break;
5831 case TARGET_NR_fstatfs64:
5832 ret = get_errno(fstatfs(arg1, &stfs));
5833 goto convert_statfs64;
5834 #endif
5835 #ifdef TARGET_NR_ioperm
5836 case TARGET_NR_ioperm:
5837 goto unimplemented;
5838 #endif
5839 #ifdef TARGET_NR_socketcall
5840 case TARGET_NR_socketcall:
5841 ret = do_socketcall(arg1, arg2);
5842 break;
5843 #endif
5844 #ifdef TARGET_NR_accept
5845 case TARGET_NR_accept:
5846 ret = do_accept(arg1, arg2, arg3);
5847 break;
5848 #endif
5849 #ifdef TARGET_NR_bind
5850 case TARGET_NR_bind:
5851 ret = do_bind(arg1, arg2, arg3);
5852 break;
5853 #endif
5854 #ifdef TARGET_NR_connect
5855 case TARGET_NR_connect:
5856 ret = do_connect(arg1, arg2, arg3);
5857 break;
5858 #endif
5859 #ifdef TARGET_NR_getpeername
5860 case TARGET_NR_getpeername:
5861 ret = do_getpeername(arg1, arg2, arg3);
5862 break;
5863 #endif
5864 #ifdef TARGET_NR_getsockname
5865 case TARGET_NR_getsockname:
5866 ret = do_getsockname(arg1, arg2, arg3);
5867 break;
5868 #endif
5869 #ifdef TARGET_NR_getsockopt
5870 case TARGET_NR_getsockopt:
5871 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
5872 break;
5873 #endif
5874 #ifdef TARGET_NR_listen
5875 case TARGET_NR_listen:
5876 ret = get_errno(listen(arg1, arg2));
5877 break;
5878 #endif
5879 #ifdef TARGET_NR_recv
5880 case TARGET_NR_recv:
5881 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
5882 break;
5883 #endif
5884 #ifdef TARGET_NR_recvfrom
5885 case TARGET_NR_recvfrom:
5886 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
5887 break;
5888 #endif
5889 #ifdef TARGET_NR_recvmsg
5890 case TARGET_NR_recvmsg:
5891 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
5892 break;
5893 #endif
5894 #ifdef TARGET_NR_send
5895 case TARGET_NR_send:
5896 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
5897 break;
5898 #endif
5899 #ifdef TARGET_NR_sendmsg
5900 case TARGET_NR_sendmsg:
5901 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
5902 break;
5903 #endif
5904 #ifdef TARGET_NR_sendto
5905 case TARGET_NR_sendto:
5906 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
5907 break;
5908 #endif
5909 #ifdef TARGET_NR_shutdown
5910 case TARGET_NR_shutdown:
5911 ret = get_errno(shutdown(arg1, arg2));
5912 break;
5913 #endif
5914 #ifdef TARGET_NR_socket
5915 case TARGET_NR_socket:
5916 ret = do_socket(arg1, arg2, arg3);
5917 break;
5918 #endif
5919 #ifdef TARGET_NR_socketpair
5920 case TARGET_NR_socketpair:
5921 ret = do_socketpair(arg1, arg2, arg3, arg4);
5922 break;
5923 #endif
5924 #ifdef TARGET_NR_setsockopt
5925 case TARGET_NR_setsockopt:
5926 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
5927 break;
5928 #endif
5930 case TARGET_NR_syslog:
5931 if (!(p = lock_user_string(arg2)))
5932 goto efault;
5933 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
5934 unlock_user(p, arg2, 0);
5935 break;
5937 case TARGET_NR_setitimer:
5939 struct itimerval value, ovalue, *pvalue;
5941 if (arg2) {
5942 pvalue = &value;
5943 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
5944 || copy_from_user_timeval(&pvalue->it_value,
5945 arg2 + sizeof(struct target_timeval)))
5946 goto efault;
5947 } else {
5948 pvalue = NULL;
5950 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
5951 if (!is_error(ret) && arg3) {
5952 if (copy_to_user_timeval(arg3,
5953 &ovalue.it_interval)
5954 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
5955 &ovalue.it_value))
5956 goto efault;
5959 break;
5960 case TARGET_NR_getitimer:
5962 struct itimerval value;
5964 ret = get_errno(getitimer(arg1, &value));
5965 if (!is_error(ret) && arg2) {
5966 if (copy_to_user_timeval(arg2,
5967 &value.it_interval)
5968 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
5969 &value.it_value))
5970 goto efault;
5973 break;
5974 case TARGET_NR_stat:
5975 if (!(p = lock_user_string(arg1)))
5976 goto efault;
5977 ret = get_errno(stat(path(p), &st));
5978 unlock_user(p, arg1, 0);
5979 goto do_stat;
5980 case TARGET_NR_lstat:
5981 if (!(p = lock_user_string(arg1)))
5982 goto efault;
5983 ret = get_errno(lstat(path(p), &st));
5984 unlock_user(p, arg1, 0);
5985 goto do_stat;
5986 case TARGET_NR_fstat:
5988 ret = get_errno(fstat(arg1, &st));
5989 do_stat:
5990 if (!is_error(ret)) {
5991 struct target_stat *target_st;
5993 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
5994 goto efault;
5995 memset(target_st, 0, sizeof(*target_st));
5996 __put_user(st.st_dev, &target_st->st_dev);
5997 __put_user(st.st_ino, &target_st->st_ino);
5998 __put_user(st.st_mode, &target_st->st_mode);
5999 __put_user(st.st_uid, &target_st->st_uid);
6000 __put_user(st.st_gid, &target_st->st_gid);
6001 __put_user(st.st_nlink, &target_st->st_nlink);
6002 __put_user(st.st_rdev, &target_st->st_rdev);
6003 __put_user(st.st_size, &target_st->st_size);
6004 __put_user(st.st_blksize, &target_st->st_blksize);
6005 __put_user(st.st_blocks, &target_st->st_blocks);
6006 __put_user(st.st_atime, &target_st->target_st_atime);
6007 __put_user(st.st_mtime, &target_st->target_st_mtime);
6008 __put_user(st.st_ctime, &target_st->target_st_ctime);
6009 unlock_user_struct(target_st, arg2, 1);
6012 break;
6013 #ifdef TARGET_NR_olduname
6014 case TARGET_NR_olduname:
6015 goto unimplemented;
6016 #endif
6017 #ifdef TARGET_NR_iopl
6018 case TARGET_NR_iopl:
6019 goto unimplemented;
6020 #endif
6021 case TARGET_NR_vhangup:
6022 ret = get_errno(vhangup());
6023 break;
6024 #ifdef TARGET_NR_idle
6025 case TARGET_NR_idle:
6026 goto unimplemented;
6027 #endif
6028 #ifdef TARGET_NR_syscall
6029 case TARGET_NR_syscall:
6030 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
6031 break;
6032 #endif
6033 case TARGET_NR_wait4:
6035 int status;
6036 abi_long status_ptr = arg2;
6037 struct rusage rusage, *rusage_ptr;
6038 abi_ulong target_rusage = arg4;
6039 if (target_rusage)
6040 rusage_ptr = &rusage;
6041 else
6042 rusage_ptr = NULL;
6043 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6044 if (!is_error(ret)) {
6045 if (status_ptr) {
6046 status = host_to_target_waitstatus(status);
6047 if (put_user_s32(status, status_ptr))
6048 goto efault;
6050 if (target_rusage)
6051 host_to_target_rusage(target_rusage, &rusage);
6054 break;
6055 #ifdef TARGET_NR_swapoff
6056 case TARGET_NR_swapoff:
6057 if (!(p = lock_user_string(arg1)))
6058 goto efault;
6059 ret = get_errno(swapoff(p));
6060 unlock_user(p, arg1, 0);
6061 break;
6062 #endif
6063 case TARGET_NR_sysinfo:
6065 struct target_sysinfo *target_value;
6066 struct sysinfo value;
6067 ret = get_errno(sysinfo(&value));
6068 if (!is_error(ret) && arg1)
6070 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6071 goto efault;
6072 __put_user(value.uptime, &target_value->uptime);
6073 __put_user(value.loads[0], &target_value->loads[0]);
6074 __put_user(value.loads[1], &target_value->loads[1]);
6075 __put_user(value.loads[2], &target_value->loads[2]);
6076 __put_user(value.totalram, &target_value->totalram);
6077 __put_user(value.freeram, &target_value->freeram);
6078 __put_user(value.sharedram, &target_value->sharedram);
6079 __put_user(value.bufferram, &target_value->bufferram);
6080 __put_user(value.totalswap, &target_value->totalswap);
6081 __put_user(value.freeswap, &target_value->freeswap);
6082 __put_user(value.procs, &target_value->procs);
6083 __put_user(value.totalhigh, &target_value->totalhigh);
6084 __put_user(value.freehigh, &target_value->freehigh);
6085 __put_user(value.mem_unit, &target_value->mem_unit);
6086 unlock_user_struct(target_value, arg1, 1);
6089 break;
6090 #ifdef TARGET_NR_ipc
6091 case TARGET_NR_ipc:
6092 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6093 break;
6094 #endif
6095 #ifdef TARGET_NR_semget
6096 case TARGET_NR_semget:
6097 ret = get_errno(semget(arg1, arg2, arg3));
6098 break;
6099 #endif
6100 #ifdef TARGET_NR_semop
6101 case TARGET_NR_semop:
6102 ret = get_errno(do_semop(arg1, arg2, arg3));
6103 break;
6104 #endif
6105 #ifdef TARGET_NR_semctl
6106 case TARGET_NR_semctl:
6107 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6108 break;
6109 #endif
6110 #ifdef TARGET_NR_msgctl
6111 case TARGET_NR_msgctl:
6112 ret = do_msgctl(arg1, arg2, arg3);
6113 break;
6114 #endif
6115 #ifdef TARGET_NR_msgget
6116 case TARGET_NR_msgget:
6117 ret = get_errno(msgget(arg1, arg2));
6118 break;
6119 #endif
6120 #ifdef TARGET_NR_msgrcv
6121 case TARGET_NR_msgrcv:
6122 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6123 break;
6124 #endif
6125 #ifdef TARGET_NR_msgsnd
6126 case TARGET_NR_msgsnd:
6127 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6128 break;
6129 #endif
6130 #ifdef TARGET_NR_shmget
6131 case TARGET_NR_shmget:
6132 ret = get_errno(shmget(arg1, arg2, arg3));
6133 break;
6134 #endif
6135 #ifdef TARGET_NR_shmctl
6136 case TARGET_NR_shmctl:
6137 ret = do_shmctl(arg1, arg2, arg3);
6138 break;
6139 #endif
6140 #ifdef TARGET_NR_shmat
6141 case TARGET_NR_shmat:
6142 ret = do_shmat(arg1, arg2, arg3);
6143 break;
6144 #endif
6145 #ifdef TARGET_NR_shmdt
6146 case TARGET_NR_shmdt:
6147 ret = do_shmdt(arg1);
6148 break;
6149 #endif
6150 case TARGET_NR_fsync:
6151 ret = get_errno(fsync(arg1));
6152 break;
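/* clone(): the argument order is architecture dependent.  SH4 and
 * Alpha pass the fourth and fifth arguments swapped, and CRIS passes
 * the first two swapped, so the operands are reshuffled before calling
 * the common do_fork() helper. */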
6153 case TARGET_NR_clone:
6154 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6155 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6156 #elif defined(TARGET_CRIS)
6157 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6158 #else
6159 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6160 #endif
6161 break;
6162 #ifdef __NR_exit_group
6163 /* new thread calls */
6164 case TARGET_NR_exit_group:
6165 #ifdef TARGET_GPROF
6166 _mcleanup();
6167 #endif
6168 gdb_exit(cpu_env, arg1);
6169 ret = get_errno(exit_group(arg1));
6170 break;
6171 #endif
6172 case TARGET_NR_setdomainname:
6173 if (!(p = lock_user_string(arg1)))
6174 goto efault;
6175 ret = get_errno(setdomainname(p, arg2));
6176 unlock_user(p, arg1, 0);
6177 break;
6178 case TARGET_NR_uname:
6179 /* no need to transcode because we use the linux syscall */
6181 struct new_utsname * buf;
6183 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6184 goto efault;
6185 ret = get_errno(sys_uname(buf));
6186 if (!is_error(ret)) {
6187 /* Overwrite the native machine name with whatever is being
6188 emulated. */
6189 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6190 /* Allow the user to override the reported release. */
6191 if (qemu_uname_release && *qemu_uname_release)
6192 strcpy (buf->release, qemu_uname_release);
6194 unlock_user_struct(buf, arg1, 1);
6196 break;
6197 #ifdef TARGET_I386
6198 case TARGET_NR_modify_ldt:
6199 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6200 break;
6201 #if !defined(TARGET_X86_64)
6202 case TARGET_NR_vm86old:
6203 goto unimplemented;
6204 case TARGET_NR_vm86:
6205 ret = do_vm86(cpu_env, arg1, arg2);
6206 break;
6207 #endif
6208 #endif
6209 case TARGET_NR_adjtimex:
6210 goto unimplemented;
6211 #ifdef TARGET_NR_create_module
6212 case TARGET_NR_create_module:
6213 #endif
6214 case TARGET_NR_init_module:
6215 case TARGET_NR_delete_module:
6216 #ifdef TARGET_NR_get_kernel_syms
6217 case TARGET_NR_get_kernel_syms:
6218 #endif
6219 goto unimplemented;
6220 case TARGET_NR_quotactl:
6221 goto unimplemented;
6222 case TARGET_NR_getpgid:
6223 ret = get_errno(getpgid(arg1));
6224 break;
6225 case TARGET_NR_fchdir:
6226 ret = get_errno(fchdir(arg1));
6227 break;
6228 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6229 case TARGET_NR_bdflush:
6230 goto unimplemented;
6231 #endif
6232 #ifdef TARGET_NR_sysfs
6233 case TARGET_NR_sysfs:
6234 goto unimplemented;
6235 #endif
6236 case TARGET_NR_personality:
6237 ret = get_errno(personality(arg1));
6238 break;
6239 #ifdef TARGET_NR_afs_syscall
6240 case TARGET_NR_afs_syscall:
6241 goto unimplemented;
6242 #endif
6243 #ifdef TARGET_NR__llseek /* Not on alpha */
6244 case TARGET_NR__llseek:
6246 int64_t res;
6247 #if !defined(__NR_llseek)
6248 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6249 if (res == -1) {
6250 ret = get_errno(res);
6251 } else {
6252 ret = 0;
6254 #else
6255 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6256 #endif
6257 if ((ret == 0) && put_user_s64(res, arg4)) {
6258 goto efault;
6261 break;
6262 #endif
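/* getdents: when the target ABI is 32-bit but the host is 64-bit, the
 * host struct linux_dirent (whose d_ino/d_off are longs) is wider than
 * the target's, so each record returned by the kernel is repacked into
 * a target_dirent with a correspondingly smaller d_reclen.  When the
 * layouts already match, the entries are simply byte-swapped in
 * place. */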
6263 case TARGET_NR_getdents:
6264 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6266 struct target_dirent *target_dirp;
6267 struct linux_dirent *dirp;
6268 abi_long count = arg3;
6270 dirp = malloc(count);
6271 if (!dirp) {
6272 ret = -TARGET_ENOMEM;
6273 goto fail;
6276 ret = get_errno(sys_getdents(arg1, dirp, count));
6277 if (!is_error(ret)) {
6278 struct linux_dirent *de;
6279 struct target_dirent *tde;
6280 int len = ret;
6281 int reclen, treclen;
6282 int count1, tnamelen;
6284 count1 = 0;
6285 de = dirp;
6286 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6287 goto efault;
6288 tde = target_dirp;
6289 while (len > 0) {
6290 reclen = de->d_reclen;
6291 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6292 tde->d_reclen = tswap16(treclen);
6293 tde->d_ino = tswapl(de->d_ino);
6294 tde->d_off = tswapl(de->d_off);
6295 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6296 if (tnamelen > 256)
6297 tnamelen = 256;
6298 /* XXX: may not be correct */
6299 pstrcpy(tde->d_name, tnamelen, de->d_name);
6300 de = (struct linux_dirent *)((char *)de + reclen);
6301 len -= reclen;
6302 tde = (struct target_dirent *)((char *)tde + treclen);
6303 count1 += treclen;
6305 ret = count1;
6306 unlock_user(target_dirp, arg2, ret);
6308 free(dirp);
6310 #else
6312 struct linux_dirent *dirp;
6313 abi_long count = arg3;
6315 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6316 goto efault;
6317 ret = get_errno(sys_getdents(arg1, dirp, count));
6318 if (!is_error(ret)) {
6319 struct linux_dirent *de;
6320 int len = ret;
6321 int reclen;
6322 de = dirp;
6323 while (len > 0) {
6324 reclen = de->d_reclen;
6325 if (reclen > len)
6326 break;
6327 de->d_reclen = tswap16(reclen);
6328 tswapls(&de->d_ino);
6329 tswapls(&de->d_off);
6330 de = (struct linux_dirent *)((char *)de + reclen);
6331 len -= reclen;
6334 unlock_user(dirp, arg2, ret);
6336 #endif
6337 break;
6338 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6339 case TARGET_NR_getdents64:
6341 struct linux_dirent64 *dirp;
6342 abi_long count = arg3;
6343 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6344 goto efault;
6345 ret = get_errno(sys_getdents64(arg1, dirp, count));
6346 if (!is_error(ret)) {
6347 struct linux_dirent64 *de;
6348 int len = ret;
6349 int reclen;
6350 de = dirp;
6351 while (len > 0) {
6352 reclen = de->d_reclen;
6353 if (reclen > len)
6354 break;
6355 de->d_reclen = tswap16(reclen);
6356 tswap64s((uint64_t *)&de->d_ino);
6357 tswap64s((uint64_t *)&de->d_off);
6358 de = (struct linux_dirent64 *)((char *)de + reclen);
6359 len -= reclen;
6362 unlock_user(dirp, arg2, ret);
6364 break;
6365 #endif /* TARGET_NR_getdents64 */
6366 #ifdef TARGET_NR__newselect
6367 case TARGET_NR__newselect:
6368 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6369 break;
6370 #endif
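/* poll/ppoll: the target pollfd array is converted into a temporary
 * host array, the syscall is issued, and the resulting revents are
 * copied back.  For ppoll the optional timespec and signal mask are
 * translated as well, with _NSIG/8 passed as the host sigset size. */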
6371 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6372 # ifdef TARGET_NR_poll
6373 case TARGET_NR_poll:
6374 # endif
6375 # ifdef TARGET_NR_ppoll
6376 case TARGET_NR_ppoll:
6377 # endif
6379 struct target_pollfd *target_pfd;
6380 unsigned int nfds = arg2;
6381 int timeout = arg3;
6382 struct pollfd *pfd;
6383 unsigned int i;
6385 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6386 if (!target_pfd)
6387 goto efault;
6389 pfd = alloca(sizeof(struct pollfd) * nfds);
6390 for(i = 0; i < nfds; i++) {
6391 pfd[i].fd = tswap32(target_pfd[i].fd);
6392 pfd[i].events = tswap16(target_pfd[i].events);
6395 # ifdef TARGET_NR_ppoll
6396 if (num == TARGET_NR_ppoll) {
6397 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6398 target_sigset_t *target_set;
6399 sigset_t _set, *set = &_set;
6401 if (arg3) {
6402 if (target_to_host_timespec(timeout_ts, arg3)) {
6403 unlock_user(target_pfd, arg1, 0);
6404 goto efault;
6406 } else {
6407 timeout_ts = NULL;
6410 if (arg4) {
6411 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6412 if (!target_set) {
6413 unlock_user(target_pfd, arg1, 0);
6414 goto efault;
6416 target_to_host_sigset(set, target_set);
6417 } else {
6418 set = NULL;
6421 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6423 if (!is_error(ret) && arg3) {
6424 host_to_target_timespec(arg3, timeout_ts);
6426 if (arg4) {
6427 unlock_user(target_set, arg4, 0);
6429 } else
6430 # endif
6431 ret = get_errno(poll(pfd, nfds, timeout));
6433 if (!is_error(ret)) {
6434 for(i = 0; i < nfds; i++) {
6435 target_pfd[i].revents = tswap16(pfd[i].revents);
6438 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6440 break;
6441 #endif
6442 case TARGET_NR_flock:
6443 /* NOTE: the flock constant seems to be the same for every
6444 Linux platform */
6445 ret = get_errno(flock(arg1, arg2));
6446 break;
6447 case TARGET_NR_readv:
6449 int count = arg3;
6450 struct iovec *vec;
6452 vec = alloca(count * sizeof(struct iovec));
6453 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6454 goto efault;
6455 ret = get_errno(readv(arg1, vec, count));
6456 unlock_iovec(vec, arg2, count, 1);
6458 break;
6459 case TARGET_NR_writev:
6461 int count = arg3;
6462 struct iovec *vec;
6464 vec = alloca(count * sizeof(struct iovec));
6465 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6466 goto efault;
6467 ret = get_errno(writev(arg1, vec, count));
6468 unlock_iovec(vec, arg2, count, 0);
6470 break;
6471 case TARGET_NR_getsid:
6472 ret = get_errno(getsid(arg1));
6473 break;
6474 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6475 case TARGET_NR_fdatasync:
6476 ret = get_errno(fdatasync(arg1));
6477 break;
6478 #endif
6479 case TARGET_NR__sysctl:
6480 /* We don't implement this, but ENOTDIR is always a safe
6481 return value. */
6482 ret = -TARGET_ENOTDIR;
6483 break;
6484 case TARGET_NR_sched_getaffinity:
6486 unsigned int mask_size;
6487 unsigned long *mask;
6489 /*
6490 * sched_getaffinity needs multiples of ulong, so need to take
6491 * care of mismatches between target ulong and host ulong sizes.
6492 */
6493 if (arg2 & (sizeof(abi_ulong) - 1)) {
6494 ret = -TARGET_EINVAL;
6495 break;
6497 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
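/* For example, assuming 8-byte host longs: a request of arg2 == 12
 * bytes (valid when the target abi_ulong is 4 bytes) is rounded up to
 * mask_size == 16 so the host kernel always sees whole ulongs. */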
6499 mask = alloca(mask_size);
6500 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6502 if (!is_error(ret)) {
6503 if (arg2 > ret) {
6504 /* Zero out any extra space kernel didn't fill */
6505 unsigned long zero = arg2 - ret;
6506 p = alloca(zero);
6507 memset(p, 0, zero);
6508 if (copy_to_user(arg3 + ret, p, zero)) {
6509 goto efault;
6511 arg2 = ret;
6513 if (copy_to_user(arg3, mask, arg2)) {
6514 goto efault;
6516 ret = arg2;
6519 break;
6520 case TARGET_NR_sched_setaffinity:
6522 unsigned int mask_size;
6523 unsigned long *mask;
6525 /*
6526 * sched_setaffinity needs multiples of ulong, so need to take
6527 * care of mismatches between target ulong and host ulong sizes.
6528 */
6529 if (arg2 & (sizeof(abi_ulong) - 1)) {
6530 ret = -TARGET_EINVAL;
6531 break;
6533 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6535 mask = alloca(mask_size);
6536 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6537 goto efault;
6539 memcpy(mask, p, arg2);
6540 unlock_user_struct(p, arg3, 0);
6542 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6544 break;
6545 case TARGET_NR_sched_setparam:
6547 struct sched_param *target_schp;
6548 struct sched_param schp;
6550 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6551 goto efault;
6552 schp.sched_priority = tswap32(target_schp->sched_priority);
6553 unlock_user_struct(target_schp, arg2, 0);
6554 ret = get_errno(sched_setparam(arg1, &schp));
6556 break;
6557 case TARGET_NR_sched_getparam:
6559 struct sched_param *target_schp;
6560 struct sched_param schp;
6561 ret = get_errno(sched_getparam(arg1, &schp));
6562 if (!is_error(ret)) {
6563 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6564 goto efault;
6565 target_schp->sched_priority = tswap32(schp.sched_priority);
6566 unlock_user_struct(target_schp, arg2, 1);
6569 break;
6570 case TARGET_NR_sched_setscheduler:
6572 struct sched_param *target_schp;
6573 struct sched_param schp;
6574 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6575 goto efault;
6576 schp.sched_priority = tswap32(target_schp->sched_priority);
6577 unlock_user_struct(target_schp, arg3, 0);
6578 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6580 break;
6581 case TARGET_NR_sched_getscheduler:
6582 ret = get_errno(sched_getscheduler(arg1));
6583 break;
6584 case TARGET_NR_sched_yield:
6585 ret = get_errno(sched_yield());
6586 break;
6587 case TARGET_NR_sched_get_priority_max:
6588 ret = get_errno(sched_get_priority_max(arg1));
6589 break;
6590 case TARGET_NR_sched_get_priority_min:
6591 ret = get_errno(sched_get_priority_min(arg1));
6592 break;
6593 case TARGET_NR_sched_rr_get_interval:
6595 struct timespec ts;
6596 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6597 if (!is_error(ret)) {
6598 host_to_target_timespec(arg2, &ts);
6601 break;
6602 case TARGET_NR_nanosleep:
6604 struct timespec req, rem;
6605 target_to_host_timespec(&req, arg1);
6606 ret = get_errno(nanosleep(&req, &rem));
6607 if (is_error(ret) && arg2) {
6608 host_to_target_timespec(arg2, &rem);
6611 break;
6612 #ifdef TARGET_NR_query_module
6613 case TARGET_NR_query_module:
6614 goto unimplemented;
6615 #endif
6616 #ifdef TARGET_NR_nfsservctl
6617 case TARGET_NR_nfsservctl:
6618 goto unimplemented;
6619 #endif
6620 case TARGET_NR_prctl:
6621 switch (arg1)
6623 case PR_GET_PDEATHSIG:
6625 int deathsig;
6626 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6627 if (!is_error(ret) && arg2
6628 && put_user_ual(deathsig, arg2))
6629 goto efault;
6631 break;
6632 default:
6633 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6634 break;
6636 break;
6637 #ifdef TARGET_NR_arch_prctl
6638 case TARGET_NR_arch_prctl:
6639 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6640 ret = do_arch_prctl(cpu_env, arg1, arg2);
6641 break;
6642 #else
6643 goto unimplemented;
6644 #endif
6645 #endif
6646 #ifdef TARGET_NR_pread
6647 case TARGET_NR_pread:
6648 #ifdef TARGET_ARM
6649 if (((CPUARMState *)cpu_env)->eabi)
6650 arg4 = arg5;
6651 #endif
6652 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6653 goto efault;
6654 ret = get_errno(pread(arg1, p, arg3, arg4));
6655 unlock_user(p, arg2, ret);
6656 break;
6657 case TARGET_NR_pwrite:
6658 #ifdef TARGET_ARM
6659 if (((CPUARMState *)cpu_env)->eabi)
6660 arg4 = arg5;
6661 #endif
6662 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6663 goto efault;
6664 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6665 unlock_user(p, arg2, 0);
6666 break;
6667 #endif
6668 #ifdef TARGET_NR_pread64
6669 case TARGET_NR_pread64:
6670 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6671 goto efault;
6672 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6673 unlock_user(p, arg2, ret);
6674 break;
6675 case TARGET_NR_pwrite64:
6676 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6677 goto efault;
6678 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6679 unlock_user(p, arg2, 0);
6680 break;
6681 #endif
6682 case TARGET_NR_getcwd:
6683 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6684 goto efault;
6685 ret = get_errno(sys_getcwd1(p, arg2));
6686 unlock_user(p, arg1, ret);
6687 break;
6688 case TARGET_NR_capget:
6689 goto unimplemented;
6690 case TARGET_NR_capset:
6691 goto unimplemented;
6692 case TARGET_NR_sigaltstack:
6693 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6694 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6695 defined(TARGET_M68K)
6696 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6697 break;
6698 #else
6699 goto unimplemented;
6700 #endif
6701 case TARGET_NR_sendfile:
6702 goto unimplemented;
6703 #ifdef TARGET_NR_getpmsg
6704 case TARGET_NR_getpmsg:
6705 goto unimplemented;
6706 #endif
6707 #ifdef TARGET_NR_putpmsg
6708 case TARGET_NR_putpmsg:
6709 goto unimplemented;
6710 #endif
6711 #ifdef TARGET_NR_vfork
6712 case TARGET_NR_vfork:
6713 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6714 0, 0, 0, 0));
6715 break;
6716 #endif
6717 #ifdef TARGET_NR_ugetrlimit
6718 case TARGET_NR_ugetrlimit:
6720 struct rlimit rlim;
6721 ret = get_errno(getrlimit(arg1, &rlim));
6722 if (!is_error(ret)) {
6723 struct target_rlimit *target_rlim;
6724 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6725 goto efault;
6726 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6727 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6728 unlock_user_struct(target_rlim, arg2, 1);
6730 break;
6732 #endif
6733 #ifdef TARGET_NR_truncate64
6734 case TARGET_NR_truncate64:
6735 if (!(p = lock_user_string(arg1)))
6736 goto efault;
6737 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6738 unlock_user(p, arg1, 0);
6739 break;
6740 #endif
6741 #ifdef TARGET_NR_ftruncate64
6742 case TARGET_NR_ftruncate64:
6743 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6744 break;
6745 #endif
6746 #ifdef TARGET_NR_stat64
6747 case TARGET_NR_stat64:
6748 if (!(p = lock_user_string(arg1)))
6749 goto efault;
6750 ret = get_errno(stat(path(p), &st));
6751 unlock_user(p, arg1, 0);
6752 if (!is_error(ret))
6753 ret = host_to_target_stat64(cpu_env, arg2, &st);
6754 break;
6755 #endif
6756 #ifdef TARGET_NR_lstat64
6757 case TARGET_NR_lstat64:
6758 if (!(p = lock_user_string(arg1)))
6759 goto efault;
6760 ret = get_errno(lstat(path(p), &st));
6761 unlock_user(p, arg1, 0);
6762 if (!is_error(ret))
6763 ret = host_to_target_stat64(cpu_env, arg2, &st);
6764 break;
6765 #endif
6766 #ifdef TARGET_NR_fstat64
6767 case TARGET_NR_fstat64:
6768 ret = get_errno(fstat(arg1, &st));
6769 if (!is_error(ret))
6770 ret = host_to_target_stat64(cpu_env, arg2, &st);
6771 break;
6772 #endif
6773 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6774 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6775 #ifdef TARGET_NR_fstatat64
6776 case TARGET_NR_fstatat64:
6777 #endif
6778 #ifdef TARGET_NR_newfstatat
6779 case TARGET_NR_newfstatat:
6780 #endif
6781 if (!(p = lock_user_string(arg2)))
6782 goto efault;
6783 #ifdef __NR_fstatat64
6784 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6785 #else
6786 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6787 #endif
6788 if (!is_error(ret))
6789 ret = host_to_target_stat64(cpu_env, arg3, &st);
6790 break;
6791 #endif
6792 case TARGET_NR_lchown:
6793 if (!(p = lock_user_string(arg1)))
6794 goto efault;
6795 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6796 unlock_user(p, arg1, 0);
6797 break;
6798 #ifdef TARGET_NR_getuid
6799 case TARGET_NR_getuid:
6800 ret = get_errno(high2lowuid(getuid()));
6801 break;
6802 #endif
6803 #ifdef TARGET_NR_getgid
6804 case TARGET_NR_getgid:
6805 ret = get_errno(high2lowgid(getgid()));
6806 break;
6807 #endif
6808 #ifdef TARGET_NR_geteuid
6809 case TARGET_NR_geteuid:
6810 ret = get_errno(high2lowuid(geteuid()));
6811 break;
6812 #endif
6813 #ifdef TARGET_NR_getegid
6814 case TARGET_NR_getegid:
6815 ret = get_errno(high2lowgid(getegid()));
6816 break;
6817 #endif
6818 case TARGET_NR_setreuid:
6819 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6820 break;
6821 case TARGET_NR_setregid:
6822 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6823 break;
6824 case TARGET_NR_getgroups:
6826 int gidsetsize = arg1;
6827 target_id *target_grouplist;
6828 gid_t *grouplist;
6829 int i;
6831 grouplist = alloca(gidsetsize * sizeof(gid_t));
6832 ret = get_errno(getgroups(gidsetsize, grouplist));
6833 if (gidsetsize == 0)
6834 break;
6835 if (!is_error(ret)) {
6836 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6837 if (!target_grouplist)
6838 goto efault;
6839 for(i = 0;i < ret; i++)
6840 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
6841 unlock_user(target_grouplist, arg2, gidsetsize * 2);
6844 break;
6845 case TARGET_NR_setgroups:
6847 int gidsetsize = arg1;
6848 target_id *target_grouplist;
6849 gid_t *grouplist;
6850 int i;
6852 grouplist = alloca(gidsetsize * sizeof(gid_t));
6853 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
6854 if (!target_grouplist) {
6855 ret = -TARGET_EFAULT;
6856 goto fail;
6858 for(i = 0;i < gidsetsize; i++)
6859 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
6860 unlock_user(target_grouplist, arg2, 0);
6861 ret = get_errno(setgroups(gidsetsize, grouplist));
6863 break;
6864 case TARGET_NR_fchown:
6865 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
6866 break;
6867 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6868 case TARGET_NR_fchownat:
6869 if (!(p = lock_user_string(arg2)))
6870 goto efault;
6871 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
6872 unlock_user(p, arg2, 0);
6873 break;
6874 #endif
6875 #ifdef TARGET_NR_setresuid
6876 case TARGET_NR_setresuid:
6877 ret = get_errno(setresuid(low2highuid(arg1),
6878 low2highuid(arg2),
6879 low2highuid(arg3)));
6880 break;
6881 #endif
6882 #ifdef TARGET_NR_getresuid
6883 case TARGET_NR_getresuid:
6885 uid_t ruid, euid, suid;
6886 ret = get_errno(getresuid(&ruid, &euid, &suid));
6887 if (!is_error(ret)) {
6888 if (put_user_u16(high2lowuid(ruid), arg1)
6889 || put_user_u16(high2lowuid(euid), arg2)
6890 || put_user_u16(high2lowuid(suid), arg3))
6891 goto efault;
6894 break;
6895 #endif
6896 #ifdef TARGET_NR_getresgid
6897 case TARGET_NR_setresgid:
6898 ret = get_errno(setresgid(low2highgid(arg1),
6899 low2highgid(arg2),
6900 low2highgid(arg3)));
6901 break;
6902 #endif
6903 #ifdef TARGET_NR_getresgid
6904 case TARGET_NR_getresgid:
6906 gid_t rgid, egid, sgid;
6907 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6908 if (!is_error(ret)) {
6909 if (put_user_u16(high2lowgid(rgid), arg1)
6910 || put_user_u16(high2lowgid(egid), arg2)
6911 || put_user_u16(high2lowgid(sgid), arg3))
6912 goto efault;
6915 break;
6916 #endif
6917 case TARGET_NR_chown:
6918 if (!(p = lock_user_string(arg1)))
6919 goto efault;
6920 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
6921 unlock_user(p, arg1, 0);
6922 break;
6923 case TARGET_NR_setuid:
6924 ret = get_errno(setuid(low2highuid(arg1)));
6925 break;
6926 case TARGET_NR_setgid:
6927 ret = get_errno(setgid(low2highgid(arg1)));
6928 break;
6929 case TARGET_NR_setfsuid:
6930 ret = get_errno(setfsuid(arg1));
6931 break;
6932 case TARGET_NR_setfsgid:
6933 ret = get_errno(setfsgid(arg1));
6934 break;
6936 #ifdef TARGET_NR_lchown32
6937 case TARGET_NR_lchown32:
6938 if (!(p = lock_user_string(arg1)))
6939 goto efault;
6940 ret = get_errno(lchown(p, arg2, arg3));
6941 unlock_user(p, arg1, 0);
6942 break;
6943 #endif
6944 #ifdef TARGET_NR_getuid32
6945 case TARGET_NR_getuid32:
6946 ret = get_errno(getuid());
6947 break;
6948 #endif
6950 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6951 /* Alpha specific */
6952 case TARGET_NR_getxuid:
6954 uid_t euid;
6955 euid=geteuid();
6956 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
6958 ret = get_errno(getuid());
6959 break;
6960 #endif
6961 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6962 /* Alpha specific */
6963 case TARGET_NR_getxgid:
6965 uid_t egid;
6966 egid=getegid();
6967 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
6969 ret = get_errno(getgid());
6970 break;
6971 #endif
6972 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
6973 /* Alpha specific */
6974 case TARGET_NR_osf_getsysinfo:
6975 ret = -TARGET_EOPNOTSUPP;
6976 switch (arg1) {
6977 case TARGET_GSI_IEEE_FP_CONTROL:
6979 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
6981 /* Copied from linux ieee_fpcr_to_swcr. */
6982 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
6983 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
6984 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
6985 | SWCR_TRAP_ENABLE_DZE
6986 | SWCR_TRAP_ENABLE_OVF);
6987 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
6988 | SWCR_TRAP_ENABLE_INE);
6989 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
6990 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
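/* Note the ~fpcr terms above: these trap-enable bits have inverted
 * sense in the hardware FPCR compared with the software completion
 * control word (SWCR) seen by user space; the reverse conversion in
 * osf_setsysinfo below inverts them again. */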
6992 if (put_user_u64 (swcr, arg2))
6993 goto efault;
6994 ret = 0;
6996 break;
6998 /* case GSI_IEEE_STATE_AT_SIGNAL:
6999 -- Not implemented in linux kernel.
7000 case GSI_UACPROC:
7001 -- Retrieves current unaligned access state; not much used.
7002 case GSI_PROC_TYPE:
7003 -- Retrieves implver information; surely not used.
7004 case GSI_GET_HWRPB:
7005 -- Grabs a copy of the HWRPB; surely not used.
7006 */
7008 break;
7009 #endif
7010 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7011 /* Alpha specific */
7012 case TARGET_NR_osf_setsysinfo:
7013 ret = -TARGET_EOPNOTSUPP;
7014 switch (arg1) {
7015 case TARGET_SSI_IEEE_FP_CONTROL:
7016 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7018 uint64_t swcr, fpcr, orig_fpcr;
7020 if (get_user_u64 (swcr, arg2))
7021 goto efault;
7022 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7023 fpcr = orig_fpcr & FPCR_DYN_MASK;
7025 /* Copied from linux ieee_swcr_to_fpcr. */
7026 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7027 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7028 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7029 | SWCR_TRAP_ENABLE_DZE
7030 | SWCR_TRAP_ENABLE_OVF)) << 48;
7031 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7032 | SWCR_TRAP_ENABLE_INE)) << 57;
7033 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7034 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7036 cpu_alpha_store_fpcr (cpu_env, fpcr);
7037 ret = 0;
7039 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7040 /* Old exceptions are not signaled. */
7041 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7043 /* If any exceptions are set by this call and are unmasked,
7044 send a signal. */
7045 /* ??? FIXME */
7048 break;
7050 /* case SSI_NVPAIRS:
7051 -- Used with SSIN_UACPROC to enable unaligned accesses.
7052 case SSI_IEEE_STATE_AT_SIGNAL:
7053 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7054 -- Not implemented in linux kernel
7055 */
7057 break;
7058 #endif
7059 #ifdef TARGET_NR_osf_sigprocmask
7060 /* Alpha specific. */
7061 case TARGET_NR_osf_sigprocmask:
7063 abi_ulong mask;
7064 int how = arg1;
7065 sigset_t set, oldset;
7067 switch(arg1) {
7068 case TARGET_SIG_BLOCK:
7069 how = SIG_BLOCK;
7070 break;
7071 case TARGET_SIG_UNBLOCK:
7072 how = SIG_UNBLOCK;
7073 break;
7074 case TARGET_SIG_SETMASK:
7075 how = SIG_SETMASK;
7076 break;
7077 default:
7078 ret = -TARGET_EINVAL;
7079 goto fail;
7081 mask = arg2;
7082 target_to_host_old_sigset(&set, &mask);
7083 sigprocmask(how, &set, &oldset);
7084 host_to_target_old_sigset(&mask, &oldset);
7085 ret = mask;
7087 break;
7088 #endif
7090 #ifdef TARGET_NR_getgid32
7091 case TARGET_NR_getgid32:
7092 ret = get_errno(getgid());
7093 break;
7094 #endif
7095 #ifdef TARGET_NR_geteuid32
7096 case TARGET_NR_geteuid32:
7097 ret = get_errno(geteuid());
7098 break;
7099 #endif
7100 #ifdef TARGET_NR_getegid32
7101 case TARGET_NR_getegid32:
7102 ret = get_errno(getegid());
7103 break;
7104 #endif
7105 #ifdef TARGET_NR_setreuid32
7106 case TARGET_NR_setreuid32:
7107 ret = get_errno(setreuid(arg1, arg2));
7108 break;
7109 #endif
7110 #ifdef TARGET_NR_setregid32
7111 case TARGET_NR_setregid32:
7112 ret = get_errno(setregid(arg1, arg2));
7113 break;
7114 #endif
7115 #ifdef TARGET_NR_getgroups32
7116 case TARGET_NR_getgroups32:
7118 int gidsetsize = arg1;
7119 uint32_t *target_grouplist;
7120 gid_t *grouplist;
7121 int i;
7123 grouplist = alloca(gidsetsize * sizeof(gid_t));
7124 ret = get_errno(getgroups(gidsetsize, grouplist));
7125 if (gidsetsize == 0)
7126 break;
7127 if (!is_error(ret)) {
7128 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7129 if (!target_grouplist) {
7130 ret = -TARGET_EFAULT;
7131 goto fail;
7133 for(i = 0;i < ret; i++)
7134 target_grouplist[i] = tswap32(grouplist[i]);
7135 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7138 break;
7139 #endif
7140 #ifdef TARGET_NR_setgroups32
7141 case TARGET_NR_setgroups32:
7143 int gidsetsize = arg1;
7144 uint32_t *target_grouplist;
7145 gid_t *grouplist;
7146 int i;
7148 grouplist = alloca(gidsetsize * sizeof(gid_t));
7149 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7150 if (!target_grouplist) {
7151 ret = -TARGET_EFAULT;
7152 goto fail;
7154 for(i = 0;i < gidsetsize; i++)
7155 grouplist[i] = tswap32(target_grouplist[i]);
7156 unlock_user(target_grouplist, arg2, 0);
7157 ret = get_errno(setgroups(gidsetsize, grouplist));
7159 break;
7160 #endif
7161 #ifdef TARGET_NR_fchown32
7162 case TARGET_NR_fchown32:
7163 ret = get_errno(fchown(arg1, arg2, arg3));
7164 break;
7165 #endif
7166 #ifdef TARGET_NR_setresuid32
7167 case TARGET_NR_setresuid32:
7168 ret = get_errno(setresuid(arg1, arg2, arg3));
7169 break;
7170 #endif
7171 #ifdef TARGET_NR_getresuid32
7172 case TARGET_NR_getresuid32:
7174 uid_t ruid, euid, suid;
7175 ret = get_errno(getresuid(&ruid, &euid, &suid));
7176 if (!is_error(ret)) {
7177 if (put_user_u32(ruid, arg1)
7178 || put_user_u32(euid, arg2)
7179 || put_user_u32(suid, arg3))
7180 goto efault;
7183 break;
7184 #endif
7185 #ifdef TARGET_NR_setresgid32
7186 case TARGET_NR_setresgid32:
7187 ret = get_errno(setresgid(arg1, arg2, arg3));
7188 break;
7189 #endif
7190 #ifdef TARGET_NR_getresgid32
7191 case TARGET_NR_getresgid32:
7193 gid_t rgid, egid, sgid;
7194 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7195 if (!is_error(ret)) {
7196 if (put_user_u32(rgid, arg1)
7197 || put_user_u32(egid, arg2)
7198 || put_user_u32(sgid, arg3))
7199 goto efault;
7202 break;
7203 #endif
7204 #ifdef TARGET_NR_chown32
7205 case TARGET_NR_chown32:
7206 if (!(p = lock_user_string(arg1)))
7207 goto efault;
7208 ret = get_errno(chown(p, arg2, arg3));
7209 unlock_user(p, arg1, 0);
7210 break;
7211 #endif
7212 #ifdef TARGET_NR_setuid32
7213 case TARGET_NR_setuid32:
7214 ret = get_errno(setuid(arg1));
7215 break;
7216 #endif
7217 #ifdef TARGET_NR_setgid32
7218 case TARGET_NR_setgid32:
7219 ret = get_errno(setgid(arg1));
7220 break;
7221 #endif
7222 #ifdef TARGET_NR_setfsuid32
7223 case TARGET_NR_setfsuid32:
7224 ret = get_errno(setfsuid(arg1));
7225 break;
7226 #endif
7227 #ifdef TARGET_NR_setfsgid32
7228 case TARGET_NR_setfsgid32:
7229 ret = get_errno(setfsgid(arg1));
7230 break;
7231 #endif
7233 case TARGET_NR_pivot_root:
7234 goto unimplemented;
7235 #ifdef TARGET_NR_mincore
7236 case TARGET_NR_mincore:
7238 void *a;
7239 ret = -TARGET_EFAULT;
7240 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7241 goto efault;
7242 if (!(p = lock_user_string(arg3)))
7243 goto mincore_fail;
7244 ret = get_errno(mincore(a, arg2, p));
7245 unlock_user(p, arg3, ret);
7246 mincore_fail:
7247 unlock_user(a, arg1, 0);
7249 break;
7250 #endif
7251 #ifdef TARGET_NR_arm_fadvise64_64
7252 case TARGET_NR_arm_fadvise64_64:
7254 /*
7255 * arm_fadvise64_64 looks like fadvise64_64 but
7256 * with different argument order
7257 */
7258 abi_long temp;
7259 temp = arg3;
7260 arg3 = arg4;
7261 arg4 = temp;
7263 #endif
7264 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7265 #ifdef TARGET_NR_fadvise64_64
7266 case TARGET_NR_fadvise64_64:
7267 #endif
7268 #ifdef TARGET_NR_fadvise64
7269 case TARGET_NR_fadvise64:
7270 #endif
7271 #ifdef TARGET_S390X
7272 switch (arg4) {
7273 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7274 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7275 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7276 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7277 default: break;
7279 #endif
7280 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7281 break;
7282 #endif
7283 #ifdef TARGET_NR_madvise
7284 case TARGET_NR_madvise:
7285 /* A straight passthrough may not be safe because qemu sometimes
7286 turns private file-backed mappings into anonymous mappings.
7287 This will break MADV_DONTNEED.
7288 This is a hint, so ignoring and returning success is ok. */
7289 ret = get_errno(0);
7290 break;
7291 #endif
7292 #if TARGET_ABI_BITS == 32
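/* fcntl64 only exists for 32-bit ABIs.  For F_GETLK64/F_SETLK64/
 * F_SETLKW64 the struct flock64 is converted field by field between
 * the target layout (with a separate target_eabi_flock64 variant for
 * ARM EABI) and the host layout; all other commands fall through to
 * the generic do_fcntl() handler. */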
7293 case TARGET_NR_fcntl64:
7295 int cmd;
7296 struct flock64 fl;
7297 struct target_flock64 *target_fl;
7298 #ifdef TARGET_ARM
7299 struct target_eabi_flock64 *target_efl;
7300 #endif
7302 cmd = target_to_host_fcntl_cmd(arg2);
7303 if (cmd == -TARGET_EINVAL)
7304 return cmd;
7306 switch(arg2) {
7307 case TARGET_F_GETLK64:
7308 #ifdef TARGET_ARM
7309 if (((CPUARMState *)cpu_env)->eabi) {
7310 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7311 goto efault;
7312 fl.l_type = tswap16(target_efl->l_type);
7313 fl.l_whence = tswap16(target_efl->l_whence);
7314 fl.l_start = tswap64(target_efl->l_start);
7315 fl.l_len = tswap64(target_efl->l_len);
7316 fl.l_pid = tswap32(target_efl->l_pid);
7317 unlock_user_struct(target_efl, arg3, 0);
7318 } else
7319 #endif
7321 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7322 goto efault;
7323 fl.l_type = tswap16(target_fl->l_type);
7324 fl.l_whence = tswap16(target_fl->l_whence);
7325 fl.l_start = tswap64(target_fl->l_start);
7326 fl.l_len = tswap64(target_fl->l_len);
7327 fl.l_pid = tswap32(target_fl->l_pid);
7328 unlock_user_struct(target_fl, arg3, 0);
7330 ret = get_errno(fcntl(arg1, cmd, &fl));
7331 if (ret == 0) {
7332 #ifdef TARGET_ARM
7333 if (((CPUARMState *)cpu_env)->eabi) {
7334 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7335 goto efault;
7336 target_efl->l_type = tswap16(fl.l_type);
7337 target_efl->l_whence = tswap16(fl.l_whence);
7338 target_efl->l_start = tswap64(fl.l_start);
7339 target_efl->l_len = tswap64(fl.l_len);
7340 target_efl->l_pid = tswap32(fl.l_pid);
7341 unlock_user_struct(target_efl, arg3, 1);
7342 } else
7343 #endif
7345 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7346 goto efault;
7347 target_fl->l_type = tswap16(fl.l_type);
7348 target_fl->l_whence = tswap16(fl.l_whence);
7349 target_fl->l_start = tswap64(fl.l_start);
7350 target_fl->l_len = tswap64(fl.l_len);
7351 target_fl->l_pid = tswap32(fl.l_pid);
7352 unlock_user_struct(target_fl, arg3, 1);
7355 break;
7357 case TARGET_F_SETLK64:
7358 case TARGET_F_SETLKW64:
7359 #ifdef TARGET_ARM
7360 if (((CPUARMState *)cpu_env)->eabi) {
7361 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7362 goto efault;
7363 fl.l_type = tswap16(target_efl->l_type);
7364 fl.l_whence = tswap16(target_efl->l_whence);
7365 fl.l_start = tswap64(target_efl->l_start);
7366 fl.l_len = tswap64(target_efl->l_len);
7367 fl.l_pid = tswap32(target_efl->l_pid);
7368 unlock_user_struct(target_efl, arg3, 0);
7369 } else
7370 #endif
7372 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7373 goto efault;
7374 fl.l_type = tswap16(target_fl->l_type);
7375 fl.l_whence = tswap16(target_fl->l_whence);
7376 fl.l_start = tswap64(target_fl->l_start);
7377 fl.l_len = tswap64(target_fl->l_len);
7378 fl.l_pid = tswap32(target_fl->l_pid);
7379 unlock_user_struct(target_fl, arg3, 0);
7381 ret = get_errno(fcntl(arg1, cmd, &fl));
7382 break;
7383 default:
7384 ret = do_fcntl(arg1, arg2, arg3);
7385 break;
7387 break;
7389 #endif
7390 #ifdef TARGET_NR_cacheflush
7391 case TARGET_NR_cacheflush:
7392 /* self-modifying code is handled automatically, so nothing needed */
7393 ret = 0;
7394 break;
7395 #endif
7396 #ifdef TARGET_NR_security
7397 case TARGET_NR_security:
7398 goto unimplemented;
7399 #endif
7400 #ifdef TARGET_NR_getpagesize
7401 case TARGET_NR_getpagesize:
7402 ret = TARGET_PAGE_SIZE;
7403 break;
7404 #endif
7405 case TARGET_NR_gettid:
7406 ret = get_errno(gettid());
7407 break;
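/* readahead: on 32-bit ABIs the 64-bit file offset is split across two
 * registers.  ARM EABI additionally aligns 64-bit arguments to an even
 * register pair, which is why the arguments are shifted down by one
 * below when the CPU is running in EABI mode. */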
7408 #ifdef TARGET_NR_readahead
7409 case TARGET_NR_readahead:
7410 #if TARGET_ABI_BITS == 32
7411 #ifdef TARGET_ARM
7412 if (((CPUARMState *)cpu_env)->eabi)
7414 arg2 = arg3;
7415 arg3 = arg4;
7416 arg4 = arg5;
7418 #endif
7419 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7420 #else
7421 ret = get_errno(readahead(arg1, arg2, arg3));
7422 #endif
7423 break;
7424 #endif
7425 #ifdef TARGET_NR_setxattr
7426 case TARGET_NR_setxattr:
7427 case TARGET_NR_lsetxattr:
7428 case TARGET_NR_fsetxattr:
7429 case TARGET_NR_getxattr:
7430 case TARGET_NR_lgetxattr:
7431 case TARGET_NR_fgetxattr:
7432 case TARGET_NR_listxattr:
7433 case TARGET_NR_llistxattr:
7434 case TARGET_NR_flistxattr:
7435 case TARGET_NR_removexattr:
7436 case TARGET_NR_lremovexattr:
7437 case TARGET_NR_fremovexattr:
7438 ret = -TARGET_EOPNOTSUPP;
7439 break;
7440 #endif
7441 #ifdef TARGET_NR_set_thread_area
7442 case TARGET_NR_set_thread_area:
7443 #if defined(TARGET_MIPS)
7444 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7445 ret = 0;
7446 break;
7447 #elif defined(TARGET_CRIS)
7448 if (arg1 & 0xff)
7449 ret = -TARGET_EINVAL;
7450 else {
7451 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7452 ret = 0;
7454 break;
7455 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7456 ret = do_set_thread_area(cpu_env, arg1);
7457 break;
7458 #else
7459 goto unimplemented_nowarn;
7460 #endif
7461 #endif
7462 #ifdef TARGET_NR_get_thread_area
7463 case TARGET_NR_get_thread_area:
7464 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7465 ret = do_get_thread_area(cpu_env, arg1);
7466 #else
7467 goto unimplemented_nowarn;
7468 #endif
7469 #endif
7470 #ifdef TARGET_NR_getdomainname
7471 case TARGET_NR_getdomainname:
7472 goto unimplemented_nowarn;
7473 #endif
7475 #ifdef TARGET_NR_clock_gettime
7476 case TARGET_NR_clock_gettime:
7478 struct timespec ts;
7479 ret = get_errno(clock_gettime(arg1, &ts));
7480 if (!is_error(ret)) {
7481 host_to_target_timespec(arg2, &ts);
7483 break;
7485 #endif
7486 #ifdef TARGET_NR_clock_getres
7487 case TARGET_NR_clock_getres:
7489 struct timespec ts;
7490 ret = get_errno(clock_getres(arg1, &ts));
7491 if (!is_error(ret)) {
7492 host_to_target_timespec(arg2, &ts);
7494 break;
7496 #endif
7497 #ifdef TARGET_NR_clock_nanosleep
7498 case TARGET_NR_clock_nanosleep:
7500 struct timespec ts;
7501 target_to_host_timespec(&ts, arg3);
7502 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7503 if (arg4)
7504 host_to_target_timespec(arg4, &ts);
7505 break;
7507 #endif
7509 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7510 case TARGET_NR_set_tid_address:
7511 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7512 break;
7513 #endif
7515 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7516 case TARGET_NR_tkill:
7517 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7518 break;
7519 #endif
7521 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7522 case TARGET_NR_tgkill:
7523 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7524 target_to_host_signal(arg3)));
7525 break;
7526 #endif
7528 #ifdef TARGET_NR_set_robust_list
7529 case TARGET_NR_set_robust_list:
7530 goto unimplemented_nowarn;
7531 #endif
7533 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7534 case TARGET_NR_utimensat:
7536 struct timespec *tsp, ts[2];
7537 if (!arg3) {
7538 tsp = NULL;
7539 } else {
7540 target_to_host_timespec(ts, arg3);
7541 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
7542 tsp = ts;
7544 if (!arg2)
7545 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7546 else {
7547 if (!(p = lock_user_string(arg2))) {
7548 ret = -TARGET_EFAULT;
7549 goto fail;
7551 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7552 unlock_user(p, arg2, 0);
7555 break;
7556 #endif
7557 #if defined(CONFIG_USE_NPTL)
7558 case TARGET_NR_futex:
7559 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7560 break;
7561 #endif
7562 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7563 case TARGET_NR_inotify_init:
7564 ret = get_errno(sys_inotify_init());
7565 break;
7566 #endif
7567 #ifdef CONFIG_INOTIFY1
7568 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7569 case TARGET_NR_inotify_init1:
7570 ret = get_errno(sys_inotify_init1(arg1));
7571 break;
7572 #endif
7573 #endif
7574 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7575 case TARGET_NR_inotify_add_watch:
7576 p = lock_user_string(arg2);
7577 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7578 unlock_user(p, arg2, 0);
7579 break;
7580 #endif
7581 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7582 case TARGET_NR_inotify_rm_watch:
7583 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7584 break;
7585 #endif
7587 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7588 case TARGET_NR_mq_open:
7590 struct mq_attr posix_mq_attr;
7592 p = lock_user_string(arg1 - 1);
7593 if (arg4 != 0)
7594 copy_from_user_mq_attr (&posix_mq_attr, arg4);
7595 ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL));
7596 unlock_user (p, arg1, 0);
7598 break;
7600 case TARGET_NR_mq_unlink:
7601 p = lock_user_string(arg1 - 1);
7602 ret = get_errno(mq_unlink(p));
7603 unlock_user (p, arg1, 0);
7604 break;
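/* mq_timedsend/mq_timedreceive: when a timeout is supplied in arg5 the
 * target timespec is converted to the host representation and the
 * timed variant is called; otherwise the plain mq_send/mq_receive is
 * used.  The received message priority is written back through arg4
 * when requested. */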
7606 case TARGET_NR_mq_timedsend:
7608 struct timespec ts;
7610 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7611 if (arg5 != 0) {
7612 target_to_host_timespec(&ts, arg5);
7613 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7614 host_to_target_timespec(arg5, &ts);
7616 else
7617 ret = get_errno(mq_send(arg1, p, arg3, arg4));
7618 unlock_user (p, arg2, arg3);
7620 break;
7622 case TARGET_NR_mq_timedreceive:
7624 struct timespec ts;
7625 unsigned int prio;
7627 p = lock_user (VERIFY_WRITE, arg2, arg3, 0);
7628 if (arg5 != 0) {
7629 target_to_host_timespec(&ts, arg5);
7630 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
7631 host_to_target_timespec(arg5, &ts);
7633 else
7634 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
7635 unlock_user (p, arg2, arg3);
7636 if (arg4 != 0)
7637 put_user_u32(prio, arg4);
7639 break;
7641 /* Not implemented for now... */
7642 /* case TARGET_NR_mq_notify: */
7643 /* break; */
7645 case TARGET_NR_mq_getsetattr:
7647 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
7648 ret = 0;
7649 if (arg3 != 0) {
7650 ret = mq_getattr(arg1, &posix_mq_attr_out);
7651 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
7653 if (arg2 != 0) {
7654 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
7655 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
7659 break;
7660 #endif
7662 #ifdef CONFIG_SPLICE
7663 #ifdef TARGET_NR_tee
7664 case TARGET_NR_tee:
7666 ret = get_errno(tee(arg1,arg2,arg3,arg4));
7668 break;
7669 #endif
7670 #ifdef TARGET_NR_splice
7671 case TARGET_NR_splice:
7673 loff_t loff_in, loff_out;
7674 loff_t *ploff_in = NULL, *ploff_out = NULL;
7675 if(arg2) {
7676 get_user_u64(loff_in, arg2);
7677 ploff_in = &loff_in;
7679 if(arg4) {
7680 get_user_u64(loff_out, arg4);
7681 ploff_out = &loff_out;
7683 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
7685 break;
7686 #endif
7687 #ifdef TARGET_NR_vmsplice
7688 case TARGET_NR_vmsplice:
7690 int count = arg3;
7691 struct iovec *vec;
7693 vec = alloca(count * sizeof(struct iovec));
7694 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7695 goto efault;
7696 ret = get_errno(vmsplice(arg1, vec, count, arg4));
7697 unlock_iovec(vec, arg2, count, 0);
7699 break;
7700 #endif
7701 #endif /* CONFIG_SPLICE */
7702 #ifdef CONFIG_EVENTFD
7703 #if defined(TARGET_NR_eventfd)
7704 case TARGET_NR_eventfd:
7705 ret = get_errno(eventfd(arg1, 0));
7706 break;
7707 #endif
7708 #if defined(TARGET_NR_eventfd2)
7709 case TARGET_NR_eventfd2:
7710 ret = get_errno(eventfd(arg1, arg2));
7711 break;
7712 #endif
7713 #endif /* CONFIG_EVENTFD */
7714 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7715 case TARGET_NR_fallocate:
7716 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
7717 break;
7718 #endif
7719 #if defined(CONFIG_SYNC_FILE_RANGE)
7720 #if defined(TARGET_NR_sync_file_range)
7721 case TARGET_NR_sync_file_range:
7722 #if TARGET_ABI_BITS == 32
7723 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
7724 target_offset64(arg4, arg5), arg6));
7725 #else
7726 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
7727 #endif
7728 break;
7729 #endif
7730 #if defined(TARGET_NR_sync_file_range2)
7731 case TARGET_NR_sync_file_range2:
7732 /* This is like sync_file_range but the arguments are reordered */
7733 #if TARGET_ABI_BITS == 32
7734 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7735 target_offset64(arg5, arg6), arg2));
7736 #else
7737 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
7738 #endif
7739 break;
7740 #endif
7741 #endif
7742 #if defined(CONFIG_EPOLL)
7743 #if defined(TARGET_NR_epoll_create)
7744 case TARGET_NR_epoll_create:
7745 ret = get_errno(epoll_create(arg1));
7746 break;
7747 #endif
7748 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7749 case TARGET_NR_epoll_create1:
7750 ret = get_errno(epoll_create1(arg1));
7751 break;
7752 #endif
7753 #if defined(TARGET_NR_epoll_ctl)
7754 case TARGET_NR_epoll_ctl:
7756 struct epoll_event ep;
7757 struct epoll_event *epp = 0;
7758 if (arg4) {
7759 struct target_epoll_event *target_ep;
7760 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
7761 goto efault;
7763 ep.events = tswap32(target_ep->events);
7764 /* The epoll_data_t union is just opaque data to the kernel,
7765 * so we transfer all 64 bits across and need not worry what
7766 * actual data type it is.
7767 */
7768 ep.data.u64 = tswap64(target_ep->data.u64);
7769 unlock_user_struct(target_ep, arg4, 0);
7770 epp = &ep;
7772 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
7773 break;
7775 #endif
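/* epoll_wait and epoll_pwait share one implementation: a host
 * epoll_event array is allocated on the stack, the wait is performed
 * (with an optionally translated sigset for epoll_pwait), and the
 * resulting events and opaque 64-bit data fields are byte-swapped back
 * into the target array. */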
7777 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7778 #define IMPLEMENT_EPOLL_PWAIT
7779 #endif
7780 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7781 #if defined(TARGET_NR_epoll_wait)
7782 case TARGET_NR_epoll_wait:
7783 #endif
7784 #if defined(IMPLEMENT_EPOLL_PWAIT)
7785 case TARGET_NR_epoll_pwait:
7786 #endif
7788 struct target_epoll_event *target_ep;
7789 struct epoll_event *ep;
7790 int epfd = arg1;
7791 int maxevents = arg3;
7792 int timeout = arg4;
7794 target_ep = lock_user(VERIFY_WRITE, arg2,
7795 maxevents * sizeof(struct target_epoll_event), 1);
7796 if (!target_ep) {
7797 goto efault;
7800 ep = alloca(maxevents * sizeof(struct epoll_event));
7802 switch (num) {
7803 #if defined(IMPLEMENT_EPOLL_PWAIT)
7804 case TARGET_NR_epoll_pwait:
7806 target_sigset_t *target_set;
7807 sigset_t _set, *set = &_set;
7809 if (arg5) {
7810 target_set = lock_user(VERIFY_READ, arg5,
7811 sizeof(target_sigset_t), 1);
7812 if (!target_set) {
7813 unlock_user(target_ep, arg2, 0);
7814 goto efault;
7816 target_to_host_sigset(set, target_set);
7817 unlock_user(target_set, arg5, 0);
7818 } else {
7819 set = NULL;
7822 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
7823 break;
7825 #endif
7826 #if defined(TARGET_NR_epoll_wait)
7827 case TARGET_NR_epoll_wait:
7828 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
7829 break;
7830 #endif
7831 default:
7832 ret = -TARGET_ENOSYS;
7834 if (!is_error(ret)) {
7835 int i;
7836 for (i = 0; i < ret; i++) {
7837 target_ep[i].events = tswap32(ep[i].events);
7838 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
7841 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
7842 break;
7844 #endif
7845 #endif
7846 default:
7847 unimplemented:
7848 gemu_log("qemu: Unsupported syscall: %d\n", num);
7849 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
7850 unimplemented_nowarn:
7851 #endif
7852 ret = -TARGET_ENOSYS;
7853 break;
7855 fail:
7856 #ifdef DEBUG
7857 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
7858 #endif
7859 if(do_strace)
7860 print_syscall_ret(num, ret);
7861 return ret;
7862 efault:
7863 ret = -TARGET_EFAULT;
7864 goto fail;