1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <qemu-common.h>
63 #ifdef TARGET_GPROF
64 #include <sys/gmon.h>
65 #endif
66 #ifdef CONFIG_EVENTFD
67 #include <sys/eventfd.h>
68 #endif
69 #ifdef CONFIG_EPOLL
70 #include <sys/epoll.h>
71 #endif
73 #define termios host_termios
74 #define winsize host_winsize
75 #define termio host_termio
76 #define sgttyb host_sgttyb /* same as target */
77 #define tchars host_tchars /* same as target */
78 #define ltchars host_ltchars /* same as target */
80 #include <linux/termios.h>
81 #include <linux/unistd.h>
82 #include <linux/utsname.h>
83 #include <linux/cdrom.h>
84 #include <linux/hdreg.h>
85 #include <linux/soundcard.h>
86 #include <linux/kd.h>
87 #include <linux/mtio.h>
88 #include <linux/fs.h>
89 #if defined(CONFIG_FIEMAP)
90 #include <linux/fiemap.h>
91 #endif
92 #include <linux/fb.h>
93 #include <linux/vt.h>
94 #include "linux_loop.h"
95 #include "cpu-uname.h"
97 #include "qemu.h"
98 #include "qemu-common.h"
100 #if defined(CONFIG_USE_NPTL)
101 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
102 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
103 #else
104 /* XXX: Hardcode the above values. */
105 #define CLONE_NPTL_FLAGS2 0
106 #endif
108 //#define DEBUG
110 //#include <linux/msdos_fs.h>
111 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
112 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
115 #undef _syscall0
116 #undef _syscall1
117 #undef _syscall2
118 #undef _syscall3
119 #undef _syscall4
120 #undef _syscall5
121 #undef _syscall6
123 #define _syscall0(type,name) \
124 static type name (void) \
126 return syscall(__NR_##name); \
129 #define _syscall1(type,name,type1,arg1) \
130 static type name (type1 arg1) \
132 return syscall(__NR_##name, arg1); \
135 #define _syscall2(type,name,type1,arg1,type2,arg2) \
136 static type name (type1 arg1,type2 arg2) \
138 return syscall(__NR_##name, arg1, arg2); \
141 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
142 static type name (type1 arg1,type2 arg2,type3 arg3) \
144 return syscall(__NR_##name, arg1, arg2, arg3); \
147 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
148 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
150 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
153 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
154 type5,arg5) \
155 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
157 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
161 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
162 type5,arg5,type6,arg6) \
163 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
164 type6 arg6) \
166 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
170 #define __NR_sys_uname __NR_uname
171 #define __NR_sys_faccessat __NR_faccessat
172 #define __NR_sys_fchmodat __NR_fchmodat
173 #define __NR_sys_fchownat __NR_fchownat
174 #define __NR_sys_fstatat64 __NR_fstatat64
175 #define __NR_sys_futimesat __NR_futimesat
176 #define __NR_sys_getcwd1 __NR_getcwd
177 #define __NR_sys_getdents __NR_getdents
178 #define __NR_sys_getdents64 __NR_getdents64
179 #define __NR_sys_getpriority __NR_getpriority
180 #define __NR_sys_linkat __NR_linkat
181 #define __NR_sys_mkdirat __NR_mkdirat
182 #define __NR_sys_mknodat __NR_mknodat
183 #define __NR_sys_newfstatat __NR_newfstatat
184 #define __NR_sys_openat __NR_openat
185 #define __NR_sys_readlinkat __NR_readlinkat
186 #define __NR_sys_renameat __NR_renameat
187 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
188 #define __NR_sys_symlinkat __NR_symlinkat
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_tgkill __NR_tgkill
191 #define __NR_sys_tkill __NR_tkill
192 #define __NR_sys_unlinkat __NR_unlinkat
193 #define __NR_sys_utimensat __NR_utimensat
194 #define __NR_sys_futex __NR_futex
195 #define __NR_sys_inotify_init __NR_inotify_init
196 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
197 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
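/*
 * The _syscall{0..6} macros above, combined with the __NR_sys_* aliases,
 * generate small static wrappers that invoke the host syscall directly and
 * return the raw host result (errno is set by the host on failure).  As an
 * illustration, an invocation such as
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count);
 *
 * expands to roughly the following (sketch only, not compiled):
 */
#if 0
static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
{
    /* __NR_sys_getdents is aliased to the host's __NR_getdents above. */
    return syscall(__NR_sys_getdents, fd, dirp, count);
}
#endif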
199 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
200 #define __NR__llseek __NR_lseek
201 #endif
203 #ifdef __NR_gettid
204 _syscall0(int, gettid)
205 #else
206 /* This is a replacement for the host gettid() and must return a host
207 errno. */
208 static int gettid(void) {
209 return -ENOSYS;
211 #endif
212 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
213 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
214 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
215 #endif
216 _syscall2(int, sys_getpriority, int, which, int, who);
217 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
218 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
219 loff_t *, res, uint, wh);
220 #endif
221 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
222 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
223 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
224 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
225 #endif
226 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
227 _syscall2(int,sys_tkill,int,tid,int,sig)
228 #endif
229 #ifdef __NR_exit_group
230 _syscall1(int,exit_group,int,error_code)
231 #endif
232 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
233 _syscall1(int,set_tid_address,int *,tidptr)
234 #endif
235 #if defined(CONFIG_USE_NPTL)
236 #if defined(TARGET_NR_futex) && defined(__NR_futex)
237 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
238 const struct timespec *,timeout,int *,uaddr2,int,val3)
239 #endif
240 #endif
241 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
242 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
243 unsigned long *, user_mask_ptr);
244 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
245 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
246 unsigned long *, user_mask_ptr);
248 static bitmask_transtbl fcntl_flags_tbl[] = {
249 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
250 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
251 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
252 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
253 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
254 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
255 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
256 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
257 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
258 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
259 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
260 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
261 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
262 #if defined(O_DIRECT)
263 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
264 #endif
265 { 0, 0, 0, 0 }
268 #define COPY_UTSNAME_FIELD(dest, src) \
269 do { \
270 /* __NEW_UTS_LEN doesn't include terminating null */ \
271 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
272 (dest)[__NEW_UTS_LEN] = '\0'; \
273 } while (0)
275 static int sys_uname(struct new_utsname *buf)
277 struct utsname uts_buf;
279 if (uname(&uts_buf) < 0)
280 return (-1);
283 * Just in case these have some differences, we
284 * translate utsname to new_utsname (which is the
285 * struct the Linux kernel uses).
288 bzero(buf, sizeof (*buf));
289 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
290 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
291 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
292 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
293 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
294 #ifdef _GNU_SOURCE
295 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
296 #endif
297 return (0);
299 #undef COPY_UTSNAME_FIELD
302 static int sys_getcwd1(char *buf, size_t size)
304 if (getcwd(buf, size) == NULL) {
305 /* getcwd() sets errno */
306 return (-1);
308 return strlen(buf)+1;
311 #ifdef CONFIG_ATFILE
313 * The host system seems to have the *at() syscall stubs available. We
314 * now enable them one by one, as specified by the target's syscall_nr.h.
317 #ifdef TARGET_NR_faccessat
318 static int sys_faccessat(int dirfd, const char *pathname, int mode)
320 return (faccessat(dirfd, pathname, mode, 0));
322 #endif
323 #ifdef TARGET_NR_fchmodat
324 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
326 return (fchmodat(dirfd, pathname, mode, 0));
328 #endif
329 #if defined(TARGET_NR_fchownat) && defined(USE_UID16)
330 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
331 gid_t group, int flags)
333 return (fchownat(dirfd, pathname, owner, group, flags));
335 #endif
336 #ifdef __NR_fstatat64
337 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
338 int flags)
340 return (fstatat(dirfd, pathname, buf, flags));
342 #endif
343 #ifdef __NR_newfstatat
344 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
345 int flags)
347 return (fstatat(dirfd, pathname, buf, flags));
349 #endif
350 #ifdef TARGET_NR_futimesat
351 static int sys_futimesat(int dirfd, const char *pathname,
352 const struct timeval times[2])
354 return (futimesat(dirfd, pathname, times));
356 #endif
357 #ifdef TARGET_NR_linkat
358 static int sys_linkat(int olddirfd, const char *oldpath,
359 int newdirfd, const char *newpath, int flags)
361 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
363 #endif
364 #ifdef TARGET_NR_mkdirat
365 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
367 return (mkdirat(dirfd, pathname, mode));
369 #endif
370 #ifdef TARGET_NR_mknodat
371 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
372 dev_t dev)
374 return (mknodat(dirfd, pathname, mode, dev));
376 #endif
377 #ifdef TARGET_NR_openat
378 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
381 * open(2) takes an extra 'mode' parameter when it is called with
382 * the O_CREAT flag.
384 if ((flags & O_CREAT) != 0) {
385 va_list ap;
386 mode_t mode;
389 * Get the 'mode' parameter and translate it to
390 * host bits.
392 va_start(ap, flags);
393 mode = va_arg(ap, mode_t);
394 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
395 va_end(ap);
397 return (openat(dirfd, pathname, flags, mode));
399 return (openat(dirfd, pathname, flags));
401 #endif
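/*
 * As with open(2), the 'mode' argument is only meaningful when O_CREAT is
 * set, which is why sys_openat() above pulls it out of the va_list
 * conditionally.  A minimal usage sketch (the paths are hypothetical,
 * illustration only):
 */
#if 0
static int example_openat_usage(void)
{
    /* Without O_CREAT the mode argument is simply never read. */
    int fd1 = sys_openat(AT_FDCWD, "existing-file", O_RDONLY);

    /* With O_CREAT the extra argument is fetched through the va_list. */
    int fd2 = sys_openat(AT_FDCWD, "new-file", O_CREAT | O_WRONLY, 0644);

    return (fd1 < 0 || fd2 < 0) ? -1 : 0;
}
#endif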
402 #ifdef TARGET_NR_readlinkat
403 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
405 return (readlinkat(dirfd, pathname, buf, bufsiz));
407 #endif
408 #ifdef TARGET_NR_renameat
409 static int sys_renameat(int olddirfd, const char *oldpath,
410 int newdirfd, const char *newpath)
412 return (renameat(olddirfd, oldpath, newdirfd, newpath));
414 #endif
415 #ifdef TARGET_NR_symlinkat
416 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
418 return (symlinkat(oldpath, newdirfd, newpath));
420 #endif
421 #ifdef TARGET_NR_unlinkat
422 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
424 return (unlinkat(dirfd, pathname, flags));
426 #endif
427 #else /* !CONFIG_ATFILE */
430 * Try direct syscalls instead
432 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
433 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
434 #endif
435 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
436 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
437 #endif
438 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
439 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
440 uid_t,owner,gid_t,group,int,flags)
441 #endif
442 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
443 defined(__NR_fstatat64)
444 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
445 struct stat *,buf,int,flags)
446 #endif
447 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
448 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
449 const struct timeval *,times)
450 #endif
451 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
452 defined(__NR_newfstatat)
453 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
454 struct stat *,buf,int,flags)
455 #endif
456 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
457 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
458 int,newdirfd,const char *,newpath,int,flags)
459 #endif
460 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
461 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
462 #endif
463 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
464 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
465 mode_t,mode,dev_t,dev)
466 #endif
467 #if defined(TARGET_NR_openat) && defined(__NR_openat)
468 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
469 #endif
470 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
471 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
472 char *,buf,size_t,bufsize)
473 #endif
474 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
475 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
476 int,newdirfd,const char *,newpath)
477 #endif
478 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
479 _syscall3(int,sys_symlinkat,const char *,oldpath,
480 int,newdirfd,const char *,newpath)
481 #endif
482 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
483 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
484 #endif
486 #endif /* CONFIG_ATFILE */
488 #ifdef CONFIG_UTIMENSAT
489 static int sys_utimensat(int dirfd, const char *pathname,
490 const struct timespec times[2], int flags)
492 if (pathname == NULL)
493 return futimens(dirfd, times);
494 else
495 return utimensat(dirfd, pathname, times, flags);
497 #else
498 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
499 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
500 const struct timespec *,tsp,int,flags)
501 #endif
502 #endif /* CONFIG_UTIMENSAT */
504 #ifdef CONFIG_INOTIFY
505 #include <sys/inotify.h>
507 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
508 static int sys_inotify_init(void)
510 return (inotify_init());
512 #endif
513 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
514 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
516 return (inotify_add_watch(fd, pathname, mask));
518 #endif
519 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
520 static int sys_inotify_rm_watch(int fd, int32_t wd)
522 return (inotify_rm_watch(fd, wd));
524 #endif
525 #ifdef CONFIG_INOTIFY1
526 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
527 static int sys_inotify_init1(int flags)
529 return (inotify_init1(flags));
531 #endif
532 #endif
533 #else
534 /* Userspace can usually survive runtime without inotify */
535 #undef TARGET_NR_inotify_init
536 #undef TARGET_NR_inotify_init1
537 #undef TARGET_NR_inotify_add_watch
538 #undef TARGET_NR_inotify_rm_watch
539 #endif /* CONFIG_INOTIFY */
541 #if defined(TARGET_NR_ppoll)
542 #ifndef __NR_ppoll
543 # define __NR_ppoll -1
544 #endif
545 #define __NR_sys_ppoll __NR_ppoll
546 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
547 struct timespec *, timeout, const __sigset_t *, sigmask,
548 size_t, sigsetsize)
549 #endif
551 extern int personality(int);
552 extern int flock(int, int);
553 extern int setfsuid(int);
554 extern int setfsgid(int);
555 extern int setgroups(int, gid_t *);
557 #define ERRNO_TABLE_SIZE 1200
559 /* target_to_host_errno_table[] is initialized from
560 * host_to_target_errno_table[] in syscall_init(). */
561 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
565 * This list is the union of errno values overridden in asm-<arch>/errno.h
566 * minus the errnos that are not actually generic to all archs.
568 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
569 [EIDRM] = TARGET_EIDRM,
570 [ECHRNG] = TARGET_ECHRNG,
571 [EL2NSYNC] = TARGET_EL2NSYNC,
572 [EL3HLT] = TARGET_EL3HLT,
573 [EL3RST] = TARGET_EL3RST,
574 [ELNRNG] = TARGET_ELNRNG,
575 [EUNATCH] = TARGET_EUNATCH,
576 [ENOCSI] = TARGET_ENOCSI,
577 [EL2HLT] = TARGET_EL2HLT,
578 [EDEADLK] = TARGET_EDEADLK,
579 [ENOLCK] = TARGET_ENOLCK,
580 [EBADE] = TARGET_EBADE,
581 [EBADR] = TARGET_EBADR,
582 [EXFULL] = TARGET_EXFULL,
583 [ENOANO] = TARGET_ENOANO,
584 [EBADRQC] = TARGET_EBADRQC,
585 [EBADSLT] = TARGET_EBADSLT,
586 [EBFONT] = TARGET_EBFONT,
587 [ENOSTR] = TARGET_ENOSTR,
588 [ENODATA] = TARGET_ENODATA,
589 [ETIME] = TARGET_ETIME,
590 [ENOSR] = TARGET_ENOSR,
591 [ENONET] = TARGET_ENONET,
592 [ENOPKG] = TARGET_ENOPKG,
593 [EREMOTE] = TARGET_EREMOTE,
594 [ENOLINK] = TARGET_ENOLINK,
595 [EADV] = TARGET_EADV,
596 [ESRMNT] = TARGET_ESRMNT,
597 [ECOMM] = TARGET_ECOMM,
598 [EPROTO] = TARGET_EPROTO,
599 [EDOTDOT] = TARGET_EDOTDOT,
600 [EMULTIHOP] = TARGET_EMULTIHOP,
601 [EBADMSG] = TARGET_EBADMSG,
602 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
603 [EOVERFLOW] = TARGET_EOVERFLOW,
604 [ENOTUNIQ] = TARGET_ENOTUNIQ,
605 [EBADFD] = TARGET_EBADFD,
606 [EREMCHG] = TARGET_EREMCHG,
607 [ELIBACC] = TARGET_ELIBACC,
608 [ELIBBAD] = TARGET_ELIBBAD,
609 [ELIBSCN] = TARGET_ELIBSCN,
610 [ELIBMAX] = TARGET_ELIBMAX,
611 [ELIBEXEC] = TARGET_ELIBEXEC,
612 [EILSEQ] = TARGET_EILSEQ,
613 [ENOSYS] = TARGET_ENOSYS,
614 [ELOOP] = TARGET_ELOOP,
615 [ERESTART] = TARGET_ERESTART,
616 [ESTRPIPE] = TARGET_ESTRPIPE,
617 [ENOTEMPTY] = TARGET_ENOTEMPTY,
618 [EUSERS] = TARGET_EUSERS,
619 [ENOTSOCK] = TARGET_ENOTSOCK,
620 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
621 [EMSGSIZE] = TARGET_EMSGSIZE,
622 [EPROTOTYPE] = TARGET_EPROTOTYPE,
623 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
624 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
625 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
626 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
627 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
628 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
629 [EADDRINUSE] = TARGET_EADDRINUSE,
630 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
631 [ENETDOWN] = TARGET_ENETDOWN,
632 [ENETUNREACH] = TARGET_ENETUNREACH,
633 [ENETRESET] = TARGET_ENETRESET,
634 [ECONNABORTED] = TARGET_ECONNABORTED,
635 [ECONNRESET] = TARGET_ECONNRESET,
636 [ENOBUFS] = TARGET_ENOBUFS,
637 [EISCONN] = TARGET_EISCONN,
638 [ENOTCONN] = TARGET_ENOTCONN,
639 [EUCLEAN] = TARGET_EUCLEAN,
640 [ENOTNAM] = TARGET_ENOTNAM,
641 [ENAVAIL] = TARGET_ENAVAIL,
642 [EISNAM] = TARGET_EISNAM,
643 [EREMOTEIO] = TARGET_EREMOTEIO,
644 [ESHUTDOWN] = TARGET_ESHUTDOWN,
645 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
646 [ETIMEDOUT] = TARGET_ETIMEDOUT,
647 [ECONNREFUSED] = TARGET_ECONNREFUSED,
648 [EHOSTDOWN] = TARGET_EHOSTDOWN,
649 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
650 [EALREADY] = TARGET_EALREADY,
651 [EINPROGRESS] = TARGET_EINPROGRESS,
652 [ESTALE] = TARGET_ESTALE,
653 [ECANCELED] = TARGET_ECANCELED,
654 [ENOMEDIUM] = TARGET_ENOMEDIUM,
655 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
656 #ifdef ENOKEY
657 [ENOKEY] = TARGET_ENOKEY,
658 #endif
659 #ifdef EKEYEXPIRED
660 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
661 #endif
662 #ifdef EKEYREVOKED
663 [EKEYREVOKED] = TARGET_EKEYREVOKED,
664 #endif
665 #ifdef EKEYREJECTED
666 [EKEYREJECTED] = TARGET_EKEYREJECTED,
667 #endif
668 #ifdef EOWNERDEAD
669 [EOWNERDEAD] = TARGET_EOWNERDEAD,
670 #endif
671 #ifdef ENOTRECOVERABLE
672 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
673 #endif
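/*
 * As noted above the table, target_to_host_errno_table[] is filled in at
 * startup from host_to_target_errno_table[] by syscall_init().  A minimal
 * sketch of how such a reverse mapping can be built (illustrative only; the
 * actual initialization is done in syscall_init() later in this file):
 */
#if 0
static void example_build_errno_tables(void)
{
    int i, tgt;

    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        /* Identity-map any host errno without an explicit override. */
        if (host_to_target_errno_table[i] == 0) {
            host_to_target_errno_table[i] = i;
        }
        /* Record the inverse mapping used by target_to_host_errno(). */
        tgt = host_to_target_errno_table[i];
        if (tgt < ERRNO_TABLE_SIZE && target_to_host_errno_table[tgt] == 0) {
            target_to_host_errno_table[tgt] = i;
        }
    }
}
#endif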
676 static inline int host_to_target_errno(int err)
678 if(host_to_target_errno_table[err])
679 return host_to_target_errno_table[err];
680 return err;
683 static inline int target_to_host_errno(int err)
685 if (target_to_host_errno_table[err])
686 return target_to_host_errno_table[err];
687 return err;
690 static inline abi_long get_errno(abi_long ret)
692 if (ret == -1)
693 return -host_to_target_errno(errno);
694 else
695 return ret;
698 static inline int is_error(abi_long ret)
700 return (abi_ulong)ret >= (abi_ulong)(-4096);
703 char *target_strerror(int err)
705 return strerror(target_to_host_errno(err));
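/*
 * Host calls are wrapped in get_errno() so that a failure is folded into the
 * return value as a negative *target* errno, and is_error() then recognizes
 * results in the -1..-4095 window, mirroring the kernel convention.  The
 * pattern used throughout this file looks roughly like this (fd and buf are
 * hypothetical arguments, sketch only):
 */
#if 0
static abi_long example_read_wrapper(int fd, void *buf, size_t count)
{
    abi_long ret = get_errno(read(fd, buf, count));

    if (is_error(ret)) {
        return ret;     /* already -TARGET_Exxx; hand it back to the guest */
    }
    return ret;         /* number of bytes read */
}
#endif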
708 static abi_ulong target_brk;
709 static abi_ulong target_original_brk;
711 void target_set_brk(abi_ulong new_brk)
713 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
716 /* do_brk() must return target values and target errnos. */
717 abi_long do_brk(abi_ulong new_brk)
719 abi_ulong brk_page;
720 abi_long mapped_addr;
721 int new_alloc_size;
723 if (!new_brk)
724 return target_brk;
725 if (new_brk < target_original_brk)
726 return target_brk;
728 brk_page = HOST_PAGE_ALIGN(target_brk);
730 /* If the new brk is less than this, set it and we're done... */
731 if (new_brk < brk_page) {
732 target_brk = new_brk;
733 return target_brk;
736 /* We need to allocate more memory after the brk... */
737 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
738 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
739 PROT_READ|PROT_WRITE,
740 MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
742 #if defined(TARGET_ALPHA)
743 /* We (partially) emulate OSF/1 on Alpha, which requires we
744 return a proper errno, not an unchanged brk value. */
745 if (is_error(mapped_addr)) {
746 return -TARGET_ENOMEM;
748 #endif
750 if (!is_error(mapped_addr)) {
751 target_brk = new_brk;
753 return target_brk;
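/*
 * do_brk() mimics the kernel's brk() semantics: a zero argument queries the
 * current break, requests below the original break are ignored, growth
 * within the already-mapped host page is free, and anything beyond it is
 * backed by an anonymous target_mmap().  A sketch of a typical sequence
 * (sizes are arbitrary, illustration only):
 */
#if 0
static void example_brk_sequence(void)
{
    abi_ulong cur = do_brk(0);                /* query the current break */
    abi_ulong top = do_brk(cur + 65536);      /* grow: new pages are mapped */
    abi_ulong unchanged = do_brk(target_original_brk - 1);
                                              /* below the original break:
                                                 ignored, old break returned */
    (void)top; (void)unchanged;
}
#endif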
756 static inline abi_long copy_from_user_fdset(fd_set *fds,
757 abi_ulong target_fds_addr,
758 int n)
760 int i, nw, j, k;
761 abi_ulong b, *target_fds;
763 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
764 if (!(target_fds = lock_user(VERIFY_READ,
765 target_fds_addr,
766 sizeof(abi_ulong) * nw,
767 1)))
768 return -TARGET_EFAULT;
770 FD_ZERO(fds);
771 k = 0;
772 for (i = 0; i < nw; i++) {
773 /* grab the abi_ulong */
774 __get_user(b, &target_fds[i]);
775 for (j = 0; j < TARGET_ABI_BITS; j++) {
776 /* check the bit inside the abi_ulong */
777 if ((b >> j) & 1)
778 FD_SET(k, fds);
779 k++;
783 unlock_user(target_fds, target_fds_addr, 0);
785 return 0;
788 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
789 const fd_set *fds,
790 int n)
792 int i, nw, j, k;
793 abi_long v;
794 abi_ulong *target_fds;
796 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
797 if (!(target_fds = lock_user(VERIFY_WRITE,
798 target_fds_addr,
799 sizeof(abi_ulong) * nw,
800 0)))
801 return -TARGET_EFAULT;
803 k = 0;
804 for (i = 0; i < nw; i++) {
805 v = 0;
806 for (j = 0; j < TARGET_ABI_BITS; j++) {
807 v |= ((FD_ISSET(k, fds) != 0) << j);
808 k++;
810 __put_user(v, &target_fds[i]);
813 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
815 return 0;
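/*
 * copy_from_user_fdset()/copy_to_user_fdset() translate between the guest
 * representation (an array of abi_ulong words in target byte order, one bit
 * per descriptor: fd k lives in word k / TARGET_ABI_BITS at bit
 * k % TARGET_ABI_BITS) and the host fd_set.  A minimal round-trip sketch
 * (target_fds_addr is a hypothetical guest address, illustration only):
 */
#if 0
static abi_long example_fdset_roundtrip(abi_ulong target_fds_addr, int nfds)
{
    fd_set fds;
    abi_long ret;

    ret = copy_from_user_fdset(&fds, target_fds_addr, nfds);
    if (ret) {
        return ret;                 /* -TARGET_EFAULT */
    }
    FD_SET(0, &fds);                /* e.g. report stdin as ready */
    return copy_to_user_fdset(target_fds_addr, &fds, nfds);
}
#endif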
818 #if defined(__alpha__)
819 #define HOST_HZ 1024
820 #else
821 #define HOST_HZ 100
822 #endif
824 static inline abi_long host_to_target_clock_t(long ticks)
826 #if HOST_HZ == TARGET_HZ
827 return ticks;
828 #else
829 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
830 #endif
833 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
834 const struct rusage *rusage)
836 struct target_rusage *target_rusage;
838 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
839 return -TARGET_EFAULT;
840 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
841 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
842 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
843 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
844 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
845 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
846 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
847 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
848 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
849 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
850 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
851 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
852 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
853 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
854 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
855 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
856 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
857 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
858 unlock_user_struct(target_rusage, target_addr, 1);
860 return 0;
863 static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
865 if (target_rlim == TARGET_RLIM_INFINITY)
866 return RLIM_INFINITY;
867 else
868 return tswapl(target_rlim);
871 static inline target_ulong host_to_target_rlim(rlim_t rlim)
873 if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
874 return TARGET_RLIM_INFINITY;
875 else
876 return tswapl(rlim);
879 static inline abi_long copy_from_user_timeval(struct timeval *tv,
880 abi_ulong target_tv_addr)
882 struct target_timeval *target_tv;
884 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
885 return -TARGET_EFAULT;
887 __get_user(tv->tv_sec, &target_tv->tv_sec);
888 __get_user(tv->tv_usec, &target_tv->tv_usec);
890 unlock_user_struct(target_tv, target_tv_addr, 0);
892 return 0;
895 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
896 const struct timeval *tv)
898 struct target_timeval *target_tv;
900 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
901 return -TARGET_EFAULT;
903 __put_user(tv->tv_sec, &target_tv->tv_sec);
904 __put_user(tv->tv_usec, &target_tv->tv_usec);
906 unlock_user_struct(target_tv, target_tv_addr, 1);
908 return 0;
911 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
912 #include <mqueue.h>
914 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
915 abi_ulong target_mq_attr_addr)
917 struct target_mq_attr *target_mq_attr;
919 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
920 target_mq_attr_addr, 1))
921 return -TARGET_EFAULT;
923 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
924 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
925 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
926 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
928 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
930 return 0;
933 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
934 const struct mq_attr *attr)
936 struct target_mq_attr *target_mq_attr;
938 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
939 target_mq_attr_addr, 0))
940 return -TARGET_EFAULT;
942 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
943 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
944 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
945 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
947 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
949 return 0;
951 #endif
953 /* do_select() must return target values and target errnos. */
954 static abi_long do_select(int n,
955 abi_ulong rfd_addr, abi_ulong wfd_addr,
956 abi_ulong efd_addr, abi_ulong target_tv_addr)
958 fd_set rfds, wfds, efds;
959 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
960 struct timeval tv, *tv_ptr;
961 abi_long ret;
963 if (rfd_addr) {
964 if (copy_from_user_fdset(&rfds, rfd_addr, n))
965 return -TARGET_EFAULT;
966 rfds_ptr = &rfds;
967 } else {
968 rfds_ptr = NULL;
970 if (wfd_addr) {
971 if (copy_from_user_fdset(&wfds, wfd_addr, n))
972 return -TARGET_EFAULT;
973 wfds_ptr = &wfds;
974 } else {
975 wfds_ptr = NULL;
977 if (efd_addr) {
978 if (copy_from_user_fdset(&efds, efd_addr, n))
979 return -TARGET_EFAULT;
980 efds_ptr = &efds;
981 } else {
982 efds_ptr = NULL;
985 if (target_tv_addr) {
986 if (copy_from_user_timeval(&tv, target_tv_addr))
987 return -TARGET_EFAULT;
988 tv_ptr = &tv;
989 } else {
990 tv_ptr = NULL;
993 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
995 if (!is_error(ret)) {
996 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
997 return -TARGET_EFAULT;
998 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
999 return -TARGET_EFAULT;
1000 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1001 return -TARGET_EFAULT;
1003 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1004 return -TARGET_EFAULT;
1007 return ret;
1010 static abi_long do_pipe2(int host_pipe[], int flags)
1012 #ifdef CONFIG_PIPE2
1013 return pipe2(host_pipe, flags);
1014 #else
1015 return -ENOSYS;
1016 #endif
1019 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1020 int flags, int is_pipe2)
1022 int host_pipe[2];
1023 abi_long ret;
1024 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1026 if (is_error(ret))
1027 return get_errno(ret);
1029 /* Several targets have special calling conventions for the original
1030 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1031 if (!is_pipe2) {
1032 #if defined(TARGET_ALPHA)
1033 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1034 return host_pipe[0];
1035 #elif defined(TARGET_MIPS)
1036 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1037 return host_pipe[0];
1038 #elif defined(TARGET_SH4)
1039 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1040 return host_pipe[0];
1041 #endif
1044 if (put_user_s32(host_pipe[0], pipedes)
1045 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1046 return -TARGET_EFAULT;
1047 return get_errno(ret);
1050 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1051 abi_ulong target_addr,
1052 socklen_t len)
1054 struct target_ip_mreqn *target_smreqn;
1056 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1057 if (!target_smreqn)
1058 return -TARGET_EFAULT;
1059 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1060 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1061 if (len == sizeof(struct target_ip_mreqn))
1062 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
1063 unlock_user(target_smreqn, target_addr, 0);
1065 return 0;
1068 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1069 abi_ulong target_addr,
1070 socklen_t len)
1072 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1073 sa_family_t sa_family;
1074 struct target_sockaddr *target_saddr;
1076 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1077 if (!target_saddr)
1078 return -TARGET_EFAULT;
1080 sa_family = tswap16(target_saddr->sa_family);
1082 /* Oops. The caller might send an incomplete sun_path; sun_path
1083 * must be terminated by \0 (see the manual page), but
1084 * unfortunately it is quite common to specify the sockaddr_un
1085 * length as "strlen(x->sun_path)" while it should be
1086 * "strlen(...) + 1". We fix that up here if needed.
1087 * The Linux kernel has a similar workaround.
1090 if (sa_family == AF_UNIX) {
1091 if (len < unix_maxlen && len > 0) {
1092 char *cp = (char*)target_saddr;
1094 if ( cp[len-1] && !cp[len] )
1095 len++;
1097 if (len > unix_maxlen)
1098 len = unix_maxlen;
1101 memcpy(addr, target_saddr, len);
1102 addr->sa_family = sa_family;
1103 unlock_user(target_saddr, target_addr, 0);
1105 return 0;
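/*
 * Concrete example of the AF_UNIX fixup above: if the guest binds to
 * "/tmp/sock" (an illustrative path) and passes
 * addrlen = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock"),
 * the terminating '\0' sits one byte past the stated length.  Then
 * cp[len-1] is the last path character (non-zero) and cp[len] is the '\0',
 * so len is bumped by one and the host sees a properly terminated sun_path.
 */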
1108 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1109 struct sockaddr *addr,
1110 socklen_t len)
1112 struct target_sockaddr *target_saddr;
1114 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1115 if (!target_saddr)
1116 return -TARGET_EFAULT;
1117 memcpy(target_saddr, addr, len);
1118 target_saddr->sa_family = tswap16(addr->sa_family);
1119 unlock_user(target_saddr, target_addr, len);
1121 return 0;
1124 /* ??? Should this also swap msgh->name? */
1125 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1126 struct target_msghdr *target_msgh)
1128 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1129 abi_long msg_controllen;
1130 abi_ulong target_cmsg_addr;
1131 struct target_cmsghdr *target_cmsg;
1132 socklen_t space = 0;
1134 msg_controllen = tswapl(target_msgh->msg_controllen);
1135 if (msg_controllen < sizeof (struct target_cmsghdr))
1136 goto the_end;
1137 target_cmsg_addr = tswapl(target_msgh->msg_control);
1138 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1139 if (!target_cmsg)
1140 return -TARGET_EFAULT;
1142 while (cmsg && target_cmsg) {
1143 void *data = CMSG_DATA(cmsg);
1144 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1146 int len = tswapl(target_cmsg->cmsg_len)
1147 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1149 space += CMSG_SPACE(len);
1150 if (space > msgh->msg_controllen) {
1151 space -= CMSG_SPACE(len);
1152 gemu_log("Host cmsg overflow\n");
1153 break;
1156 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1157 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1158 cmsg->cmsg_len = CMSG_LEN(len);
1160 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1161 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1162 memcpy(data, target_data, len);
1163 } else {
1164 int *fd = (int *)data;
1165 int *target_fd = (int *)target_data;
1166 int i, numfds = len / sizeof(int);
1168 for (i = 0; i < numfds; i++)
1169 fd[i] = tswap32(target_fd[i]);
1172 cmsg = CMSG_NXTHDR(msgh, cmsg);
1173 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1175 unlock_user(target_cmsg, target_cmsg_addr, 0);
1176 the_end:
1177 msgh->msg_controllen = space;
1178 return 0;
1181 /* ??? Should this also swap msgh->name? */
1182 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1183 struct msghdr *msgh)
1185 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1186 abi_long msg_controllen;
1187 abi_ulong target_cmsg_addr;
1188 struct target_cmsghdr *target_cmsg;
1189 socklen_t space = 0;
1191 msg_controllen = tswapl(target_msgh->msg_controllen);
1192 if (msg_controllen < sizeof (struct target_cmsghdr))
1193 goto the_end;
1194 target_cmsg_addr = tswapl(target_msgh->msg_control);
1195 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1196 if (!target_cmsg)
1197 return -TARGET_EFAULT;
1199 while (cmsg && target_cmsg) {
1200 void *data = CMSG_DATA(cmsg);
1201 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1203 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1205 space += TARGET_CMSG_SPACE(len);
1206 if (space > msg_controllen) {
1207 space -= TARGET_CMSG_SPACE(len);
1208 gemu_log("Target cmsg overflow\n");
1209 break;
1212 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1213 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1214 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1216 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1217 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1218 memcpy(target_data, data, len);
1219 } else {
1220 int *fd = (int *)data;
1221 int *target_fd = (int *)target_data;
1222 int i, numfds = len / sizeof(int);
1224 for (i = 0; i < numfds; i++)
1225 target_fd[i] = tswap32(fd[i]);
1228 cmsg = CMSG_NXTHDR(msgh, cmsg);
1229 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1231 unlock_user(target_cmsg, target_cmsg_addr, space);
1232 the_end:
1233 target_msgh->msg_controllen = tswapl(space);
1234 return 0;
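/*
 * Both cmsg converters copy ordinary ancillary payloads byte-for-byte but
 * special-case SCM_RIGHTS: there the payload is an array of ints (file
 * descriptors) whose elements must be byte-swapped individually.  For
 * example, a big-endian guest on a little-endian host passing the single
 * descriptor 5 supplies the payload bytes 00 00 00 05; a plain memcpy()
 * would hand the host 0x05000000, so each entry goes through tswap32()
 * instead.  (Descriptor 5 is only an illustration.)
 */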
1237 /* do_setsockopt() Must return target values and target errnos. */
1238 static abi_long do_setsockopt(int sockfd, int level, int optname,
1239 abi_ulong optval_addr, socklen_t optlen)
1241 abi_long ret;
1242 int val;
1243 struct ip_mreqn *ip_mreq;
1244 struct ip_mreq_source *ip_mreq_source;
1246 switch(level) {
1247 case SOL_TCP:
1248 /* TCP options all take an 'int' value. */
1249 if (optlen < sizeof(uint32_t))
1250 return -TARGET_EINVAL;
1252 if (get_user_u32(val, optval_addr))
1253 return -TARGET_EFAULT;
1254 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1255 break;
1256 case SOL_IP:
1257 switch(optname) {
1258 case IP_TOS:
1259 case IP_TTL:
1260 case IP_HDRINCL:
1261 case IP_ROUTER_ALERT:
1262 case IP_RECVOPTS:
1263 case IP_RETOPTS:
1264 case IP_PKTINFO:
1265 case IP_MTU_DISCOVER:
1266 case IP_RECVERR:
1267 case IP_RECVTOS:
1268 #ifdef IP_FREEBIND
1269 case IP_FREEBIND:
1270 #endif
1271 case IP_MULTICAST_TTL:
1272 case IP_MULTICAST_LOOP:
1273 val = 0;
1274 if (optlen >= sizeof(uint32_t)) {
1275 if (get_user_u32(val, optval_addr))
1276 return -TARGET_EFAULT;
1277 } else if (optlen >= 1) {
1278 if (get_user_u8(val, optval_addr))
1279 return -TARGET_EFAULT;
1281 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1282 break;
1283 case IP_ADD_MEMBERSHIP:
1284 case IP_DROP_MEMBERSHIP:
1285 if (optlen < sizeof (struct target_ip_mreq) ||
1286 optlen > sizeof (struct target_ip_mreqn))
1287 return -TARGET_EINVAL;
1289 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1290 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1291 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1292 break;
1294 case IP_BLOCK_SOURCE:
1295 case IP_UNBLOCK_SOURCE:
1296 case IP_ADD_SOURCE_MEMBERSHIP:
1297 case IP_DROP_SOURCE_MEMBERSHIP:
1298 if (optlen != sizeof (struct target_ip_mreq_source))
1299 return -TARGET_EINVAL;
1301 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1302 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1303 unlock_user (ip_mreq_source, optval_addr, 0);
1304 break;
1306 default:
1307 goto unimplemented;
1309 break;
1310 case TARGET_SOL_SOCKET:
1311 switch (optname) {
1312 /* Options with 'int' argument. */
1313 case TARGET_SO_DEBUG:
1314 optname = SO_DEBUG;
1315 break;
1316 case TARGET_SO_REUSEADDR:
1317 optname = SO_REUSEADDR;
1318 break;
1319 case TARGET_SO_TYPE:
1320 optname = SO_TYPE;
1321 break;
1322 case TARGET_SO_ERROR:
1323 optname = SO_ERROR;
1324 break;
1325 case TARGET_SO_DONTROUTE:
1326 optname = SO_DONTROUTE;
1327 break;
1328 case TARGET_SO_BROADCAST:
1329 optname = SO_BROADCAST;
1330 break;
1331 case TARGET_SO_SNDBUF:
1332 optname = SO_SNDBUF;
1333 break;
1334 case TARGET_SO_RCVBUF:
1335 optname = SO_RCVBUF;
1336 break;
1337 case TARGET_SO_KEEPALIVE:
1338 optname = SO_KEEPALIVE;
1339 break;
1340 case TARGET_SO_OOBINLINE:
1341 optname = SO_OOBINLINE;
1342 break;
1343 case TARGET_SO_NO_CHECK:
1344 optname = SO_NO_CHECK;
1345 break;
1346 case TARGET_SO_PRIORITY:
1347 optname = SO_PRIORITY;
1348 break;
1349 #ifdef SO_BSDCOMPAT
1350 case TARGET_SO_BSDCOMPAT:
1351 optname = SO_BSDCOMPAT;
1352 break;
1353 #endif
1354 case TARGET_SO_PASSCRED:
1355 optname = SO_PASSCRED;
1356 break;
1357 case TARGET_SO_TIMESTAMP:
1358 optname = SO_TIMESTAMP;
1359 break;
1360 case TARGET_SO_RCVLOWAT:
1361 optname = SO_RCVLOWAT;
1362 break;
1363 case TARGET_SO_RCVTIMEO:
1364 optname = SO_RCVTIMEO;
1365 break;
1366 case TARGET_SO_SNDTIMEO:
1367 optname = SO_SNDTIMEO;
1368 break;
1369 break;
1370 default:
1371 goto unimplemented;
1373 if (optlen < sizeof(uint32_t))
1374 return -TARGET_EINVAL;
1376 if (get_user_u32(val, optval_addr))
1377 return -TARGET_EFAULT;
1378 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1379 break;
1380 default:
1381 unimplemented:
1382 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1383 ret = -TARGET_ENOPROTOOPT;
1385 return ret;
1388 /* do_getsockopt() Must return target values and target errnos. */
1389 static abi_long do_getsockopt(int sockfd, int level, int optname,
1390 abi_ulong optval_addr, abi_ulong optlen)
1392 abi_long ret;
1393 int len, val;
1394 socklen_t lv;
1396 switch(level) {
1397 case TARGET_SOL_SOCKET:
1398 level = SOL_SOCKET;
1399 switch (optname) {
1400 /* These don't just return a single integer */
1401 case TARGET_SO_LINGER:
1402 case TARGET_SO_RCVTIMEO:
1403 case TARGET_SO_SNDTIMEO:
1404 case TARGET_SO_PEERCRED:
1405 case TARGET_SO_PEERNAME:
1406 goto unimplemented;
1407 /* Options with 'int' argument. */
1408 case TARGET_SO_DEBUG:
1409 optname = SO_DEBUG;
1410 goto int_case;
1411 case TARGET_SO_REUSEADDR:
1412 optname = SO_REUSEADDR;
1413 goto int_case;
1414 case TARGET_SO_TYPE:
1415 optname = SO_TYPE;
1416 goto int_case;
1417 case TARGET_SO_ERROR:
1418 optname = SO_ERROR;
1419 goto int_case;
1420 case TARGET_SO_DONTROUTE:
1421 optname = SO_DONTROUTE;
1422 goto int_case;
1423 case TARGET_SO_BROADCAST:
1424 optname = SO_BROADCAST;
1425 goto int_case;
1426 case TARGET_SO_SNDBUF:
1427 optname = SO_SNDBUF;
1428 goto int_case;
1429 case TARGET_SO_RCVBUF:
1430 optname = SO_RCVBUF;
1431 goto int_case;
1432 case TARGET_SO_KEEPALIVE:
1433 optname = SO_KEEPALIVE;
1434 goto int_case;
1435 case TARGET_SO_OOBINLINE:
1436 optname = SO_OOBINLINE;
1437 goto int_case;
1438 case TARGET_SO_NO_CHECK:
1439 optname = SO_NO_CHECK;
1440 goto int_case;
1441 case TARGET_SO_PRIORITY:
1442 optname = SO_PRIORITY;
1443 goto int_case;
1444 #ifdef SO_BSDCOMPAT
1445 case TARGET_SO_BSDCOMPAT:
1446 optname = SO_BSDCOMPAT;
1447 goto int_case;
1448 #endif
1449 case TARGET_SO_PASSCRED:
1450 optname = SO_PASSCRED;
1451 goto int_case;
1452 case TARGET_SO_TIMESTAMP:
1453 optname = SO_TIMESTAMP;
1454 goto int_case;
1455 case TARGET_SO_RCVLOWAT:
1456 optname = SO_RCVLOWAT;
1457 goto int_case;
1458 default:
1459 goto int_case;
1461 break;
1462 case SOL_TCP:
1463 /* TCP options all take an 'int' value. */
1464 int_case:
1465 if (get_user_u32(len, optlen))
1466 return -TARGET_EFAULT;
1467 if (len < 0)
1468 return -TARGET_EINVAL;
1469 lv = sizeof(lv);
1470 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1471 if (ret < 0)
1472 return ret;
1473 if (len > lv)
1474 len = lv;
1475 if (len == 4) {
1476 if (put_user_u32(val, optval_addr))
1477 return -TARGET_EFAULT;
1478 } else {
1479 if (put_user_u8(val, optval_addr))
1480 return -TARGET_EFAULT;
1482 if (put_user_u32(len, optlen))
1483 return -TARGET_EFAULT;
1484 break;
1485 case SOL_IP:
1486 switch(optname) {
1487 case IP_TOS:
1488 case IP_TTL:
1489 case IP_HDRINCL:
1490 case IP_ROUTER_ALERT:
1491 case IP_RECVOPTS:
1492 case IP_RETOPTS:
1493 case IP_PKTINFO:
1494 case IP_MTU_DISCOVER:
1495 case IP_RECVERR:
1496 case IP_RECVTOS:
1497 #ifdef IP_FREEBIND
1498 case IP_FREEBIND:
1499 #endif
1500 case IP_MULTICAST_TTL:
1501 case IP_MULTICAST_LOOP:
1502 if (get_user_u32(len, optlen))
1503 return -TARGET_EFAULT;
1504 if (len < 0)
1505 return -TARGET_EINVAL;
1506 lv = sizeof(lv);
1507 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1508 if (ret < 0)
1509 return ret;
1510 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1511 len = 1;
1512 if (put_user_u32(len, optlen)
1513 || put_user_u8(val, optval_addr))
1514 return -TARGET_EFAULT;
1515 } else {
1516 if (len > sizeof(int))
1517 len = sizeof(int);
1518 if (put_user_u32(len, optlen)
1519 || put_user_u32(val, optval_addr))
1520 return -TARGET_EFAULT;
1522 break;
1523 default:
1524 ret = -TARGET_ENOPROTOOPT;
1525 break;
1527 break;
1528 default:
1529 unimplemented:
1530 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1531 level, optname);
1532 ret = -TARGET_EOPNOTSUPP;
1533 break;
1535 return ret;
1538 /* FIXME
1539 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1540 * other lock functions have a return code of 0 for failure.
1542 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1543 int count, int copy)
1545 struct target_iovec *target_vec;
1546 abi_ulong base;
1547 int i;
1549 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1550 if (!target_vec)
1551 return -TARGET_EFAULT;
1552 for(i = 0;i < count; i++) {
1553 base = tswapl(target_vec[i].iov_base);
1554 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1555 if (vec[i].iov_len != 0) {
1556 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1557 /* Don't check lock_user return value. We must call writev even
1558 if an element has an invalid base address. */
1559 } else {
1560 /* zero length pointer is ignored */
1561 vec[i].iov_base = NULL;
1564 unlock_user (target_vec, target_addr, 0);
1565 return 0;
1568 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1569 int count, int copy)
1571 struct target_iovec *target_vec;
1572 abi_ulong base;
1573 int i;
1575 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1576 if (!target_vec)
1577 return -TARGET_EFAULT;
1578 for(i = 0;i < count; i++) {
1579 if (target_vec[i].iov_base) {
1580 base = tswapl(target_vec[i].iov_base);
1581 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1584 unlock_user (target_vec, target_addr, 0);
1586 return 0;
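/*
 * As the FIXME above notes, lock_iovec() returns 0 on success and a negative
 * target errno on failure, the opposite of the lock_user*() helpers, which
 * return NULL/0 on failure.  The usual pattern around a vectored I/O call
 * looks roughly like this (fd, target_addr and count are hypothetical guest
 * arguments, sketch only):
 */
#if 0
static abi_long example_writev(int fd, abi_ulong target_addr, int count)
{
    struct iovec *vec = alloca(count * sizeof(struct iovec));
    abi_long ret;

    ret = lock_iovec(VERIFY_READ, vec, target_addr, count, 1);
    if (ret) {
        return ret;                 /* -TARGET_EFAULT */
    }
    ret = get_errno(writev(fd, vec, count));
    unlock_iovec(vec, target_addr, count, 0);
    return ret;
}
#endif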
1589 /* do_socket() Must return target values and target errnos. */
1590 static abi_long do_socket(int domain, int type, int protocol)
1592 #if defined(TARGET_MIPS)
1593 switch(type) {
1594 case TARGET_SOCK_DGRAM:
1595 type = SOCK_DGRAM;
1596 break;
1597 case TARGET_SOCK_STREAM:
1598 type = SOCK_STREAM;
1599 break;
1600 case TARGET_SOCK_RAW:
1601 type = SOCK_RAW;
1602 break;
1603 case TARGET_SOCK_RDM:
1604 type = SOCK_RDM;
1605 break;
1606 case TARGET_SOCK_SEQPACKET:
1607 type = SOCK_SEQPACKET;
1608 break;
1609 case TARGET_SOCK_PACKET:
1610 type = SOCK_PACKET;
1611 break;
1613 #endif
1614 if (domain == PF_NETLINK)
1615 return -EAFNOSUPPORT; /* PF_NETLINK sockets are not supported */
1616 return get_errno(socket(domain, type, protocol));
1619 /* do_bind() Must return target values and target errnos. */
1620 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1621 socklen_t addrlen)
1623 void *addr;
1624 abi_long ret;
1626 if ((int)addrlen < 0) {
1627 return -TARGET_EINVAL;
1630 addr = alloca(addrlen+1);
1632 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1633 if (ret)
1634 return ret;
1636 return get_errno(bind(sockfd, addr, addrlen));
1639 /* do_connect() Must return target values and target errnos. */
1640 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1641 socklen_t addrlen)
1643 void *addr;
1644 abi_long ret;
1646 if ((int)addrlen < 0) {
1647 return -TARGET_EINVAL;
1650 addr = alloca(addrlen);
1652 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1653 if (ret)
1654 return ret;
1656 return get_errno(connect(sockfd, addr, addrlen));
1659 /* do_sendrecvmsg() Must return target values and target errnos. */
1660 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1661 int flags, int send)
1663 abi_long ret, len;
1664 struct target_msghdr *msgp;
1665 struct msghdr msg;
1666 int count;
1667 struct iovec *vec;
1668 abi_ulong target_vec;
1670 /* FIXME */
1671 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1672 msgp,
1673 target_msg,
1674 send ? 1 : 0))
1675 return -TARGET_EFAULT;
1676 if (msgp->msg_name) {
1677 msg.msg_namelen = tswap32(msgp->msg_namelen);
1678 msg.msg_name = alloca(msg.msg_namelen);
1679 ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1680 msg.msg_namelen);
1681 if (ret) {
1682 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1683 return ret;
1685 } else {
1686 msg.msg_name = NULL;
1687 msg.msg_namelen = 0;
1689 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1690 msg.msg_control = alloca(msg.msg_controllen);
1691 msg.msg_flags = tswap32(msgp->msg_flags);
1693 count = tswapl(msgp->msg_iovlen);
1694 vec = alloca(count * sizeof(struct iovec));
1695 target_vec = tswapl(msgp->msg_iov);
1696 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1697 msg.msg_iovlen = count;
1698 msg.msg_iov = vec;
1700 if (send) {
1701 ret = target_to_host_cmsg(&msg, msgp);
1702 if (ret == 0)
1703 ret = get_errno(sendmsg(fd, &msg, flags));
1704 } else {
1705 ret = get_errno(recvmsg(fd, &msg, flags));
1706 if (!is_error(ret)) {
1707 len = ret;
1708 ret = host_to_target_cmsg(msgp, &msg);
1709 if (!is_error(ret))
1710 ret = len;
1713 unlock_iovec(vec, target_vec, count, !send);
1714 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1715 return ret;
1718 /* do_accept() Must return target values and target errnos. */
1719 static abi_long do_accept(int fd, abi_ulong target_addr,
1720 abi_ulong target_addrlen_addr)
1722 socklen_t addrlen;
1723 void *addr;
1724 abi_long ret;
1726 if (target_addr == 0)
1727 return get_errno(accept(fd, NULL, NULL));
1729 /* linux returns EINVAL if addrlen pointer is invalid */
1730 if (get_user_u32(addrlen, target_addrlen_addr))
1731 return -TARGET_EINVAL;
1733 if ((int)addrlen < 0) {
1734 return -TARGET_EINVAL;
1737 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1738 return -TARGET_EINVAL;
1740 addr = alloca(addrlen);
1742 ret = get_errno(accept(fd, addr, &addrlen));
1743 if (!is_error(ret)) {
1744 host_to_target_sockaddr(target_addr, addr, addrlen);
1745 if (put_user_u32(addrlen, target_addrlen_addr))
1746 ret = -TARGET_EFAULT;
1748 return ret;
1751 /* do_getpeername() Must return target values and target errnos. */
1752 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1753 abi_ulong target_addrlen_addr)
1755 socklen_t addrlen;
1756 void *addr;
1757 abi_long ret;
1759 if (get_user_u32(addrlen, target_addrlen_addr))
1760 return -TARGET_EFAULT;
1762 if ((int)addrlen < 0) {
1763 return -TARGET_EINVAL;
1766 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1767 return -TARGET_EFAULT;
1769 addr = alloca(addrlen);
1771 ret = get_errno(getpeername(fd, addr, &addrlen));
1772 if (!is_error(ret)) {
1773 host_to_target_sockaddr(target_addr, addr, addrlen);
1774 if (put_user_u32(addrlen, target_addrlen_addr))
1775 ret = -TARGET_EFAULT;
1777 return ret;
1780 /* do_getsockname() Must return target values and target errnos. */
1781 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1782 abi_ulong target_addrlen_addr)
1784 socklen_t addrlen;
1785 void *addr;
1786 abi_long ret;
1788 if (get_user_u32(addrlen, target_addrlen_addr))
1789 return -TARGET_EFAULT;
1791 if ((int)addrlen < 0) {
1792 return -TARGET_EINVAL;
1795 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1796 return -TARGET_EFAULT;
1798 addr = alloca(addrlen);
1800 ret = get_errno(getsockname(fd, addr, &addrlen));
1801 if (!is_error(ret)) {
1802 host_to_target_sockaddr(target_addr, addr, addrlen);
1803 if (put_user_u32(addrlen, target_addrlen_addr))
1804 ret = -TARGET_EFAULT;
1806 return ret;
1809 /* do_socketpair() Must return target values and target errnos. */
1810 static abi_long do_socketpair(int domain, int type, int protocol,
1811 abi_ulong target_tab_addr)
1813 int tab[2];
1814 abi_long ret;
1816 ret = get_errno(socketpair(domain, type, protocol, tab));
1817 if (!is_error(ret)) {
1818 if (put_user_s32(tab[0], target_tab_addr)
1819 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1820 ret = -TARGET_EFAULT;
1822 return ret;
1825 /* do_sendto() Must return target values and target errnos. */
1826 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1827 abi_ulong target_addr, socklen_t addrlen)
1829 void *addr;
1830 void *host_msg;
1831 abi_long ret;
1833 if ((int)addrlen < 0) {
1834 return -TARGET_EINVAL;
1837 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1838 if (!host_msg)
1839 return -TARGET_EFAULT;
1840 if (target_addr) {
1841 addr = alloca(addrlen);
1842 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1843 if (ret) {
1844 unlock_user(host_msg, msg, 0);
1845 return ret;
1847 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1848 } else {
1849 ret = get_errno(send(fd, host_msg, len, flags));
1851 unlock_user(host_msg, msg, 0);
1852 return ret;
1855 /* do_recvfrom() Must return target values and target errnos. */
1856 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1857 abi_ulong target_addr,
1858 abi_ulong target_addrlen)
1860 socklen_t addrlen;
1861 void *addr;
1862 void *host_msg;
1863 abi_long ret;
1865 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1866 if (!host_msg)
1867 return -TARGET_EFAULT;
1868 if (target_addr) {
1869 if (get_user_u32(addrlen, target_addrlen)) {
1870 ret = -TARGET_EFAULT;
1871 goto fail;
1873 if ((int)addrlen < 0) {
1874 ret = -TARGET_EINVAL;
1875 goto fail;
1877 addr = alloca(addrlen);
1878 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1879 } else {
1880 addr = NULL; /* To keep compiler quiet. */
1881 ret = get_errno(recv(fd, host_msg, len, flags));
1883 if (!is_error(ret)) {
1884 if (target_addr) {
1885 host_to_target_sockaddr(target_addr, addr, addrlen);
1886 if (put_user_u32(addrlen, target_addrlen)) {
1887 ret = -TARGET_EFAULT;
1888 goto fail;
1891 unlock_user(host_msg, msg, len);
1892 } else {
1893 fail:
1894 unlock_user(host_msg, msg, 0);
1896 return ret;
1899 #ifdef TARGET_NR_socketcall
1900 /* do_socketcall() Must return target values and target errnos. */
1901 static abi_long do_socketcall(int num, abi_ulong vptr)
1903 abi_long ret;
1904 const int n = sizeof(abi_ulong);
1906 switch(num) {
1907 case SOCKOP_socket:
1909 abi_ulong domain, type, protocol;
1911 if (get_user_ual(domain, vptr)
1912 || get_user_ual(type, vptr + n)
1913 || get_user_ual(protocol, vptr + 2 * n))
1914 return -TARGET_EFAULT;
1916 ret = do_socket(domain, type, protocol);
1918 break;
1919 case SOCKOP_bind:
1921 abi_ulong sockfd;
1922 abi_ulong target_addr;
1923 socklen_t addrlen;
1925 if (get_user_ual(sockfd, vptr)
1926 || get_user_ual(target_addr, vptr + n)
1927 || get_user_ual(addrlen, vptr + 2 * n))
1928 return -TARGET_EFAULT;
1930 ret = do_bind(sockfd, target_addr, addrlen);
1932 break;
1933 case SOCKOP_connect:
1935 abi_ulong sockfd;
1936 abi_ulong target_addr;
1937 socklen_t addrlen;
1939 if (get_user_ual(sockfd, vptr)
1940 || get_user_ual(target_addr, vptr + n)
1941 || get_user_ual(addrlen, vptr + 2 * n))
1942 return -TARGET_EFAULT;
1944 ret = do_connect(sockfd, target_addr, addrlen);
1946 break;
1947 case SOCKOP_listen:
1949 abi_ulong sockfd, backlog;
1951 if (get_user_ual(sockfd, vptr)
1952 || get_user_ual(backlog, vptr + n))
1953 return -TARGET_EFAULT;
1955 ret = get_errno(listen(sockfd, backlog));
1957 break;
1958 case SOCKOP_accept:
1960 abi_ulong sockfd;
1961 abi_ulong target_addr, target_addrlen;
1963 if (get_user_ual(sockfd, vptr)
1964 || get_user_ual(target_addr, vptr + n)
1965 || get_user_ual(target_addrlen, vptr + 2 * n))
1966 return -TARGET_EFAULT;
1968 ret = do_accept(sockfd, target_addr, target_addrlen);
1970 break;
1971 case SOCKOP_getsockname:
1973 abi_ulong sockfd;
1974 abi_ulong target_addr, target_addrlen;
1976 if (get_user_ual(sockfd, vptr)
1977 || get_user_ual(target_addr, vptr + n)
1978 || get_user_ual(target_addrlen, vptr + 2 * n))
1979 return -TARGET_EFAULT;
1981 ret = do_getsockname(sockfd, target_addr, target_addrlen);
1983 break;
1984 case SOCKOP_getpeername:
1986 abi_ulong sockfd;
1987 abi_ulong target_addr, target_addrlen;
1989 if (get_user_ual(sockfd, vptr)
1990 || get_user_ual(target_addr, vptr + n)
1991 || get_user_ual(target_addrlen, vptr + 2 * n))
1992 return -TARGET_EFAULT;
1994 ret = do_getpeername(sockfd, target_addr, target_addrlen);
1996 break;
1997 case SOCKOP_socketpair:
1999 abi_ulong domain, type, protocol;
2000 abi_ulong tab;
2002 if (get_user_ual(domain, vptr)
2003 || get_user_ual(type, vptr + n)
2004 || get_user_ual(protocol, vptr + 2 * n)
2005 || get_user_ual(tab, vptr + 3 * n))
2006 return -TARGET_EFAULT;
2008 ret = do_socketpair(domain, type, protocol, tab);
2010 break;
2011 case SOCKOP_send:
2013 abi_ulong sockfd;
2014 abi_ulong msg;
2015 size_t len;
2016 abi_ulong flags;
2018 if (get_user_ual(sockfd, vptr)
2019 || get_user_ual(msg, vptr + n)
2020 || get_user_ual(len, vptr + 2 * n)
2021 || get_user_ual(flags, vptr + 3 * n))
2022 return -TARGET_EFAULT;
2024 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2026 break;
2027 case SOCKOP_recv:
2029 abi_ulong sockfd;
2030 abi_ulong msg;
2031 size_t len;
2032 abi_ulong flags;
2034 if (get_user_ual(sockfd, vptr)
2035 || get_user_ual(msg, vptr + n)
2036 || get_user_ual(len, vptr + 2 * n)
2037 || get_user_ual(flags, vptr + 3 * n))
2038 return -TARGET_EFAULT;
2040 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2042 break;
2043 case SOCKOP_sendto:
2045 abi_ulong sockfd;
2046 abi_ulong msg;
2047 size_t len;
2048 abi_ulong flags;
2049 abi_ulong addr;
2050 socklen_t addrlen;
2052 if (get_user_ual(sockfd, vptr)
2053 || get_user_ual(msg, vptr + n)
2054 || get_user_ual(len, vptr + 2 * n)
2055 || get_user_ual(flags, vptr + 3 * n)
2056 || get_user_ual(addr, vptr + 4 * n)
2057 || get_user_ual(addrlen, vptr + 5 * n))
2058 return -TARGET_EFAULT;
2060 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2062 break;
2063 case SOCKOP_recvfrom:
2065 abi_ulong sockfd;
2066 abi_ulong msg;
2067 size_t len;
2068 abi_ulong flags;
2069 abi_ulong addr;
2070 socklen_t addrlen;
2072 if (get_user_ual(sockfd, vptr)
2073 || get_user_ual(msg, vptr + n)
2074 || get_user_ual(len, vptr + 2 * n)
2075 || get_user_ual(flags, vptr + 3 * n)
2076 || get_user_ual(addr, vptr + 4 * n)
2077 || get_user_ual(addrlen, vptr + 5 * n))
2078 return -TARGET_EFAULT;
2080 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2082 break;
2083 case SOCKOP_shutdown:
2085 abi_ulong sockfd, how;
2087 if (get_user_ual(sockfd, vptr)
2088 || get_user_ual(how, vptr + n))
2089 return -TARGET_EFAULT;
2091 ret = get_errno(shutdown(sockfd, how));
2093 break;
2094 case SOCKOP_sendmsg:
2095 case SOCKOP_recvmsg:
2097 abi_ulong fd;
2098 abi_ulong target_msg;
2099 abi_ulong flags;
2101 if (get_user_ual(fd, vptr)
2102 || get_user_ual(target_msg, vptr + n)
2103 || get_user_ual(flags, vptr + 2 * n))
2104 return -TARGET_EFAULT;
2106 ret = do_sendrecvmsg(fd, target_msg, flags,
2107 (num == SOCKOP_sendmsg));
2109 break;
2110 case SOCKOP_setsockopt:
2112 abi_ulong sockfd;
2113 abi_ulong level;
2114 abi_ulong optname;
2115 abi_ulong optval;
2116 socklen_t optlen;
2118 if (get_user_ual(sockfd, vptr)
2119 || get_user_ual(level, vptr + n)
2120 || get_user_ual(optname, vptr + 2 * n)
2121 || get_user_ual(optval, vptr + 3 * n)
2122 || get_user_ual(optlen, vptr + 4 * n))
2123 return -TARGET_EFAULT;
2125 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2127 break;
2128 case SOCKOP_getsockopt:
2130 abi_ulong sockfd;
2131 abi_ulong level;
2132 abi_ulong optname;
2133 abi_ulong optval;
2134 socklen_t optlen;
2136 if (get_user_ual(sockfd, vptr)
2137 || get_user_ual(level, vptr + n)
2138 || get_user_ual(optname, vptr + 2 * n)
2139 || get_user_ual(optval, vptr + 3 * n)
2140 || get_user_ual(optlen, vptr + 4 * n))
2141 return -TARGET_EFAULT;
2143 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2145 break;
2146 default:
2147 gemu_log("Unsupported socketcall: %d\n", num);
2148 ret = -TARGET_ENOSYS;
2149 break;
2151 return ret;
2153 #endif
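/* Book-keeping for guest shmat() mappings: do_shmat() records each attached
   segment here so that do_shmdt() can later clear its page flags again. */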
2155 #define N_SHM_REGIONS 32
2157 static struct shm_region {
2158 abi_ulong start;
2159 abi_ulong size;
2160 } shm_regions[N_SHM_REGIONS];
2162 struct target_ipc_perm
2164 abi_long __key;
2165 abi_ulong uid;
2166 abi_ulong gid;
2167 abi_ulong cuid;
2168 abi_ulong cgid;
2169 unsigned short int mode;
2170 unsigned short int __pad1;
2171 unsigned short int __seq;
2172 unsigned short int __pad2;
2173 abi_ulong __unused1;
2174 abi_ulong __unused2;
2177 struct target_semid_ds
2179 struct target_ipc_perm sem_perm;
2180 abi_ulong sem_otime;
2181 abi_ulong __unused1;
2182 abi_ulong sem_ctime;
2183 abi_ulong __unused2;
2184 abi_ulong sem_nsems;
2185 abi_ulong __unused3;
2186 abi_ulong __unused4;
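/* The ipc_perm structure sits at the start of the guest semid_ds, msqid_ds
   and shmid_ds variants; the two helpers below byte-swap it between guest
   and host layouts. */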
2189 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2190 abi_ulong target_addr)
2192 struct target_ipc_perm *target_ip;
2193 struct target_semid_ds *target_sd;
2195 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2196 return -TARGET_EFAULT;
2197 target_ip = &(target_sd->sem_perm);
2198 host_ip->__key = tswapl(target_ip->__key);
2199 host_ip->uid = tswapl(target_ip->uid);
2200 host_ip->gid = tswapl(target_ip->gid);
2201 host_ip->cuid = tswapl(target_ip->cuid);
2202 host_ip->cgid = tswapl(target_ip->cgid);
2203 host_ip->mode = tswapl(target_ip->mode);
2204 unlock_user_struct(target_sd, target_addr, 0);
2205 return 0;
2208 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2209 struct ipc_perm *host_ip)
2211 struct target_ipc_perm *target_ip;
2212 struct target_semid_ds *target_sd;
2214 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2215 return -TARGET_EFAULT;
2216 target_ip = &(target_sd->sem_perm);
2217 target_ip->__key = tswapl(host_ip->__key);
2218 target_ip->uid = tswapl(host_ip->uid);
2219 target_ip->gid = tswapl(host_ip->gid);
2220 target_ip->cuid = tswapl(host_ip->cuid);
2221 target_ip->cgid = tswapl(host_ip->cgid);
2222 target_ip->mode = tswapl(host_ip->mode);
2223 unlock_user_struct(target_sd, target_addr, 1);
2224 return 0;
2227 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2228 abi_ulong target_addr)
2230 struct target_semid_ds *target_sd;
2232 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2233 return -TARGET_EFAULT;
2234 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2235 return -TARGET_EFAULT;
2236 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2237 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2238 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2239 unlock_user_struct(target_sd, target_addr, 0);
2240 return 0;
2243 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2244 struct semid_ds *host_sd)
2246 struct target_semid_ds *target_sd;
2248 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2249 return -TARGET_EFAULT;
2250 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2251 return -TARGET_EFAULT;
2252 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2253 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2254 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2255 unlock_user_struct(target_sd, target_addr, 1);
2256 return 0;
2259 struct target_seminfo {
2260 int semmap;
2261 int semmni;
2262 int semmns;
2263 int semmnu;
2264 int semmsl;
2265 int semopm;
2266 int semume;
2267 int semusz;
2268 int semvmx;
2269 int semaem;
2272 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2273 struct seminfo *host_seminfo)
2275 struct target_seminfo *target_seminfo;
2276 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2277 return -TARGET_EFAULT;
2278 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2279 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2280 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2281 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2282 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2283 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2284 __put_user(host_seminfo->semume, &target_seminfo->semume);
2285 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2286 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2287 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2288 unlock_user_struct(target_seminfo, target_addr, 1);
2289 return 0;
2292 union semun {
2293 int val;
2294 struct semid_ds *buf;
2295 unsigned short *array;
2296 struct seminfo *__buf;
2299 union target_semun {
2300 int val;
2301 abi_ulong buf;
2302 abi_ulong array;
2303 abi_ulong __buf;
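/* GETALL/SETALL helpers: the size of the semaphore set is obtained with a
   host IPC_STAT; *host_array is allocated by target_to_host_semarray() and
   released again by host_to_target_semarray(). */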
2306 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2307 abi_ulong target_addr)
2309 int nsems;
2310 unsigned short *array;
2311 union semun semun;
2312 struct semid_ds semid_ds;
2313 int i, ret;
2315 semun.buf = &semid_ds;
2317 ret = semctl(semid, 0, IPC_STAT, semun);
2318 if (ret == -1)
2319 return get_errno(ret);
2321 nsems = semid_ds.sem_nsems;
2323 *host_array = malloc(nsems*sizeof(unsigned short));
2324 array = lock_user(VERIFY_READ, target_addr,
2325 nsems*sizeof(unsigned short), 1);
2326 if (!array)
2327 return -TARGET_EFAULT;
2329 for(i=0; i<nsems; i++) {
2330 __get_user((*host_array)[i], &array[i]);
2332 unlock_user(array, target_addr, 0);
2334 return 0;
2337 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2338 unsigned short **host_array)
2340 int nsems;
2341 unsigned short *array;
2342 union semun semun;
2343 struct semid_ds semid_ds;
2344 int i, ret;
2346 semun.buf = &semid_ds;
2348 ret = semctl(semid, 0, IPC_STAT, semun);
2349 if (ret == -1)
2350 return get_errno(ret);
2352 nsems = semid_ds.sem_nsems;
2354 array = lock_user(VERIFY_WRITE, target_addr,
2355 nsems*sizeof(unsigned short), 0);
2356 if (!array)
2357 return -TARGET_EFAULT;
2359 for(i=0; i<nsems; i++) {
2360 __put_user((*host_array)[i], &array[i]);
2362 free(*host_array);
2363 unlock_user(array, target_addr, 1);
2365 return 0;
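/* Emulate semctl(): convert the semun argument between guest and host
   representations as required by the command, then issue the host semctl(). */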
2368 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2369 union target_semun target_su)
2371 union semun arg;
2372 struct semid_ds dsarg;
2373 unsigned short *array = NULL;
2374 struct seminfo seminfo;
2375 abi_long ret = -TARGET_EINVAL;
2376 abi_long err;
2377 cmd &= 0xff;
2379 switch( cmd ) {
2380 case GETVAL:
2381 case SETVAL:
2382 arg.val = tswapl(target_su.val);
2383 ret = get_errno(semctl(semid, semnum, cmd, arg));
2384 target_su.val = tswapl(arg.val);
2385 break;
2386 case GETALL:
2387 case SETALL:
2388 err = target_to_host_semarray(semid, &array, target_su.array);
2389 if (err)
2390 return err;
2391 arg.array = array;
2392 ret = get_errno(semctl(semid, semnum, cmd, arg));
2393 err = host_to_target_semarray(semid, target_su.array, &array);
2394 if (err)
2395 return err;
2396 break;
2397 case IPC_STAT:
2398 case IPC_SET:
2399 case SEM_STAT:
2400 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2401 if (err)
2402 return err;
2403 arg.buf = &dsarg;
2404 ret = get_errno(semctl(semid, semnum, cmd, arg));
2405 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2406 if (err)
2407 return err;
2408 break;
2409 case IPC_INFO:
2410 case SEM_INFO:
2411 arg.__buf = &seminfo;
2412 ret = get_errno(semctl(semid, semnum, cmd, arg));
2413 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2414 if (err)
2415 return err;
2416 break;
2417 case IPC_RMID:
2418 case GETPID:
2419 case GETNCNT:
2420 case GETZCNT:
2421 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2422 break;
2425 return ret;
2428 struct target_sembuf {
2429 unsigned short sem_num;
2430 short sem_op;
2431 short sem_flg;
2434 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2435 abi_ulong target_addr,
2436 unsigned nsops)
2438 struct target_sembuf *target_sembuf;
2439 int i;
2441 target_sembuf = lock_user(VERIFY_READ, target_addr,
2442 nsops*sizeof(struct target_sembuf), 1);
2443 if (!target_sembuf)
2444 return -TARGET_EFAULT;
2446 for(i=0; i<nsops; i++) {
2447 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2448 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2449 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2452 unlock_user(target_sembuf, target_addr, 0);
2454 return 0;
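/* Emulate semop(): convert the guest sembuf array and issue the host semop(). */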
2457 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2459 struct sembuf sops[nsops];
2461 if (target_to_host_sembuf(sops, ptr, nsops))
2462 return -TARGET_EFAULT;
2464 return semop(semid, sops, nsops);
2467 struct target_msqid_ds
2469 struct target_ipc_perm msg_perm;
2470 abi_ulong msg_stime;
2471 #if TARGET_ABI_BITS == 32
2472 abi_ulong __unused1;
2473 #endif
2474 abi_ulong msg_rtime;
2475 #if TARGET_ABI_BITS == 32
2476 abi_ulong __unused2;
2477 #endif
2478 abi_ulong msg_ctime;
2479 #if TARGET_ABI_BITS == 32
2480 abi_ulong __unused3;
2481 #endif
2482 abi_ulong __msg_cbytes;
2483 abi_ulong msg_qnum;
2484 abi_ulong msg_qbytes;
2485 abi_ulong msg_lspid;
2486 abi_ulong msg_lrpid;
2487 abi_ulong __unused4;
2488 abi_ulong __unused5;
2491 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2492 abi_ulong target_addr)
2494 struct target_msqid_ds *target_md;
2496 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2497 return -TARGET_EFAULT;
2498 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2499 return -TARGET_EFAULT;
2500 host_md->msg_stime = tswapl(target_md->msg_stime);
2501 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2502 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2503 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2504 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2505 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2506 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2507 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2508 unlock_user_struct(target_md, target_addr, 0);
2509 return 0;
2512 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2513 struct msqid_ds *host_md)
2515 struct target_msqid_ds *target_md;
2517 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2518 return -TARGET_EFAULT;
2519 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2520 return -TARGET_EFAULT;
2521 target_md->msg_stime = tswapl(host_md->msg_stime);
2522 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2523 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2524 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2525 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2526 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2527 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2528 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2529 unlock_user_struct(target_md, target_addr, 1);
2530 return 0;
2533 struct target_msginfo {
2534 int msgpool;
2535 int msgmap;
2536 int msgmax;
2537 int msgmnb;
2538 int msgmni;
2539 int msgssz;
2540 int msgtql;
2541 unsigned short int msgseg;
2544 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2545 struct msginfo *host_msginfo)
2547 struct target_msginfo *target_msginfo;
2548 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2549 return -TARGET_EFAULT;
2550 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2551 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2552 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2553 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2554 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2555 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2556 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2557 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2558 unlock_user_struct(target_msginfo, target_addr, 1);
2559 return 0;
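/* Emulate msgctl(): translate the msqid_ds (IPC_STAT/IPC_SET/MSG_STAT) or
   msginfo (IPC_INFO/MSG_INFO) buffer and call the host msgctl(). */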
2562 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2564 struct msqid_ds dsarg;
2565 struct msginfo msginfo;
2566 abi_long ret = -TARGET_EINVAL;
2568 cmd &= 0xff;
2570 switch (cmd) {
2571 case IPC_STAT:
2572 case IPC_SET:
2573 case MSG_STAT:
2574 if (target_to_host_msqid_ds(&dsarg,ptr))
2575 return -TARGET_EFAULT;
2576 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2577 if (host_to_target_msqid_ds(ptr,&dsarg))
2578 return -TARGET_EFAULT;
2579 break;
2580 case IPC_RMID:
2581 ret = get_errno(msgctl(msgid, cmd, NULL));
2582 break;
2583 case IPC_INFO:
2584 case MSG_INFO:
2585 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2586 if (host_to_target_msginfo(ptr, &msginfo))
2587 return -TARGET_EFAULT;
2588 break;
2591 return ret;
2594 struct target_msgbuf {
2595 abi_long mtype;
2596 char mtext[1];
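/* msgsnd(): build a host msgbuf from the guest message (mtype byte-swapped,
   mtext copied verbatim) and send it with the host msgsnd(). */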
2599 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2600 unsigned int msgsz, int msgflg)
2602 struct target_msgbuf *target_mb;
2603 struct msgbuf *host_mb;
2604 abi_long ret = 0;
2606 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2607 return -TARGET_EFAULT;
2608 host_mb = malloc(msgsz+sizeof(long));
2609 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2610 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2611 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2612 free(host_mb);
2613 unlock_user_struct(target_mb, msgp, 0);
2615 return ret;
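/* msgrcv(): receive into a temporary host buffer, then copy mtype and the
   received mtext bytes back into the guest message buffer. */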
2618 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2619 unsigned int msgsz, abi_long msgtyp,
2620 int msgflg)
2622 struct target_msgbuf *target_mb;
2623 char *target_mtext;
2624 struct msgbuf *host_mb;
2625 abi_long ret = 0;
2627 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2628 return -TARGET_EFAULT;
2630 host_mb = malloc(msgsz+sizeof(long));
2631 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2633 if (ret > 0) {
2634 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2635 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2636 if (!target_mtext) {
2637 ret = -TARGET_EFAULT;
2638 goto end;
2640 memcpy(target_mb->mtext, host_mb->mtext, ret);
2641 unlock_user(target_mtext, target_mtext_addr, ret);
2644 target_mb->mtype = tswapl(host_mb->mtype);
2645 free(host_mb);
2647 end:
2648 if (target_mb)
2649 unlock_user_struct(target_mb, msgp, 1);
2650 return ret;
2653 struct target_shmid_ds
2655 struct target_ipc_perm shm_perm;
2656 abi_ulong shm_segsz;
2657 abi_ulong shm_atime;
2658 #if TARGET_ABI_BITS == 32
2659 abi_ulong __unused1;
2660 #endif
2661 abi_ulong shm_dtime;
2662 #if TARGET_ABI_BITS == 32
2663 abi_ulong __unused2;
2664 #endif
2665 abi_ulong shm_ctime;
2666 #if TARGET_ABI_BITS == 32
2667 abi_ulong __unused3;
2668 #endif
2669 int shm_cpid;
2670 int shm_lpid;
2671 abi_ulong shm_nattch;
2672 unsigned long int __unused4;
2673 unsigned long int __unused5;
2676 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2677 abi_ulong target_addr)
2679 struct target_shmid_ds *target_sd;
2681 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2682 return -TARGET_EFAULT;
2683 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2684 return -TARGET_EFAULT;
2685 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2686 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2687 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2688 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2689 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2690 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2691 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2692 unlock_user_struct(target_sd, target_addr, 0);
2693 return 0;
2696 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2697 struct shmid_ds *host_sd)
2699 struct target_shmid_ds *target_sd;
2701 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2702 return -TARGET_EFAULT;
2703 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2704 return -TARGET_EFAULT;
2705 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2706 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2707 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2708 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2709 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2710 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2711 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2712 unlock_user_struct(target_sd, target_addr, 1);
2713 return 0;
2716 struct target_shminfo {
2717 abi_ulong shmmax;
2718 abi_ulong shmmin;
2719 abi_ulong shmmni;
2720 abi_ulong shmseg;
2721 abi_ulong shmall;
2724 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2725 struct shminfo *host_shminfo)
2727 struct target_shminfo *target_shminfo;
2728 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2729 return -TARGET_EFAULT;
2730 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2731 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2732 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2733 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2734 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2735 unlock_user_struct(target_shminfo, target_addr, 1);
2736 return 0;
2739 struct target_shm_info {
2740 int used_ids;
2741 abi_ulong shm_tot;
2742 abi_ulong shm_rss;
2743 abi_ulong shm_swp;
2744 abi_ulong swap_attempts;
2745 abi_ulong swap_successes;
2748 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2749 struct shm_info *host_shm_info)
2751 struct target_shm_info *target_shm_info;
2752 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2753 return -TARGET_EFAULT;
2754 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2755 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2756 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2757 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2758 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2759 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2760 unlock_user_struct(target_shm_info, target_addr, 1);
2761 return 0;
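/* Emulate shmctl(): convert the shmid_ds/shminfo/shm_info buffer as required
   by the command and call the host shmctl(). */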
2764 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2766 struct shmid_ds dsarg;
2767 struct shminfo shminfo;
2768 struct shm_info shm_info;
2769 abi_long ret = -TARGET_EINVAL;
2771 cmd &= 0xff;
2773 switch(cmd) {
2774 case IPC_STAT:
2775 case IPC_SET:
2776 case SHM_STAT:
2777 if (target_to_host_shmid_ds(&dsarg, buf))
2778 return -TARGET_EFAULT;
2779 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2780 if (host_to_target_shmid_ds(buf, &dsarg))
2781 return -TARGET_EFAULT;
2782 break;
2783 case IPC_INFO:
2784 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2785 if (host_to_target_shminfo(buf, &shminfo))
2786 return -TARGET_EFAULT;
2787 break;
2788 case SHM_INFO:
2789 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2790 if (host_to_target_shm_info(buf, &shm_info))
2791 return -TARGET_EFAULT;
2792 break;
2793 case IPC_RMID:
2794 case SHM_LOCK:
2795 case SHM_UNLOCK:
2796 ret = get_errno(shmctl(shmid, cmd, NULL));
2797 break;
2800 return ret;
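/* shmat(): attach the segment on the host, picking a guest address with
   mmap_find_vma() if the guest did not supply one, then update the guest
   page flags and remember the mapping in shm_regions[]. */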
2803 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2805 abi_long raddr;
2806 void *host_raddr;
2807 struct shmid_ds shm_info;
2808 int i,ret;
2810 /* find out the length of the shared memory segment */
2811 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2812 if (is_error(ret)) {
2813 /* can't get length, bail out */
2814 return ret;
2817 mmap_lock();
2819 if (shmaddr)
2820 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2821 else {
2822 abi_ulong mmap_start;
2824 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2826 if (mmap_start == -1) {
2827 errno = ENOMEM;
2828 host_raddr = (void *)-1;
2829 } else
2830 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2833 if (host_raddr == (void *)-1) {
2834 mmap_unlock();
2835 return get_errno((long)host_raddr);
2837 raddr=h2g((unsigned long)host_raddr);
2839 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2840 PAGE_VALID | PAGE_READ |
2841 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2843 for (i = 0; i < N_SHM_REGIONS; i++) {
2844 if (shm_regions[i].start == 0) {
2845 shm_regions[i].start = raddr;
2846 shm_regions[i].size = shm_info.shm_segsz;
2847 break;
2851 mmap_unlock();
2852 return raddr;
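/* shmdt(): forget the mapping recorded by do_shmat(), clear its page flags
   and detach the segment on the host. */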
2856 static inline abi_long do_shmdt(abi_ulong shmaddr)
2858 int i;
2860 for (i = 0; i < N_SHM_REGIONS; ++i) {
2861 if (shm_regions[i].start == shmaddr) {
2862 shm_regions[i].start = 0;
2863 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
2864 break;
2868 return get_errno(shmdt(g2h(shmaddr)));
2871 #ifdef TARGET_NR_ipc
2872 /* ??? This only works with linear mappings. */
2873 /* do_ipc() must return target values and target errnos. */
2874 static abi_long do_ipc(unsigned int call, int first,
2875 int second, int third,
2876 abi_long ptr, abi_long fifth)
2878 int version;
2879 abi_long ret = 0;
2881 version = call >> 16;
2882 call &= 0xffff;
2884 switch (call) {
2885 case IPCOP_semop:
2886 ret = do_semop(first, ptr, second);
2887 break;
2889 case IPCOP_semget:
2890 ret = get_errno(semget(first, second, third));
2891 break;
2893 case IPCOP_semctl:
2894 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2895 break;
2897 case IPCOP_msgget:
2898 ret = get_errno(msgget(first, second));
2899 break;
2901 case IPCOP_msgsnd:
2902 ret = do_msgsnd(first, ptr, second, third);
2903 break;
2905 case IPCOP_msgctl:
2906 ret = do_msgctl(first, second, ptr);
2907 break;
2909 case IPCOP_msgrcv:
2910 switch (version) {
2911 case 0:
2913 struct target_ipc_kludge {
2914 abi_long msgp;
2915 abi_long msgtyp;
2916 } *tmp;
2918 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2919 ret = -TARGET_EFAULT;
2920 break;
2923 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2925 unlock_user_struct(tmp, ptr, 0);
2926 break;
2928 default:
2929 ret = do_msgrcv(first, ptr, second, fifth, third);
2931 break;
2933 case IPCOP_shmat:
2934 switch (version) {
2935 default:
2937 abi_ulong raddr;
2938 raddr = do_shmat(first, ptr, second);
2939 if (is_error(raddr))
2940 return get_errno(raddr);
2941 if (put_user_ual(raddr, third))
2942 return -TARGET_EFAULT;
2943 break;
2945 case 1:
2946 ret = -TARGET_EINVAL;
2947 break;
2949 break;
2950 case IPCOP_shmdt:
2951 ret = do_shmdt(ptr);
2952 break;
2954 case IPCOP_shmget:
2955 /* IPC_* flag values are the same on all Linux platforms */
2956 ret = get_errno(shmget(first, second, third));
2957 break;
2959 /* IPC_* and SHM_* command values are the same on all Linux platforms */
2960 case IPCOP_shmctl:
2961 ret = do_shmctl(first, second, third);
2962 break;
2963 default:
2964 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2965 ret = -TARGET_ENOSYS;
2966 break;
2968 return ret;
2970 #endif
2972 /* kernel structure type definitions */
2973 #define IFNAMSIZ 16
2975 #define STRUCT(name, ...) STRUCT_ ## name,
2976 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2977 enum {
2978 #include "syscall_types.h"
2980 #undef STRUCT
2981 #undef STRUCT_SPECIAL
2983 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
2984 #define STRUCT_SPECIAL(name)
2985 #include "syscall_types.h"
2986 #undef STRUCT
2987 #undef STRUCT_SPECIAL
2989 typedef struct IOCTLEntry IOCTLEntry;
2991 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
2992 int fd, abi_long cmd, abi_long arg);
2994 struct IOCTLEntry {
2995 unsigned int target_cmd;
2996 unsigned int host_cmd;
2997 const char *name;
2998 int access;
2999 do_ioctl_fn *do_ioctl;
3000 const argtype arg_type[5];
3003 #define IOC_R 0x0001
3004 #define IOC_W 0x0002
3005 #define IOC_RW (IOC_R | IOC_W)
3007 #define MAX_STRUCT_SIZE 4096
3009 #ifdef CONFIG_FIEMAP
3010 /* So fiemap access checks don't overflow on 32 bit systems.
3011 * This is very slightly smaller than the limit imposed by
3012 * the underlying kernel. */
3014 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3015 / sizeof(struct fiemap_extent))
3017 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3018 int fd, abi_long cmd, abi_long arg)
3020 /* The parameter for this ioctl is a struct fiemap followed
3021 * by an array of struct fiemap_extent whose size is set
3022 * in fiemap->fm_extent_count. The array is filled in by the
3023 * ioctl. */
3025 int target_size_in, target_size_out;
3026 struct fiemap *fm;
3027 const argtype *arg_type = ie->arg_type;
3028 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3029 void *argptr, *p;
3030 abi_long ret;
3031 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3032 uint32_t outbufsz;
3033 int free_fm = 0;
3035 assert(arg_type[0] == TYPE_PTR);
3036 assert(ie->access == IOC_RW);
3037 arg_type++;
3038 target_size_in = thunk_type_size(arg_type, 0);
3039 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3040 if (!argptr) {
3041 return -TARGET_EFAULT;
3043 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3044 unlock_user(argptr, arg, 0);
3045 fm = (struct fiemap *)buf_temp;
3046 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3047 return -TARGET_EINVAL;
3050 outbufsz = sizeof (*fm) +
3051 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3053 if (outbufsz > MAX_STRUCT_SIZE) {
3054 /* We can't fit all the extents into the fixed size buffer.
3055 * Allocate one that is large enough and use it instead. */
3057 fm = malloc(outbufsz);
3058 if (!fm) {
3059 return -TARGET_ENOMEM;
3061 memcpy(fm, buf_temp, sizeof(struct fiemap));
3062 free_fm = 1;
3064 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3065 if (!is_error(ret)) {
3066 target_size_out = target_size_in;
3067 /* An extent_count of 0 means we were only counting the extents
3068 * so there are no structs to copy */
3070 if (fm->fm_extent_count != 0) {
3071 target_size_out += fm->fm_mapped_extents * extent_size;
3073 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3074 if (!argptr) {
3075 ret = -TARGET_EFAULT;
3076 } else {
3077 /* Convert the struct fiemap */
3078 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3079 if (fm->fm_extent_count != 0) {
3080 p = argptr + target_size_in;
3081 /* ...and then all the struct fiemap_extents */
3082 for (i = 0; i < fm->fm_mapped_extents; i++) {
3083 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3084 THUNK_TARGET);
3085 p += extent_size;
3088 unlock_user(argptr, arg, target_size_out);
3091 if (free_fm) {
3092 free(fm);
3094 return ret;
3096 #endif
3098 static IOCTLEntry ioctl_entries[] = {
3099 #define IOCTL(cmd, access, ...) \
3100 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3101 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3102 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3103 #include "ioctls.h"
3104 { 0, 0, },
3107 /* ??? Implement proper locking for ioctls. */
3108 /* do_ioctl() must return target values and target errnos. */
3109 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3111 const IOCTLEntry *ie;
3112 const argtype *arg_type;
3113 abi_long ret;
3114 uint8_t buf_temp[MAX_STRUCT_SIZE];
3115 int target_size;
3116 void *argptr;
3118 ie = ioctl_entries;
3119 for(;;) {
3120 if (ie->target_cmd == 0) {
3121 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3122 return -TARGET_ENOSYS;
3124 if (ie->target_cmd == cmd)
3125 break;
3126 ie++;
3128 arg_type = ie->arg_type;
3129 #if defined(DEBUG)
3130 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3131 #endif
3132 if (ie->do_ioctl) {
3133 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3136 switch(arg_type[0]) {
3137 case TYPE_NULL:
3138 /* no argument */
3139 ret = get_errno(ioctl(fd, ie->host_cmd));
3140 break;
3141 case TYPE_PTRVOID:
3142 case TYPE_INT:
3143 /* int argument */
3144 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3145 break;
3146 case TYPE_PTR:
3147 arg_type++;
3148 target_size = thunk_type_size(arg_type, 0);
3149 switch(ie->access) {
3150 case IOC_R:
3151 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3152 if (!is_error(ret)) {
3153 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3154 if (!argptr)
3155 return -TARGET_EFAULT;
3156 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3157 unlock_user(argptr, arg, target_size);
3159 break;
3160 case IOC_W:
3161 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3162 if (!argptr)
3163 return -TARGET_EFAULT;
3164 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3165 unlock_user(argptr, arg, 0);
3166 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3167 break;
3168 default:
3169 case IOC_RW:
3170 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3171 if (!argptr)
3172 return -TARGET_EFAULT;
3173 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3174 unlock_user(argptr, arg, 0);
3175 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3176 if (!is_error(ret)) {
3177 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3178 if (!argptr)
3179 return -TARGET_EFAULT;
3180 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3181 unlock_user(argptr, arg, target_size);
3183 break;
3185 break;
3186 default:
3187 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3188 (long)cmd, arg_type[0]);
3189 ret = -TARGET_ENOSYS;
3190 break;
3192 return ret;
3195 static const bitmask_transtbl iflag_tbl[] = {
3196 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3197 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3198 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3199 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3200 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3201 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3202 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3203 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3204 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3205 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3206 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3207 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3208 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3209 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3210 { 0, 0, 0, 0 }
3213 static const bitmask_transtbl oflag_tbl[] = {
3214 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3215 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3216 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3217 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3218 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3219 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3220 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3221 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3222 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3223 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3224 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3225 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3226 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3227 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3228 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3229 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3230 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3231 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3232 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3233 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3234 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3235 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3236 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3237 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3238 { 0, 0, 0, 0 }
3241 static const bitmask_transtbl cflag_tbl[] = {
3242 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3243 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3244 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3245 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3246 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3247 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3248 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3249 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3250 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3251 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3252 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3253 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3254 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3255 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3256 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3257 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3258 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3259 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3260 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3261 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3262 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3263 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3264 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3265 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3266 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3267 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3268 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3269 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3270 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3271 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3272 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3273 { 0, 0, 0, 0 }
3276 static const bitmask_transtbl lflag_tbl[] = {
3277 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3278 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3279 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3280 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3281 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3282 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3283 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3284 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3285 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3286 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3287 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3288 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3289 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3290 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3291 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3292 { 0, 0, 0, 0 }
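/* Convert termios structures between guest and host layouts: the flag words
   go through the bitmask tables above and the c_cc control characters are
   copied index by index. */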
3295 static void target_to_host_termios (void *dst, const void *src)
3297 struct host_termios *host = dst;
3298 const struct target_termios *target = src;
3300 host->c_iflag =
3301 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3302 host->c_oflag =
3303 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3304 host->c_cflag =
3305 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3306 host->c_lflag =
3307 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3308 host->c_line = target->c_line;
3310 memset(host->c_cc, 0, sizeof(host->c_cc));
3311 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3312 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3313 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3314 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3315 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3316 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3317 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3318 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3319 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3320 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3321 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3322 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3323 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3324 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3325 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3326 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3327 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3330 static void host_to_target_termios (void *dst, const void *src)
3332 struct target_termios *target = dst;
3333 const struct host_termios *host = src;
3335 target->c_iflag =
3336 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3337 target->c_oflag =
3338 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3339 target->c_cflag =
3340 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3341 target->c_lflag =
3342 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3343 target->c_line = host->c_line;
3345 memset(target->c_cc, 0, sizeof(target->c_cc));
3346 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3347 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3348 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3349 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3350 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3351 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3352 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3353 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3354 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3355 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3356 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3357 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3358 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3359 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3360 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3361 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3362 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3365 static const StructEntry struct_termios_def = {
3366 .convert = { host_to_target_termios, target_to_host_termios },
3367 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3368 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3371 static bitmask_transtbl mmap_flags_tbl[] = {
3372 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3373 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3374 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3375 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3376 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3377 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3378 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3379 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3380 { 0, 0, 0, 0 }
3383 #if defined(TARGET_I386)
3385 /* NOTE: there is really one LDT for all the threads */
3386 static uint8_t *ldt_table;
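/* modify_ldt(2) read path: copy up to bytecount bytes of the emulated LDT
   back to the guest buffer and return the number of bytes copied. */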
3388 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3390 int size;
3391 void *p;
3393 if (!ldt_table)
3394 return 0;
3395 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3396 if (size > bytecount)
3397 size = bytecount;
3398 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3399 if (!p)
3400 return -TARGET_EFAULT;
3401 /* ??? Should this be byteswapped? */
3402 memcpy(p, ldt_table, size);
3403 unlock_user(p, ptr, size);
3404 return size;
3407 /* XXX: add locking support */
3408 static abi_long write_ldt(CPUX86State *env,
3409 abi_ulong ptr, unsigned long bytecount, int oldmode)
3411 struct target_modify_ldt_ldt_s ldt_info;
3412 struct target_modify_ldt_ldt_s *target_ldt_info;
3413 int seg_32bit, contents, read_exec_only, limit_in_pages;
3414 int seg_not_present, useable, lm;
3415 uint32_t *lp, entry_1, entry_2;
3417 if (bytecount != sizeof(ldt_info))
3418 return -TARGET_EINVAL;
3419 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3420 return -TARGET_EFAULT;
3421 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3422 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3423 ldt_info.limit = tswap32(target_ldt_info->limit);
3424 ldt_info.flags = tswap32(target_ldt_info->flags);
3425 unlock_user_struct(target_ldt_info, ptr, 0);
3427 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3428 return -TARGET_EINVAL;
3429 seg_32bit = ldt_info.flags & 1;
3430 contents = (ldt_info.flags >> 1) & 3;
3431 read_exec_only = (ldt_info.flags >> 3) & 1;
3432 limit_in_pages = (ldt_info.flags >> 4) & 1;
3433 seg_not_present = (ldt_info.flags >> 5) & 1;
3434 useable = (ldt_info.flags >> 6) & 1;
3435 #ifdef TARGET_ABI32
3436 lm = 0;
3437 #else
3438 lm = (ldt_info.flags >> 7) & 1;
3439 #endif
3440 if (contents == 3) {
3441 if (oldmode)
3442 return -TARGET_EINVAL;
3443 if (seg_not_present == 0)
3444 return -TARGET_EINVAL;
3446 /* allocate the LDT */
3447 if (!ldt_table) {
3448 env->ldt.base = target_mmap(0,
3449 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3450 PROT_READ|PROT_WRITE,
3451 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3452 if (env->ldt.base == -1)
3453 return -TARGET_ENOMEM;
3454 memset(g2h(env->ldt.base), 0,
3455 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3456 env->ldt.limit = 0xffff;
3457 ldt_table = g2h(env->ldt.base);
3460 /* NOTE: same code as Linux kernel */
3461 /* Allow LDTs to be cleared by the user. */
3462 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3463 if (oldmode ||
3464 (contents == 0 &&
3465 read_exec_only == 1 &&
3466 seg_32bit == 0 &&
3467 limit_in_pages == 0 &&
3468 seg_not_present == 1 &&
3469 useable == 0 )) {
3470 entry_1 = 0;
3471 entry_2 = 0;
3472 goto install;
3476 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3477 (ldt_info.limit & 0x0ffff);
3478 entry_2 = (ldt_info.base_addr & 0xff000000) |
3479 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3480 (ldt_info.limit & 0xf0000) |
3481 ((read_exec_only ^ 1) << 9) |
3482 (contents << 10) |
3483 ((seg_not_present ^ 1) << 15) |
3484 (seg_32bit << 22) |
3485 (limit_in_pages << 23) |
3486 (lm << 21) |
3487 0x7000;
3488 if (!oldmode)
3489 entry_2 |= (useable << 20);
3491 /* Install the new entry ... */
3492 install:
3493 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3494 lp[0] = tswap32(entry_1);
3495 lp[1] = tswap32(entry_2);
3496 return 0;
3499 /* specific and weird i386 syscalls */
3500 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3501 unsigned long bytecount)
3503 abi_long ret;
3505 switch (func) {
3506 case 0:
3507 ret = read_ldt(ptr, bytecount);
3508 break;
3509 case 1:
3510 ret = write_ldt(env, ptr, bytecount, 1);
3511 break;
3512 case 0x11:
3513 ret = write_ldt(env, ptr, bytecount, 0);
3514 break;
3515 default:
3516 ret = -TARGET_ENOSYS;
3517 break;
3519 return ret;
3522 #if defined(TARGET_I386) && defined(TARGET_ABI32)
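/* set_thread_area(): install a TLS descriptor into the emulated GDT; when
   entry_number is -1 a free TLS slot is picked and written back to the guest. */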
3523 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3525 uint64_t *gdt_table = g2h(env->gdt.base);
3526 struct target_modify_ldt_ldt_s ldt_info;
3527 struct target_modify_ldt_ldt_s *target_ldt_info;
3528 int seg_32bit, contents, read_exec_only, limit_in_pages;
3529 int seg_not_present, useable, lm;
3530 uint32_t *lp, entry_1, entry_2;
3531 int i;
3533 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3534 if (!target_ldt_info)
3535 return -TARGET_EFAULT;
3536 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3537 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3538 ldt_info.limit = tswap32(target_ldt_info->limit);
3539 ldt_info.flags = tswap32(target_ldt_info->flags);
3540 if (ldt_info.entry_number == -1) {
3541 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3542 if (gdt_table[i] == 0) {
3543 ldt_info.entry_number = i;
3544 target_ldt_info->entry_number = tswap32(i);
3545 break;
3549 unlock_user_struct(target_ldt_info, ptr, 1);
3551 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3552 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3553 return -TARGET_EINVAL;
3554 seg_32bit = ldt_info.flags & 1;
3555 contents = (ldt_info.flags >> 1) & 3;
3556 read_exec_only = (ldt_info.flags >> 3) & 1;
3557 limit_in_pages = (ldt_info.flags >> 4) & 1;
3558 seg_not_present = (ldt_info.flags >> 5) & 1;
3559 useable = (ldt_info.flags >> 6) & 1;
3560 #ifdef TARGET_ABI32
3561 lm = 0;
3562 #else
3563 lm = (ldt_info.flags >> 7) & 1;
3564 #endif
3566 if (contents == 3) {
3567 if (seg_not_present == 0)
3568 return -TARGET_EINVAL;
3571 /* NOTE: same code as Linux kernel */
3572 /* Allow LDTs to be cleared by the user. */
3573 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3574 if ((contents == 0 &&
3575 read_exec_only == 1 &&
3576 seg_32bit == 0 &&
3577 limit_in_pages == 0 &&
3578 seg_not_present == 1 &&
3579 useable == 0 )) {
3580 entry_1 = 0;
3581 entry_2 = 0;
3582 goto install;
3586 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3587 (ldt_info.limit & 0x0ffff);
3588 entry_2 = (ldt_info.base_addr & 0xff000000) |
3589 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3590 (ldt_info.limit & 0xf0000) |
3591 ((read_exec_only ^ 1) << 9) |
3592 (contents << 10) |
3593 ((seg_not_present ^ 1) << 15) |
3594 (seg_32bit << 22) |
3595 (limit_in_pages << 23) |
3596 (useable << 20) |
3597 (lm << 21) |
3598 0x7000;
3600 /* Install the new entry ... */
3601 install:
3602 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3603 lp[0] = tswap32(entry_1);
3604 lp[1] = tswap32(entry_2);
3605 return 0;
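/* get_thread_area(): read a TLS descriptor back out of the emulated GDT and
   reconstruct base_addr, limit and flags for the guest. */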
3608 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3610 struct target_modify_ldt_ldt_s *target_ldt_info;
3611 uint64_t *gdt_table = g2h(env->gdt.base);
3612 uint32_t base_addr, limit, flags;
3613 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3614 int seg_not_present, useable, lm;
3615 uint32_t *lp, entry_1, entry_2;
3617 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3618 if (!target_ldt_info)
3619 return -TARGET_EFAULT;
3620 idx = tswap32(target_ldt_info->entry_number);
3621 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3622 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3623 unlock_user_struct(target_ldt_info, ptr, 1);
3624 return -TARGET_EINVAL;
3626 lp = (uint32_t *)(gdt_table + idx);
3627 entry_1 = tswap32(lp[0]);
3628 entry_2 = tswap32(lp[1]);
3630 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3631 contents = (entry_2 >> 10) & 3;
3632 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3633 seg_32bit = (entry_2 >> 22) & 1;
3634 limit_in_pages = (entry_2 >> 23) & 1;
3635 useable = (entry_2 >> 20) & 1;
3636 #ifdef TARGET_ABI32
3637 lm = 0;
3638 #else
3639 lm = (entry_2 >> 21) & 1;
3640 #endif
3641 flags = (seg_32bit << 0) | (contents << 1) |
3642 (read_exec_only << 3) | (limit_in_pages << 4) |
3643 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3644 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3645 base_addr = (entry_1 >> 16) |
3646 (entry_2 & 0xff000000) |
3647 ((entry_2 & 0xff) << 16);
3648 target_ldt_info->base_addr = tswapl(base_addr);
3649 target_ldt_info->limit = tswap32(limit);
3650 target_ldt_info->flags = tswap32(flags);
3651 unlock_user_struct(target_ldt_info, ptr, 1);
3652 return 0;
3654 #endif /* TARGET_I386 && TARGET_ABI32 */
3656 #ifndef TARGET_ABI32
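/* x86-64 arch_prctl(): get or set the emulated FS/GS segment base for the guest. */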
3657 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3659 abi_long ret = 0;
3660 abi_ulong val;
3661 int idx;
3663 switch(code) {
3664 case TARGET_ARCH_SET_GS:
3665 case TARGET_ARCH_SET_FS:
3666 if (code == TARGET_ARCH_SET_GS)
3667 idx = R_GS;
3668 else
3669 idx = R_FS;
3670 cpu_x86_load_seg(env, idx, 0);
3671 env->segs[idx].base = addr;
3672 break;
3673 case TARGET_ARCH_GET_GS:
3674 case TARGET_ARCH_GET_FS:
3675 if (code == TARGET_ARCH_GET_GS)
3676 idx = R_GS;
3677 else
3678 idx = R_FS;
3679 val = env->segs[idx].base;
3680 if (put_user(val, addr, abi_ulong))
3681 return -TARGET_EFAULT;
3682 break;
3683 default:
3684 ret = -TARGET_EINVAL;
3685 break;
3687 return ret;
3689 #endif
3691 #endif /* defined(TARGET_I386) */
3693 #if defined(CONFIG_USE_NPTL)
3695 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
3697 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3698 typedef struct {
3699 CPUState *env;
3700 pthread_mutex_t mutex;
3701 pthread_cond_t cond;
3702 pthread_t thread;
3703 uint32_t tid;
3704 abi_ulong child_tidptr;
3705 abi_ulong parent_tidptr;
3706 sigset_t sigmask;
3707 } new_thread_info;
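/* Entry point for threads created by do_fork() with CLONE_VM: set up the
   per-thread state, publish the TID, signal the parent that we are ready,
   wait for it to finish the TLS setup (clone_lock) and enter cpu_loop(). */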
3709 static void *clone_func(void *arg)
3711 new_thread_info *info = arg;
3712 CPUState *env;
3713 TaskState *ts;
3715 env = info->env;
3716 thread_env = env;
3717 ts = (TaskState *)thread_env->opaque;
3718 info->tid = gettid();
3719 env->host_tid = info->tid;
3720 task_settid(ts);
3721 if (info->child_tidptr)
3722 put_user_u32(info->tid, info->child_tidptr);
3723 if (info->parent_tidptr)
3724 put_user_u32(info->tid, info->parent_tidptr);
3725 /* Enable signals. */
3726 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3727 /* Signal to the parent that we're ready. */
3728 pthread_mutex_lock(&info->mutex);
3729 pthread_cond_broadcast(&info->cond);
3730 pthread_mutex_unlock(&info->mutex);
3731 /* Wait until the parent has finished initializing the TLS state. */
3732 pthread_mutex_lock(&clone_lock);
3733 pthread_mutex_unlock(&clone_lock);
3734 cpu_loop(env);
3735 /* never exits */
3736 return NULL;
3738 #else
3739 /* this stack is the equivalent of the kernel stack associated with a
3740 thread/process */
3741 #define NEW_STACK_SIZE 8192
3743 static int clone_func(void *arg)
3745 CPUState *env = arg;
3746 cpu_loop(env);
3747 /* never exits */
3748 return 0;
3750 #endif
3752 /* do_fork() must return host values and target errnos (unlike most
3753 do_*() functions). */
3754 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3755 abi_ulong parent_tidptr, target_ulong newtls,
3756 abi_ulong child_tidptr)
3758 int ret;
3759 TaskState *ts;
3760 CPUState *new_env;
3761 #if defined(CONFIG_USE_NPTL)
3762 unsigned int nptl_flags;
3763 sigset_t sigmask;
3764 #else
3765 uint8_t *new_stack;
3766 #endif
3768 /* Emulate vfork() with fork() */
3769 if (flags & CLONE_VFORK)
3770 flags &= ~(CLONE_VFORK | CLONE_VM);
3772 if (flags & CLONE_VM) {
3773 TaskState *parent_ts = (TaskState *)env->opaque;
3774 #if defined(CONFIG_USE_NPTL)
3775 new_thread_info info;
3776 pthread_attr_t attr;
3777 #endif
3778 ts = qemu_mallocz(sizeof(TaskState));
3779 init_task_state(ts);
3780 /* we create a new CPU instance. */
3781 new_env = cpu_copy(env);
3782 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3783 cpu_reset(new_env);
3784 #endif
3785 /* Init regs that differ from the parent. */
3786 cpu_clone_regs(new_env, newsp);
3787 new_env->opaque = ts;
3788 ts->bprm = parent_ts->bprm;
3789 ts->info = parent_ts->info;
3790 #if defined(CONFIG_USE_NPTL)
3791 nptl_flags = flags;
3792 flags &= ~CLONE_NPTL_FLAGS2;
3794 if (nptl_flags & CLONE_CHILD_CLEARTID) {
3795 ts->child_tidptr = child_tidptr;
3798 if (nptl_flags & CLONE_SETTLS)
3799 cpu_set_tls (new_env, newtls);
3801 /* Grab a mutex so that thread setup appears atomic. */
3802 pthread_mutex_lock(&clone_lock);
3804 memset(&info, 0, sizeof(info));
3805 pthread_mutex_init(&info.mutex, NULL);
3806 pthread_mutex_lock(&info.mutex);
3807 pthread_cond_init(&info.cond, NULL);
3808 info.env = new_env;
3809 if (nptl_flags & CLONE_CHILD_SETTID)
3810 info.child_tidptr = child_tidptr;
3811 if (nptl_flags & CLONE_PARENT_SETTID)
3812 info.parent_tidptr = parent_tidptr;
3814 ret = pthread_attr_init(&attr);
3815 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
3816 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
3817 /* It is not safe to deliver signals until the child has finished
3818 initializing, so temporarily block all signals. */
3819 sigfillset(&sigmask);
3820 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3822 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3823 /* TODO: Free new CPU state if thread creation failed. */
3825 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3826 pthread_attr_destroy(&attr);
3827 if (ret == 0) {
3828 /* Wait for the child to initialize. */
3829 pthread_cond_wait(&info.cond, &info.mutex);
3830 ret = info.tid;
3831 if (flags & CLONE_PARENT_SETTID)
3832 put_user_u32(ret, parent_tidptr);
3833 } else {
3834 ret = -1;
3836 pthread_mutex_unlock(&info.mutex);
3837 pthread_cond_destroy(&info.cond);
3838 pthread_mutex_destroy(&info.mutex);
3839 pthread_mutex_unlock(&clone_lock);
3840 #else
3841 if (flags & CLONE_NPTL_FLAGS2)
3842 return -EINVAL;
3843 /* This is probably going to die very quickly, but do it anyway. */
3844 new_stack = qemu_mallocz (NEW_STACK_SIZE);
3845 #ifdef __ia64__
3846 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
3847 #else
3848 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3849 #endif
3850 #endif
3851 } else {
3852 /* if no CLONE_VM, we consider it is a plain fork */
3853 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3854 return -EINVAL;
3855 fork_start();
3856 ret = fork();
3857 if (ret == 0) {
3858 /* Child Process. */
3859 cpu_clone_regs(env, newsp);
3860 fork_end(1);
3861 #if defined(CONFIG_USE_NPTL)
3862 /* There is a race condition here. The parent process could
3863 theoretically read the TID in the child process before the child
3864 tid is set. This would require using either ptrace
3865 (not implemented) or having *_tidptr point at a shared memory
3866 mapping. We can't repeat the spinlock hack used above because
3867 the child process gets its own copy of the lock. */
3868 if (flags & CLONE_CHILD_SETTID)
3869 put_user_u32(gettid(), child_tidptr);
3870 if (flags & CLONE_PARENT_SETTID)
3871 put_user_u32(gettid(), parent_tidptr);
3872 ts = (TaskState *)env->opaque;
3873 if (flags & CLONE_SETTLS)
3874 cpu_set_tls (env, newtls);
3875 if (flags & CLONE_CHILD_CLEARTID)
3876 ts->child_tidptr = child_tidptr;
3877 #endif
3878 } else {
3879 fork_end(0);
3882 return ret;
3885 /* warning: doesn't handle Linux-specific flags... */
3886 static int target_to_host_fcntl_cmd(int cmd)
3888 switch(cmd) {
3889 case TARGET_F_DUPFD:
3890 case TARGET_F_GETFD:
3891 case TARGET_F_SETFD:
3892 case TARGET_F_GETFL:
3893 case TARGET_F_SETFL:
3894 return cmd;
3895 case TARGET_F_GETLK:
3896 return F_GETLK;
3897 case TARGET_F_SETLK:
3898 return F_SETLK;
3899 case TARGET_F_SETLKW:
3900 return F_SETLKW;
3901 case TARGET_F_GETOWN:
3902 return F_GETOWN;
3903 case TARGET_F_SETOWN:
3904 return F_SETOWN;
3905 case TARGET_F_GETSIG:
3906 return F_GETSIG;
3907 case TARGET_F_SETSIG:
3908 return F_SETSIG;
3909 #if TARGET_ABI_BITS == 32
3910 case TARGET_F_GETLK64:
3911 return F_GETLK64;
3912 case TARGET_F_SETLK64:
3913 return F_SETLK64;
3914 case TARGET_F_SETLKW64:
3915 return F_SETLKW64;
3916 #endif
3917 case TARGET_F_SETLEASE:
3918 return F_SETLEASE;
3919 case TARGET_F_GETLEASE:
3920 return F_GETLEASE;
3921 #ifdef F_DUPFD_CLOEXEC
3922 case TARGET_F_DUPFD_CLOEXEC:
3923 return F_DUPFD_CLOEXEC;
3924 #endif
3925 case TARGET_F_NOTIFY:
3926 return F_NOTIFY;
3927 default:
3928 return -TARGET_EINVAL;
3930 return -TARGET_EINVAL;
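/* Editor's note (illustrative sketch, not part of the original file): the
 * numeric values behind the F_* commands are not guaranteed to match between
 * the guest ABI and the host libc, which is why each TARGET_F_* constant is
 * mapped explicitly above rather than passed straight through.  Hedged usage
 * sketch, mirroring how do_fcntl() below consumes the result:
 *
 *     int host_cmd = target_to_host_fcntl_cmd(TARGET_F_GETLK);
 *     if (host_cmd == -TARGET_EINVAL) {
 *         // unknown or unsupported guest command
 *     }
 */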
3933 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
3935 struct flock fl;
3936 struct target_flock *target_fl;
3937 struct flock64 fl64;
3938 struct target_flock64 *target_fl64;
3939 abi_long ret;
3940 int host_cmd = target_to_host_fcntl_cmd(cmd);
3942 if (host_cmd == -TARGET_EINVAL)
3943 return host_cmd;
3945 switch(cmd) {
3946 case TARGET_F_GETLK:
3947 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3948 return -TARGET_EFAULT;
3949 fl.l_type = tswap16(target_fl->l_type);
3950 fl.l_whence = tswap16(target_fl->l_whence);
3951 fl.l_start = tswapl(target_fl->l_start);
3952 fl.l_len = tswapl(target_fl->l_len);
3953 fl.l_pid = tswap32(target_fl->l_pid);
3954 unlock_user_struct(target_fl, arg, 0);
3955 ret = get_errno(fcntl(fd, host_cmd, &fl));
3956 if (ret == 0) {
3957 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
3958 return -TARGET_EFAULT;
3959 target_fl->l_type = tswap16(fl.l_type);
3960 target_fl->l_whence = tswap16(fl.l_whence);
3961 target_fl->l_start = tswapl(fl.l_start);
3962 target_fl->l_len = tswapl(fl.l_len);
3963 target_fl->l_pid = tswap32(fl.l_pid);
3964 unlock_user_struct(target_fl, arg, 1);
3966 break;
3968 case TARGET_F_SETLK:
3969 case TARGET_F_SETLKW:
3970 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3971 return -TARGET_EFAULT;
3972 fl.l_type = tswap16(target_fl->l_type);
3973 fl.l_whence = tswap16(target_fl->l_whence);
3974 fl.l_start = tswapl(target_fl->l_start);
3975 fl.l_len = tswapl(target_fl->l_len);
3976 fl.l_pid = tswap32(target_fl->l_pid);
3977 unlock_user_struct(target_fl, arg, 0);
3978 ret = get_errno(fcntl(fd, host_cmd, &fl));
3979 break;
3981 case TARGET_F_GETLK64:
3982 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3983 return -TARGET_EFAULT;
3984 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3985 fl64.l_whence = tswap16(target_fl64->l_whence);
3986 fl64.l_start = tswapl(target_fl64->l_start);
3987 fl64.l_len = tswapl(target_fl64->l_len);
3988 fl64.l_pid = tswap32(target_fl64->l_pid);
3989 unlock_user_struct(target_fl64, arg, 0);
3990 ret = get_errno(fcntl(fd, host_cmd, &fl64));
3991 if (ret == 0) {
3992 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
3993 return -TARGET_EFAULT;
3994 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
3995 target_fl64->l_whence = tswap16(fl64.l_whence);
3996 target_fl64->l_start = tswapl(fl64.l_start);
3997 target_fl64->l_len = tswapl(fl64.l_len);
3998 target_fl64->l_pid = tswap32(fl64.l_pid);
3999 unlock_user_struct(target_fl64, arg, 1);
4001 break;
4002 case TARGET_F_SETLK64:
4003 case TARGET_F_SETLKW64:
4004 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4005 return -TARGET_EFAULT;
4006 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4007 fl64.l_whence = tswap16(target_fl64->l_whence);
4008 fl64.l_start = tswapl(target_fl64->l_start);
4009 fl64.l_len = tswapl(target_fl64->l_len);
4010 fl64.l_pid = tswap32(target_fl64->l_pid);
4011 unlock_user_struct(target_fl64, arg, 0);
4012 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4013 break;
4015 case TARGET_F_GETFL:
4016 ret = get_errno(fcntl(fd, host_cmd, arg));
4017 if (ret >= 0) {
4018 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4020 break;
4022 case TARGET_F_SETFL:
4023 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4024 break;
4026 case TARGET_F_SETOWN:
4027 case TARGET_F_GETOWN:
4028 case TARGET_F_SETSIG:
4029 case TARGET_F_GETSIG:
4030 case TARGET_F_SETLEASE:
4031 case TARGET_F_GETLEASE:
4032 ret = get_errno(fcntl(fd, host_cmd, arg));
4033 break;
4035 default:
4036 ret = get_errno(fcntl(fd, cmd, arg));
4037 break;
4039 return ret;
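/* Editor's note (illustrative, not part of the original file): the F_GETLK /
 * F_SETLK handling above copies the guest's struct flock field by field,
 * byte-swapping each member, because neither the field widths nor the byte
 * order of the guest structure need to match the host's.  Hedged example: a
 * big-endian guest requesting a write lock on bytes 100..199 stores
 * l_start = 100 and l_len = 100 in guest order; tswapl() turns those into
 * host-order values before the host fcntl() is issued, and on the F_GETLK
 * path the answer is swapped back the same way before being written to the
 * guest structure.
 */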
4042 #ifdef USE_UID16
4044 static inline int high2lowuid(int uid)
4046 if (uid > 65535)
4047 return 65534;
4048 else
4049 return uid;
4052 static inline int high2lowgid(int gid)
4054 if (gid > 65535)
4055 return 65534;
4056 else
4057 return gid;
4060 static inline int low2highuid(int uid)
4062 if ((int16_t)uid == -1)
4063 return -1;
4064 else
4065 return uid;
4068 static inline int low2highgid(int gid)
4070 if ((int16_t)gid == -1)
4071 return -1;
4072 else
4073 return gid;
4076 #endif /* USE_UID16 */
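/* Editor's note (illustrative, not part of the original file): the USE_UID16
 * helpers above implement the usual legacy 16-bit uid/gid convention.
 * Worked example under that assumption:
 *   high2lowuid(100000) -> 65534   ids above 65535 clamp to the "overflow"
 *                                  id 65534
 *   high2lowuid(1000)   -> 1000    small ids pass through unchanged
 *   low2highuid(0xffff) -> -1      (int16_t)0xffff == -1 is preserved, since
 *                                  -1 conventionally means "leave unchanged"
 *                                  in setresuid()-style calls
 */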
4078 void syscall_init(void)
4080 IOCTLEntry *ie;
4081 const argtype *arg_type;
4082 int size;
4083 int i;
4085 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4086 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4087 #include "syscall_types.h"
4088 #undef STRUCT
4089 #undef STRUCT_SPECIAL
4091 /* We patch the ioctl size if necessary. We rely on the fact that
4092 no ioctl has all bits set to '1' in the size field. */
4093 ie = ioctl_entries;
4094 while (ie->target_cmd != 0) {
4095 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4096 TARGET_IOC_SIZEMASK) {
4097 arg_type = ie->arg_type;
4098 if (arg_type[0] != TYPE_PTR) {
4099 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4100 ie->target_cmd);
4101 exit(1);
4103 arg_type++;
4104 size = thunk_type_size(arg_type, 0);
4105 ie->target_cmd = (ie->target_cmd &
4106 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4107 (size << TARGET_IOC_SIZESHIFT);
4110 /* Build target_to_host_errno_table[] table from
4111 * host_to_target_errno_table[]. */
4112 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4113 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4115 /* automatic consistency check if same arch */
4116 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4117 (defined(__x86_64__) && defined(TARGET_X86_64))
4118 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4119 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4120 ie->name, ie->target_cmd, ie->host_cmd);
4122 #endif
4123 ie++;
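/* Editor's note (illustrative, not part of the original file): the size
 * patching above works because a target ioctl defined with every size bit
 * set (TARGET_IOC_SIZEMASK) acts as a "fill me in" sentinel.  Hedged
 * example: if an entry's argument type is a pointer to a struct that the
 * thunk layer reports as 44 bytes on this target, the sentinel size field
 * is replaced by 44, so the patched target_cmd then matches what the guest
 * kernel headers would have encoded for that ioctl.
 */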
4127 #if TARGET_ABI_BITS == 32
4128 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4130 #ifdef TARGET_WORDS_BIGENDIAN
4131 return ((uint64_t)word0 << 32) | word1;
4132 #else
4133 return ((uint64_t)word1 << 32) | word0;
4134 #endif
4136 #else /* TARGET_ABI_BITS == 32 */
4137 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4139 return word0;
4141 #endif /* TARGET_ABI_BITS != 32 */
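/* Editor's note (illustrative, not part of the original file): on 32-bit
 * target ABIs a 64-bit file offset arrives split across two syscall
 * arguments, and target_offset64() glues the halves back together in the
 * guest's word order.  Example with a hypothetical offset 0x280000000:
 *   big-endian guest:    word0 = 0x00000002, word1 = 0x80000000
 *   little-endian guest: word0 = 0x80000000, word1 = 0x00000002
 * Either way target_offset64(word0, word1) reconstructs 0x280000000.
 */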
4143 #ifdef TARGET_NR_truncate64
4144 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4145 abi_long arg2,
4146 abi_long arg3,
4147 abi_long arg4)
4149 #ifdef TARGET_ARM
4150 if (((CPUARMState *)cpu_env)->eabi)
4152 arg2 = arg3;
4153 arg3 = arg4;
4155 #endif
4156 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4158 #endif
4160 #ifdef TARGET_NR_ftruncate64
4161 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4162 abi_long arg2,
4163 abi_long arg3,
4164 abi_long arg4)
4166 #ifdef TARGET_ARM
4167 if (((CPUARMState *)cpu_env)->eabi)
4169 arg2 = arg3;
4170 arg3 = arg4;
4172 #endif
4173 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4175 #endif
4177 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4178 abi_ulong target_addr)
4180 struct target_timespec *target_ts;
4182 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4183 return -TARGET_EFAULT;
4184 host_ts->tv_sec = tswapl(target_ts->tv_sec);
4185 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
4186 unlock_user_struct(target_ts, target_addr, 0);
4187 return 0;
4190 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4191 struct timespec *host_ts)
4193 struct target_timespec *target_ts;
4195 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4196 return -TARGET_EFAULT;
4197 target_ts->tv_sec = tswapl(host_ts->tv_sec);
4198 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
4199 unlock_user_struct(target_ts, target_addr, 1);
4200 return 0;
4203 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4204 static inline abi_long host_to_target_stat64(void *cpu_env,
4205 abi_ulong target_addr,
4206 struct stat *host_st)
4208 #ifdef TARGET_ARM
4209 if (((CPUARMState *)cpu_env)->eabi) {
4210 struct target_eabi_stat64 *target_st;
4212 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4213 return -TARGET_EFAULT;
4214 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4215 __put_user(host_st->st_dev, &target_st->st_dev);
4216 __put_user(host_st->st_ino, &target_st->st_ino);
4217 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4218 __put_user(host_st->st_ino, &target_st->__st_ino);
4219 #endif
4220 __put_user(host_st->st_mode, &target_st->st_mode);
4221 __put_user(host_st->st_nlink, &target_st->st_nlink);
4222 __put_user(host_st->st_uid, &target_st->st_uid);
4223 __put_user(host_st->st_gid, &target_st->st_gid);
4224 __put_user(host_st->st_rdev, &target_st->st_rdev);
4225 __put_user(host_st->st_size, &target_st->st_size);
4226 __put_user(host_st->st_blksize, &target_st->st_blksize);
4227 __put_user(host_st->st_blocks, &target_st->st_blocks);
4228 __put_user(host_st->st_atime, &target_st->target_st_atime);
4229 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4230 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4231 unlock_user_struct(target_st, target_addr, 1);
4232 } else
4233 #endif
4235 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4236 struct target_stat *target_st;
4237 #else
4238 struct target_stat64 *target_st;
4239 #endif
4241 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4242 return -TARGET_EFAULT;
4243 memset(target_st, 0, sizeof(*target_st));
4244 __put_user(host_st->st_dev, &target_st->st_dev);
4245 __put_user(host_st->st_ino, &target_st->st_ino);
4246 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4247 __put_user(host_st->st_ino, &target_st->__st_ino);
4248 #endif
4249 __put_user(host_st->st_mode, &target_st->st_mode);
4250 __put_user(host_st->st_nlink, &target_st->st_nlink);
4251 __put_user(host_st->st_uid, &target_st->st_uid);
4252 __put_user(host_st->st_gid, &target_st->st_gid);
4253 __put_user(host_st->st_rdev, &target_st->st_rdev);
4254 /* XXX: better use of kernel struct */
4255 __put_user(host_st->st_size, &target_st->st_size);
4256 __put_user(host_st->st_blksize, &target_st->st_blksize);
4257 __put_user(host_st->st_blocks, &target_st->st_blocks);
4258 __put_user(host_st->st_atime, &target_st->target_st_atime);
4259 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4260 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4261 unlock_user_struct(target_st, target_addr, 1);
4264 return 0;
4266 #endif
4268 #if defined(CONFIG_USE_NPTL)
4269 /* ??? Using host futex calls even when target atomic operations
4270 are not really atomic probably breaks things. However, implementing
4271 futexes locally would make futexes shared between multiple processes
4272 tricky. Then again, such shared futexes are probably useless because
4273 guest atomic operations won't work either. */
4274 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4275 target_ulong uaddr2, int val3)
4277 struct timespec ts, *pts;
4278 int base_op;
4280 /* ??? We assume FUTEX_* constants are the same on both host
4281 and target. */
4282 #ifdef FUTEX_CMD_MASK
4283 base_op = op & FUTEX_CMD_MASK;
4284 #else
4285 base_op = op;
4286 #endif
4287 switch (base_op) {
4288 case FUTEX_WAIT:
4289 if (timeout) {
4290 pts = &ts;
4291 target_to_host_timespec(pts, timeout);
4292 } else {
4293 pts = NULL;
4295 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4296 pts, NULL, 0));
4297 case FUTEX_WAKE:
4298 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4299 case FUTEX_FD:
4300 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4301 case FUTEX_REQUEUE:
4302 case FUTEX_CMP_REQUEUE:
4303 case FUTEX_WAKE_OP:
4304 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4305 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4306 But the prototype takes a `struct timespec *'; insert casts
4307 to satisfy the compiler. We do not need to tswap TIMEOUT
4308 since it's not compared to guest memory. */
4309 pts = (struct timespec *)(uintptr_t) timeout;
4310 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4311 g2h(uaddr2),
4312 (base_op == FUTEX_CMP_REQUEUE
4313 ? tswap32(val3)
4314 : val3)));
4315 default:
4316 return -TARGET_ENOSYS;
4319 #endif
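/* Editor's note (illustrative, not part of the original file): for
 * FUTEX_WAIT the kernel compares VAL against the 32-bit word at uaddr, and
 * that word lives in guest memory in guest byte order, hence the
 * tswap32(val) above.  Hedged example with a big-endian guest on a
 * little-endian host: if the guest stored 1 in the futex word, the host
 * reads 0x01000000 at g2h(uaddr), so the value passed to sys_futex() must
 * be byte-swapped the same way for the "still equal?" check to succeed.
 */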
4321 /* Map host to target signal numbers for the wait family of syscalls.
4322 Assume all other status bits are the same. */
4323 static int host_to_target_waitstatus(int status)
4325 if (WIFSIGNALED(status)) {
4326 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4328 if (WIFSTOPPED(status)) {
4329 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4330 | (status & 0xff);
4332 return status;
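/* Editor's note (illustrative, not part of the original file): the wait
 * status layout assumed above is the usual Linux one: the terminating
 * signal sits in the low 7 bits, while a stop signal sits in bits 8..15
 * with 0x7f in the low byte.  Worked example with hypothetical numbering
 * where host signal 10 corresponds to target signal 30:
 *   child killed by host signal 10:  0x000a -> 0x001e
 *   child stopped by host signal 10: 0x0a7f -> 0x1e7f
 */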
4335 int get_osversion(void)
4337 static int osversion;
4338 struct new_utsname buf;
4339 const char *s;
4340 int i, n, tmp;
4341 if (osversion)
4342 return osversion;
4343 if (qemu_uname_release && *qemu_uname_release) {
4344 s = qemu_uname_release;
4345 } else {
4346 if (sys_uname(&buf))
4347 return 0;
4348 s = buf.release;
4350 tmp = 0;
4351 for (i = 0; i < 3; i++) {
4352 n = 0;
4353 while (*s >= '0' && *s <= '9') {
4354 n *= 10;
4355 n += *s - '0';
4356 s++;
4358 tmp = (tmp << 8) + n;
4359 if (*s == '.')
4360 s++;
4362 osversion = tmp;
4363 return osversion;
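/* Editor's note (illustrative, not part of the original file): the loop
 * above packs up to three dot-separated version components into one
 * integer, one byte per component.  Example: a release string of
 * "2.6.32-rc1" yields (2 << 16) | (6 << 8) | 32 == 0x020620; each
 * component ends at the first non-digit, and only a '.' advances to the
 * next one.
 */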
4366 /* do_syscall() should always have a single exit point at the end so
4367 that actions, such as logging of syscall results, can be performed.
4368 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4369 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4370 abi_long arg2, abi_long arg3, abi_long arg4,
4371 abi_long arg5, abi_long arg6)
4373 abi_long ret;
4374 struct stat st;
4375 struct statfs stfs;
4376 void *p;
4378 #ifdef DEBUG
4379 gemu_log("syscall %d", num);
4380 #endif
4381 if(do_strace)
4382 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4384 switch(num) {
4385 case TARGET_NR_exit:
4386 #ifdef CONFIG_USE_NPTL
4387 /* In old applications this may be used to implement _exit(2).
4388 However in threaded applications it is used for thread termination,
4389 and _exit_group is used for application termination.
4390 Do thread termination if we have more than one thread. */
4391 /* FIXME: This probably breaks if a signal arrives. We should probably
4392 be disabling signals. */
4393 if (first_cpu->next_cpu) {
4394 TaskState *ts;
4395 CPUState **lastp;
4396 CPUState *p;
4398 cpu_list_lock();
4399 lastp = &first_cpu;
4400 p = first_cpu;
4401 while (p && p != (CPUState *)cpu_env) {
4402 lastp = &p->next_cpu;
4403 p = p->next_cpu;
4405 /* If we didn't find the CPU for this thread then something is
4406 horribly wrong. */
4407 if (!p)
4408 abort();
4409 /* Remove the CPU from the list. */
4410 *lastp = p->next_cpu;
4411 cpu_list_unlock();
4412 ts = ((CPUState *)cpu_env)->opaque;
4413 if (ts->child_tidptr) {
4414 put_user_u32(0, ts->child_tidptr);
4415 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4416 NULL, NULL, 0);
4418 thread_env = NULL;
4419 qemu_free(cpu_env);
4420 qemu_free(ts);
4421 pthread_exit(NULL);
4423 #endif
4424 #ifdef TARGET_GPROF
4425 _mcleanup();
4426 #endif
4427 gdb_exit(cpu_env, arg1);
4428 _exit(arg1);
4429 ret = 0; /* avoid warning */
4430 break;
4431 case TARGET_NR_read:
4432 if (arg3 == 0)
4433 ret = 0;
4434 else {
4435 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4436 goto efault;
4437 ret = get_errno(read(arg1, p, arg3));
4438 unlock_user(p, arg2, ret);
4440 break;
4441 case TARGET_NR_write:
4442 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4443 goto efault;
4444 ret = get_errno(write(arg1, p, arg3));
4445 unlock_user(p, arg2, 0);
4446 break;
4447 case TARGET_NR_open:
4448 if (!(p = lock_user_string(arg1)))
4449 goto efault;
4450 ret = get_errno(open(path(p),
4451 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4452 arg3));
4453 unlock_user(p, arg1, 0);
4454 break;
4455 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4456 case TARGET_NR_openat:
4457 if (!(p = lock_user_string(arg2)))
4458 goto efault;
4459 ret = get_errno(sys_openat(arg1,
4460 path(p),
4461 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4462 arg4));
4463 unlock_user(p, arg2, 0);
4464 break;
4465 #endif
4466 case TARGET_NR_close:
4467 ret = get_errno(close(arg1));
4468 break;
4469 case TARGET_NR_brk:
4470 ret = do_brk(arg1);
4471 break;
4472 case TARGET_NR_fork:
4473 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4474 break;
4475 #ifdef TARGET_NR_waitpid
4476 case TARGET_NR_waitpid:
4478 int status;
4479 ret = get_errno(waitpid(arg1, &status, arg3));
4480 if (!is_error(ret) && arg2
4481 && put_user_s32(host_to_target_waitstatus(status), arg2))
4482 goto efault;
4484 break;
4485 #endif
4486 #ifdef TARGET_NR_waitid
4487 case TARGET_NR_waitid:
4489 siginfo_t info;
4490 info.si_pid = 0;
4491 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4492 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4493 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4494 goto efault;
4495 host_to_target_siginfo(p, &info);
4496 unlock_user(p, arg3, sizeof(target_siginfo_t));
4499 break;
4500 #endif
4501 #ifdef TARGET_NR_creat /* not on alpha */
4502 case TARGET_NR_creat:
4503 if (!(p = lock_user_string(arg1)))
4504 goto efault;
4505 ret = get_errno(creat(p, arg2));
4506 unlock_user(p, arg1, 0);
4507 break;
4508 #endif
4509 case TARGET_NR_link:
4511 void * p2;
4512 p = lock_user_string(arg1);
4513 p2 = lock_user_string(arg2);
4514 if (!p || !p2)
4515 ret = -TARGET_EFAULT;
4516 else
4517 ret = get_errno(link(p, p2));
4518 unlock_user(p2, arg2, 0);
4519 unlock_user(p, arg1, 0);
4521 break;
4522 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4523 case TARGET_NR_linkat:
4525 void * p2 = NULL;
4526 if (!arg2 || !arg4)
4527 goto efault;
4528 p = lock_user_string(arg2);
4529 p2 = lock_user_string(arg4);
4530 if (!p || !p2)
4531 ret = -TARGET_EFAULT;
4532 else
4533 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4534 unlock_user(p, arg2, 0);
4535 unlock_user(p2, arg4, 0);
4537 break;
4538 #endif
4539 case TARGET_NR_unlink:
4540 if (!(p = lock_user_string(arg1)))
4541 goto efault;
4542 ret = get_errno(unlink(p));
4543 unlock_user(p, arg1, 0);
4544 break;
4545 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4546 case TARGET_NR_unlinkat:
4547 if (!(p = lock_user_string(arg2)))
4548 goto efault;
4549 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4550 unlock_user(p, arg2, 0);
4551 break;
4552 #endif
4553 case TARGET_NR_execve:
4555 char **argp, **envp;
4556 int argc, envc;
4557 abi_ulong gp;
4558 abi_ulong guest_argp;
4559 abi_ulong guest_envp;
4560 abi_ulong addr;
4561 char **q;
4563 argc = 0;
4564 guest_argp = arg2;
4565 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4566 if (get_user_ual(addr, gp))
4567 goto efault;
4568 if (!addr)
4569 break;
4570 argc++;
4572 envc = 0;
4573 guest_envp = arg3;
4574 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4575 if (get_user_ual(addr, gp))
4576 goto efault;
4577 if (!addr)
4578 break;
4579 envc++;
4582 argp = alloca((argc + 1) * sizeof(void *));
4583 envp = alloca((envc + 1) * sizeof(void *));
4585 for (gp = guest_argp, q = argp; gp;
4586 gp += sizeof(abi_ulong), q++) {
4587 if (get_user_ual(addr, gp))
4588 goto execve_efault;
4589 if (!addr)
4590 break;
4591 if (!(*q = lock_user_string(addr)))
4592 goto execve_efault;
4594 *q = NULL;
4596 for (gp = guest_envp, q = envp; gp;
4597 gp += sizeof(abi_ulong), q++) {
4598 if (get_user_ual(addr, gp))
4599 goto execve_efault;
4600 if (!addr)
4601 break;
4602 if (!(*q = lock_user_string(addr)))
4603 goto execve_efault;
4605 *q = NULL;
4607 if (!(p = lock_user_string(arg1)))
4608 goto execve_efault;
4609 ret = get_errno(execve(p, argp, envp));
4610 unlock_user(p, arg1, 0);
4612 goto execve_end;
4614 execve_efault:
4615 ret = -TARGET_EFAULT;
4617 execve_end:
4618 for (gp = guest_argp, q = argp; *q;
4619 gp += sizeof(abi_ulong), q++) {
4620 if (get_user_ual(addr, gp)
4621 || !addr)
4622 break;
4623 unlock_user(*q, addr, 0);
4625 for (gp = guest_envp, q = envp; *q;
4626 gp += sizeof(abi_ulong), q++) {
4627 if (get_user_ual(addr, gp)
4628 || !addr)
4629 break;
4630 unlock_user(*q, addr, 0);
4633 break;
4634 case TARGET_NR_chdir:
4635 if (!(p = lock_user_string(arg1)))
4636 goto efault;
4637 ret = get_errno(chdir(p));
4638 unlock_user(p, arg1, 0);
4639 break;
4640 #ifdef TARGET_NR_time
4641 case TARGET_NR_time:
4643 time_t host_time;
4644 ret = get_errno(time(&host_time));
4645 if (!is_error(ret)
4646 && arg1
4647 && put_user_sal(host_time, arg1))
4648 goto efault;
4650 break;
4651 #endif
4652 case TARGET_NR_mknod:
4653 if (!(p = lock_user_string(arg1)))
4654 goto efault;
4655 ret = get_errno(mknod(p, arg2, arg3));
4656 unlock_user(p, arg1, 0);
4657 break;
4658 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4659 case TARGET_NR_mknodat:
4660 if (!(p = lock_user_string(arg2)))
4661 goto efault;
4662 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4663 unlock_user(p, arg2, 0);
4664 break;
4665 #endif
4666 case TARGET_NR_chmod:
4667 if (!(p = lock_user_string(arg1)))
4668 goto efault;
4669 ret = get_errno(chmod(p, arg2));
4670 unlock_user(p, arg1, 0);
4671 break;
4672 #ifdef TARGET_NR_break
4673 case TARGET_NR_break:
4674 goto unimplemented;
4675 #endif
4676 #ifdef TARGET_NR_oldstat
4677 case TARGET_NR_oldstat:
4678 goto unimplemented;
4679 #endif
4680 case TARGET_NR_lseek:
4681 ret = get_errno(lseek(arg1, arg2, arg3));
4682 break;
4683 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4684 /* Alpha specific */
4685 case TARGET_NR_getxpid:
4686 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
4687 ret = get_errno(getpid());
4688 break;
4689 #endif
4690 #ifdef TARGET_NR_getpid
4691 case TARGET_NR_getpid:
4692 ret = get_errno(getpid());
4693 break;
4694 #endif
4695 case TARGET_NR_mount:
4697 /* need to look at the data field */
4698 void *p2, *p3;
4699 p = lock_user_string(arg1);
4700 p2 = lock_user_string(arg2);
4701 p3 = lock_user_string(arg3);
4702 if (!p || !p2 || !p3)
4703 ret = -TARGET_EFAULT;
4704 else {
4705 /* FIXME - arg5 should be locked, but it isn't clear how to
4706 * do that since it's not guaranteed to be a NULL-terminated
4707 * string.
4709 if ( ! arg5 )
4710 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
4711 else
4712 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4714 unlock_user(p, arg1, 0);
4715 unlock_user(p2, arg2, 0);
4716 unlock_user(p3, arg3, 0);
4717 break;
4719 #ifdef TARGET_NR_umount
4720 case TARGET_NR_umount:
4721 if (!(p = lock_user_string(arg1)))
4722 goto efault;
4723 ret = get_errno(umount(p));
4724 unlock_user(p, arg1, 0);
4725 break;
4726 #endif
4727 #ifdef TARGET_NR_stime /* not on alpha */
4728 case TARGET_NR_stime:
4730 time_t host_time;
4731 if (get_user_sal(host_time, arg1))
4732 goto efault;
4733 ret = get_errno(stime(&host_time));
4735 break;
4736 #endif
4737 case TARGET_NR_ptrace:
4738 goto unimplemented;
4739 #ifdef TARGET_NR_alarm /* not on alpha */
4740 case TARGET_NR_alarm:
4741 ret = alarm(arg1);
4742 break;
4743 #endif
4744 #ifdef TARGET_NR_oldfstat
4745 case TARGET_NR_oldfstat:
4746 goto unimplemented;
4747 #endif
4748 #ifdef TARGET_NR_pause /* not on alpha */
4749 case TARGET_NR_pause:
4750 ret = get_errno(pause());
4751 break;
4752 #endif
4753 #ifdef TARGET_NR_utime
4754 case TARGET_NR_utime:
4756 struct utimbuf tbuf, *host_tbuf;
4757 struct target_utimbuf *target_tbuf;
4758 if (arg2) {
4759 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4760 goto efault;
4761 tbuf.actime = tswapl(target_tbuf->actime);
4762 tbuf.modtime = tswapl(target_tbuf->modtime);
4763 unlock_user_struct(target_tbuf, arg2, 0);
4764 host_tbuf = &tbuf;
4765 } else {
4766 host_tbuf = NULL;
4768 if (!(p = lock_user_string(arg1)))
4769 goto efault;
4770 ret = get_errno(utime(p, host_tbuf));
4771 unlock_user(p, arg1, 0);
4773 break;
4774 #endif
4775 case TARGET_NR_utimes:
4777 struct timeval *tvp, tv[2];
4778 if (arg2) {
4779 if (copy_from_user_timeval(&tv[0], arg2)
4780 || copy_from_user_timeval(&tv[1],
4781 arg2 + sizeof(struct target_timeval)))
4782 goto efault;
4783 tvp = tv;
4784 } else {
4785 tvp = NULL;
4787 if (!(p = lock_user_string(arg1)))
4788 goto efault;
4789 ret = get_errno(utimes(p, tvp));
4790 unlock_user(p, arg1, 0);
4792 break;
4793 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4794 case TARGET_NR_futimesat:
4796 struct timeval *tvp, tv[2];
4797 if (arg3) {
4798 if (copy_from_user_timeval(&tv[0], arg3)
4799 || copy_from_user_timeval(&tv[1],
4800 arg3 + sizeof(struct target_timeval)))
4801 goto efault;
4802 tvp = tv;
4803 } else {
4804 tvp = NULL;
4806 if (!(p = lock_user_string(arg2)))
4807 goto efault;
4808 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4809 unlock_user(p, arg2, 0);
4811 break;
4812 #endif
4813 #ifdef TARGET_NR_stty
4814 case TARGET_NR_stty:
4815 goto unimplemented;
4816 #endif
4817 #ifdef TARGET_NR_gtty
4818 case TARGET_NR_gtty:
4819 goto unimplemented;
4820 #endif
4821 case TARGET_NR_access:
4822 if (!(p = lock_user_string(arg1)))
4823 goto efault;
4824 ret = get_errno(access(path(p), arg2));
4825 unlock_user(p, arg1, 0);
4826 break;
4827 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4828 case TARGET_NR_faccessat:
4829 if (!(p = lock_user_string(arg2)))
4830 goto efault;
4831 ret = get_errno(sys_faccessat(arg1, p, arg3));
4832 unlock_user(p, arg2, 0);
4833 break;
4834 #endif
4835 #ifdef TARGET_NR_nice /* not on alpha */
4836 case TARGET_NR_nice:
4837 ret = get_errno(nice(arg1));
4838 break;
4839 #endif
4840 #ifdef TARGET_NR_ftime
4841 case TARGET_NR_ftime:
4842 goto unimplemented;
4843 #endif
4844 case TARGET_NR_sync:
4845 sync();
4846 ret = 0;
4847 break;
4848 case TARGET_NR_kill:
4849 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
4850 break;
4851 case TARGET_NR_rename:
4853 void *p2;
4854 p = lock_user_string(arg1);
4855 p2 = lock_user_string(arg2);
4856 if (!p || !p2)
4857 ret = -TARGET_EFAULT;
4858 else
4859 ret = get_errno(rename(p, p2));
4860 unlock_user(p2, arg2, 0);
4861 unlock_user(p, arg1, 0);
4863 break;
4864 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4865 case TARGET_NR_renameat:
4867 void *p2;
4868 p = lock_user_string(arg2);
4869 p2 = lock_user_string(arg4);
4870 if (!p || !p2)
4871 ret = -TARGET_EFAULT;
4872 else
4873 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
4874 unlock_user(p2, arg4, 0);
4875 unlock_user(p, arg2, 0);
4877 break;
4878 #endif
4879 case TARGET_NR_mkdir:
4880 if (!(p = lock_user_string(arg1)))
4881 goto efault;
4882 ret = get_errno(mkdir(p, arg2));
4883 unlock_user(p, arg1, 0);
4884 break;
4885 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4886 case TARGET_NR_mkdirat:
4887 if (!(p = lock_user_string(arg2)))
4888 goto efault;
4889 ret = get_errno(sys_mkdirat(arg1, p, arg3));
4890 unlock_user(p, arg2, 0);
4891 break;
4892 #endif
4893 case TARGET_NR_rmdir:
4894 if (!(p = lock_user_string(arg1)))
4895 goto efault;
4896 ret = get_errno(rmdir(p));
4897 unlock_user(p, arg1, 0);
4898 break;
4899 case TARGET_NR_dup:
4900 ret = get_errno(dup(arg1));
4901 break;
4902 case TARGET_NR_pipe:
4903 ret = do_pipe(cpu_env, arg1, 0, 0);
4904 break;
4905 #ifdef TARGET_NR_pipe2
4906 case TARGET_NR_pipe2:
4907 ret = do_pipe(cpu_env, arg1, arg2, 1);
4908 break;
4909 #endif
4910 case TARGET_NR_times:
4912 struct target_tms *tmsp;
4913 struct tms tms;
4914 ret = get_errno(times(&tms));
4915 if (arg1) {
4916 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
4917 if (!tmsp)
4918 goto efault;
4919 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
4920 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
4921 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
4922 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
4924 if (!is_error(ret))
4925 ret = host_to_target_clock_t(ret);
4927 break;
4928 #ifdef TARGET_NR_prof
4929 case TARGET_NR_prof:
4930 goto unimplemented;
4931 #endif
4932 #ifdef TARGET_NR_signal
4933 case TARGET_NR_signal:
4934 goto unimplemented;
4935 #endif
4936 case TARGET_NR_acct:
4937 if (arg1 == 0) {
4938 ret = get_errno(acct(NULL));
4939 } else {
4940 if (!(p = lock_user_string(arg1)))
4941 goto efault;
4942 ret = get_errno(acct(path(p)));
4943 unlock_user(p, arg1, 0);
4945 break;
4946 #ifdef TARGET_NR_umount2 /* not on alpha */
4947 case TARGET_NR_umount2:
4948 if (!(p = lock_user_string(arg1)))
4949 goto efault;
4950 ret = get_errno(umount2(p, arg2));
4951 unlock_user(p, arg1, 0);
4952 break;
4953 #endif
4954 #ifdef TARGET_NR_lock
4955 case TARGET_NR_lock:
4956 goto unimplemented;
4957 #endif
4958 case TARGET_NR_ioctl:
4959 ret = do_ioctl(arg1, arg2, arg3);
4960 break;
4961 case TARGET_NR_fcntl:
4962 ret = do_fcntl(arg1, arg2, arg3);
4963 break;
4964 #ifdef TARGET_NR_mpx
4965 case TARGET_NR_mpx:
4966 goto unimplemented;
4967 #endif
4968 case TARGET_NR_setpgid:
4969 ret = get_errno(setpgid(arg1, arg2));
4970 break;
4971 #ifdef TARGET_NR_ulimit
4972 case TARGET_NR_ulimit:
4973 goto unimplemented;
4974 #endif
4975 #ifdef TARGET_NR_oldolduname
4976 case TARGET_NR_oldolduname:
4977 goto unimplemented;
4978 #endif
4979 case TARGET_NR_umask:
4980 ret = get_errno(umask(arg1));
4981 break;
4982 case TARGET_NR_chroot:
4983 if (!(p = lock_user_string(arg1)))
4984 goto efault;
4985 ret = get_errno(chroot(p));
4986 unlock_user(p, arg1, 0);
4987 break;
4988 case TARGET_NR_ustat:
4989 goto unimplemented;
4990 case TARGET_NR_dup2:
4991 ret = get_errno(dup2(arg1, arg2));
4992 break;
4993 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
4994 case TARGET_NR_dup3:
4995 ret = get_errno(dup3(arg1, arg2, arg3));
4996 break;
4997 #endif
4998 #ifdef TARGET_NR_getppid /* not on alpha */
4999 case TARGET_NR_getppid:
5000 ret = get_errno(getppid());
5001 break;
5002 #endif
5003 case TARGET_NR_getpgrp:
5004 ret = get_errno(getpgrp());
5005 break;
5006 case TARGET_NR_setsid:
5007 ret = get_errno(setsid());
5008 break;
5009 #ifdef TARGET_NR_sigaction
5010 case TARGET_NR_sigaction:
5012 #if defined(TARGET_ALPHA)
5013 struct target_sigaction act, oact, *pact = 0;
5014 struct target_old_sigaction *old_act;
5015 if (arg2) {
5016 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5017 goto efault;
5018 act._sa_handler = old_act->_sa_handler;
5019 target_siginitset(&act.sa_mask, old_act->sa_mask);
5020 act.sa_flags = old_act->sa_flags;
5021 act.sa_restorer = 0;
5022 unlock_user_struct(old_act, arg2, 0);
5023 pact = &act;
5025 ret = get_errno(do_sigaction(arg1, pact, &oact));
5026 if (!is_error(ret) && arg3) {
5027 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5028 goto efault;
5029 old_act->_sa_handler = oact._sa_handler;
5030 old_act->sa_mask = oact.sa_mask.sig[0];
5031 old_act->sa_flags = oact.sa_flags;
5032 unlock_user_struct(old_act, arg3, 1);
5034 #elif defined(TARGET_MIPS)
5035 struct target_sigaction act, oact, *pact, *old_act;
5037 if (arg2) {
5038 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5039 goto efault;
5040 act._sa_handler = old_act->_sa_handler;
5041 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5042 act.sa_flags = old_act->sa_flags;
5043 unlock_user_struct(old_act, arg2, 0);
5044 pact = &act;
5045 } else {
5046 pact = NULL;
5049 ret = get_errno(do_sigaction(arg1, pact, &oact));
5051 if (!is_error(ret) && arg3) {
5052 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5053 goto efault;
5054 old_act->_sa_handler = oact._sa_handler;
5055 old_act->sa_flags = oact.sa_flags;
5056 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5057 old_act->sa_mask.sig[1] = 0;
5058 old_act->sa_mask.sig[2] = 0;
5059 old_act->sa_mask.sig[3] = 0;
5060 unlock_user_struct(old_act, arg3, 1);
5062 #else
5063 struct target_old_sigaction *old_act;
5064 struct target_sigaction act, oact, *pact;
5065 if (arg2) {
5066 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5067 goto efault;
5068 act._sa_handler = old_act->_sa_handler;
5069 target_siginitset(&act.sa_mask, old_act->sa_mask);
5070 act.sa_flags = old_act->sa_flags;
5071 act.sa_restorer = old_act->sa_restorer;
5072 unlock_user_struct(old_act, arg2, 0);
5073 pact = &act;
5074 } else {
5075 pact = NULL;
5077 ret = get_errno(do_sigaction(arg1, pact, &oact));
5078 if (!is_error(ret) && arg3) {
5079 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5080 goto efault;
5081 old_act->_sa_handler = oact._sa_handler;
5082 old_act->sa_mask = oact.sa_mask.sig[0];
5083 old_act->sa_flags = oact.sa_flags;
5084 old_act->sa_restorer = oact.sa_restorer;
5085 unlock_user_struct(old_act, arg3, 1);
5087 #endif
5089 break;
5090 #endif
5091 case TARGET_NR_rt_sigaction:
5093 #if defined(TARGET_ALPHA)
5094 struct target_sigaction act, oact, *pact = 0;
5095 struct target_rt_sigaction *rt_act;
5096 /* ??? arg4 == sizeof(sigset_t). */
5097 if (arg2) {
5098 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5099 goto efault;
5100 act._sa_handler = rt_act->_sa_handler;
5101 act.sa_mask = rt_act->sa_mask;
5102 act.sa_flags = rt_act->sa_flags;
5103 act.sa_restorer = arg5;
5104 unlock_user_struct(rt_act, arg2, 0);
5105 pact = &act;
5107 ret = get_errno(do_sigaction(arg1, pact, &oact));
5108 if (!is_error(ret) && arg3) {
5109 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5110 goto efault;
5111 rt_act->_sa_handler = oact._sa_handler;
5112 rt_act->sa_mask = oact.sa_mask;
5113 rt_act->sa_flags = oact.sa_flags;
5114 unlock_user_struct(rt_act, arg3, 1);
5116 #else
5117 struct target_sigaction *act;
5118 struct target_sigaction *oact;
5120 if (arg2) {
5121 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5122 goto efault;
5123 } else
5124 act = NULL;
5125 if (arg3) {
5126 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5127 ret = -TARGET_EFAULT;
5128 goto rt_sigaction_fail;
5130 } else
5131 oact = NULL;
5132 ret = get_errno(do_sigaction(arg1, act, oact));
5133 rt_sigaction_fail:
5134 if (act)
5135 unlock_user_struct(act, arg2, 0);
5136 if (oact)
5137 unlock_user_struct(oact, arg3, 1);
5138 #endif
5140 break;
5141 #ifdef TARGET_NR_sgetmask /* not on alpha */
5142 case TARGET_NR_sgetmask:
5144 sigset_t cur_set;
5145 abi_ulong target_set;
5146 sigprocmask(0, NULL, &cur_set);
5147 host_to_target_old_sigset(&target_set, &cur_set);
5148 ret = target_set;
5150 break;
5151 #endif
5152 #ifdef TARGET_NR_ssetmask /* not on alpha */
5153 case TARGET_NR_ssetmask:
5155 sigset_t set, oset, cur_set;
5156 abi_ulong target_set = arg1;
5157 sigprocmask(0, NULL, &cur_set);
5158 target_to_host_old_sigset(&set, &target_set);
5159 sigorset(&set, &set, &cur_set);
5160 sigprocmask(SIG_SETMASK, &set, &oset);
5161 host_to_target_old_sigset(&target_set, &oset);
5162 ret = target_set;
5164 break;
5165 #endif
5166 #ifdef TARGET_NR_sigprocmask
5167 case TARGET_NR_sigprocmask:
5169 #if defined(TARGET_ALPHA)
5170 sigset_t set, oldset;
5171 abi_ulong mask;
5172 int how;
5174 switch (arg1) {
5175 case TARGET_SIG_BLOCK:
5176 how = SIG_BLOCK;
5177 break;
5178 case TARGET_SIG_UNBLOCK:
5179 how = SIG_UNBLOCK;
5180 break;
5181 case TARGET_SIG_SETMASK:
5182 how = SIG_SETMASK;
5183 break;
5184 default:
5185 ret = -TARGET_EINVAL;
5186 goto fail;
5188 mask = arg2;
5189 target_to_host_old_sigset(&set, &mask);
5191 ret = get_errno(sigprocmask(how, &set, &oldset));
5193 if (!is_error(ret)) {
5194 host_to_target_old_sigset(&mask, &oldset);
5195 ret = mask;
5196 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5198 #else
5199 sigset_t set, oldset, *set_ptr;
5200 int how;
5202 if (arg2) {
5203 switch (arg1) {
5204 case TARGET_SIG_BLOCK:
5205 how = SIG_BLOCK;
5206 break;
5207 case TARGET_SIG_UNBLOCK:
5208 how = SIG_UNBLOCK;
5209 break;
5210 case TARGET_SIG_SETMASK:
5211 how = SIG_SETMASK;
5212 break;
5213 default:
5214 ret = -TARGET_EINVAL;
5215 goto fail;
5217 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5218 goto efault;
5219 target_to_host_old_sigset(&set, p);
5220 unlock_user(p, arg2, 0);
5221 set_ptr = &set;
5222 } else {
5223 how = 0;
5224 set_ptr = NULL;
5226 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5227 if (!is_error(ret) && arg3) {
5228 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5229 goto efault;
5230 host_to_target_old_sigset(p, &oldset);
5231 unlock_user(p, arg3, sizeof(target_sigset_t));
5233 #endif
5235 break;
5236 #endif
5237 case TARGET_NR_rt_sigprocmask:
5239 int how = arg1;
5240 sigset_t set, oldset, *set_ptr;
5242 if (arg2) {
5243 switch(how) {
5244 case TARGET_SIG_BLOCK:
5245 how = SIG_BLOCK;
5246 break;
5247 case TARGET_SIG_UNBLOCK:
5248 how = SIG_UNBLOCK;
5249 break;
5250 case TARGET_SIG_SETMASK:
5251 how = SIG_SETMASK;
5252 break;
5253 default:
5254 ret = -TARGET_EINVAL;
5255 goto fail;
5257 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5258 goto efault;
5259 target_to_host_sigset(&set, p);
5260 unlock_user(p, arg2, 0);
5261 set_ptr = &set;
5262 } else {
5263 how = 0;
5264 set_ptr = NULL;
5266 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5267 if (!is_error(ret) && arg3) {
5268 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5269 goto efault;
5270 host_to_target_sigset(p, &oldset);
5271 unlock_user(p, arg3, sizeof(target_sigset_t));
5274 break;
5275 #ifdef TARGET_NR_sigpending
5276 case TARGET_NR_sigpending:
5278 sigset_t set;
5279 ret = get_errno(sigpending(&set));
5280 if (!is_error(ret)) {
5281 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5282 goto efault;
5283 host_to_target_old_sigset(p, &set);
5284 unlock_user(p, arg1, sizeof(target_sigset_t));
5287 break;
5288 #endif
5289 case TARGET_NR_rt_sigpending:
5291 sigset_t set;
5292 ret = get_errno(sigpending(&set));
5293 if (!is_error(ret)) {
5294 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5295 goto efault;
5296 host_to_target_sigset(p, &set);
5297 unlock_user(p, arg1, sizeof(target_sigset_t));
5300 break;
5301 #ifdef TARGET_NR_sigsuspend
5302 case TARGET_NR_sigsuspend:
5304 sigset_t set;
5305 #if defined(TARGET_ALPHA)
5306 abi_ulong mask = arg1;
5307 target_to_host_old_sigset(&set, &mask);
5308 #else
5309 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5310 goto efault;
5311 target_to_host_old_sigset(&set, p);
5312 unlock_user(p, arg1, 0);
5313 #endif
5314 ret = get_errno(sigsuspend(&set));
5316 break;
5317 #endif
5318 case TARGET_NR_rt_sigsuspend:
5320 sigset_t set;
5321 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5322 goto efault;
5323 target_to_host_sigset(&set, p);
5324 unlock_user(p, arg1, 0);
5325 ret = get_errno(sigsuspend(&set));
5327 break;
5328 case TARGET_NR_rt_sigtimedwait:
5330 sigset_t set;
5331 struct timespec uts, *puts;
5332 siginfo_t uinfo;
5334 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5335 goto efault;
5336 target_to_host_sigset(&set, p);
5337 unlock_user(p, arg1, 0);
5338 if (arg3) {
5339 puts = &uts;
5340 target_to_host_timespec(puts, arg3);
5341 } else {
5342 puts = NULL;
5344 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5345 if (!is_error(ret) && arg2) {
5346 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5347 goto efault;
5348 host_to_target_siginfo(p, &uinfo);
5349 unlock_user(p, arg2, sizeof(target_siginfo_t));
5352 break;
5353 case TARGET_NR_rt_sigqueueinfo:
5355 siginfo_t uinfo;
5356 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
5357 goto efault;
5358 target_to_host_siginfo(&uinfo, p);
5359 unlock_user(p, arg1, 0);
5360 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5362 break;
5363 #ifdef TARGET_NR_sigreturn
5364 case TARGET_NR_sigreturn:
5365 /* NOTE: ret is eax, so no transcoding needs to be done */
5366 ret = do_sigreturn(cpu_env);
5367 break;
5368 #endif
5369 case TARGET_NR_rt_sigreturn:
5370 /* NOTE: ret is eax, so no transcoding needs to be done */
5371 ret = do_rt_sigreturn(cpu_env);
5372 break;
5373 case TARGET_NR_sethostname:
5374 if (!(p = lock_user_string(arg1)))
5375 goto efault;
5376 ret = get_errno(sethostname(p, arg2));
5377 unlock_user(p, arg1, 0);
5378 break;
5379 case TARGET_NR_setrlimit:
5381 int resource = arg1;
5382 struct target_rlimit *target_rlim;
5383 struct rlimit rlim;
5384 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5385 goto efault;
5386 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5387 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5388 unlock_user_struct(target_rlim, arg2, 0);
5389 ret = get_errno(setrlimit(resource, &rlim));
5391 break;
5392 case TARGET_NR_getrlimit:
5394 int resource = arg1;
5395 struct target_rlimit *target_rlim;
5396 struct rlimit rlim;
5398 ret = get_errno(getrlimit(resource, &rlim));
5399 if (!is_error(ret)) {
5400 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5401 goto efault;
5402 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5403 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5404 unlock_user_struct(target_rlim, arg2, 1);
5407 break;
5408 case TARGET_NR_getrusage:
5410 struct rusage rusage;
5411 ret = get_errno(getrusage(arg1, &rusage));
5412 if (!is_error(ret)) {
5413 host_to_target_rusage(arg2, &rusage);
5416 break;
5417 case TARGET_NR_gettimeofday:
5419 struct timeval tv;
5420 ret = get_errno(gettimeofday(&tv, NULL));
5421 if (!is_error(ret)) {
5422 if (copy_to_user_timeval(arg1, &tv))
5423 goto efault;
5426 break;
5427 case TARGET_NR_settimeofday:
5429 struct timeval tv;
5430 if (copy_from_user_timeval(&tv, arg1))
5431 goto efault;
5432 ret = get_errno(settimeofday(&tv, NULL));
5434 break;
5435 #ifdef TARGET_NR_select
5436 case TARGET_NR_select:
5438 struct target_sel_arg_struct *sel;
5439 abi_ulong inp, outp, exp, tvp;
5440 long nsel;
5442 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5443 goto efault;
5444 nsel = tswapl(sel->n);
5445 inp = tswapl(sel->inp);
5446 outp = tswapl(sel->outp);
5447 exp = tswapl(sel->exp);
5448 tvp = tswapl(sel->tvp);
5449 unlock_user_struct(sel, arg1, 0);
5450 ret = do_select(nsel, inp, outp, exp, tvp);
5452 break;
5453 #endif
5454 #ifdef TARGET_NR_pselect6
5455 case TARGET_NR_pselect6:
5456 goto unimplemented_nowarn;
5457 #endif
5458 case TARGET_NR_symlink:
5460 void *p2;
5461 p = lock_user_string(arg1);
5462 p2 = lock_user_string(arg2);
5463 if (!p || !p2)
5464 ret = -TARGET_EFAULT;
5465 else
5466 ret = get_errno(symlink(p, p2));
5467 unlock_user(p2, arg2, 0);
5468 unlock_user(p, arg1, 0);
5470 break;
5471 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5472 case TARGET_NR_symlinkat:
5474 void *p2;
5475 p = lock_user_string(arg1);
5476 p2 = lock_user_string(arg3);
5477 if (!p || !p2)
5478 ret = -TARGET_EFAULT;
5479 else
5480 ret = get_errno(sys_symlinkat(p, arg2, p2));
5481 unlock_user(p2, arg3, 0);
5482 unlock_user(p, arg1, 0);
5484 break;
5485 #endif
5486 #ifdef TARGET_NR_oldlstat
5487 case TARGET_NR_oldlstat:
5488 goto unimplemented;
5489 #endif
5490 case TARGET_NR_readlink:
5492 void *p2, *temp;
5493 p = lock_user_string(arg1);
5494 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5495 if (!p || !p2)
5496 ret = -TARGET_EFAULT;
5497 else {
5498 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5499 char real[PATH_MAX];
5500 temp = realpath(exec_path,real);
5501 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
5502 snprintf((char *)p2, arg3, "%s", real);
5504 else
5505 ret = get_errno(readlink(path(p), p2, arg3));
5507 unlock_user(p2, arg2, ret);
5508 unlock_user(p, arg1, 0);
5510 break;
5511 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5512 case TARGET_NR_readlinkat:
5514 void *p2;
5515 p = lock_user_string(arg2);
5516 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5517 if (!p || !p2)
5518 ret = -TARGET_EFAULT;
5519 else
5520 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5521 unlock_user(p2, arg3, ret);
5522 unlock_user(p, arg2, 0);
5524 break;
5525 #endif
5526 #ifdef TARGET_NR_uselib
5527 case TARGET_NR_uselib:
5528 goto unimplemented;
5529 #endif
5530 #ifdef TARGET_NR_swapon
5531 case TARGET_NR_swapon:
5532 if (!(p = lock_user_string(arg1)))
5533 goto efault;
5534 ret = get_errno(swapon(p, arg2));
5535 unlock_user(p, arg1, 0);
5536 break;
5537 #endif
5538 case TARGET_NR_reboot:
5539 goto unimplemented;
5540 #ifdef TARGET_NR_readdir
5541 case TARGET_NR_readdir:
5542 goto unimplemented;
5543 #endif
5544 #ifdef TARGET_NR_mmap
5545 case TARGET_NR_mmap:
5546 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE)
5548 abi_ulong *v;
5549 abi_ulong v1, v2, v3, v4, v5, v6;
5550 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5551 goto efault;
5552 v1 = tswapl(v[0]);
5553 v2 = tswapl(v[1]);
5554 v3 = tswapl(v[2]);
5555 v4 = tswapl(v[3]);
5556 v5 = tswapl(v[4]);
5557 v6 = tswapl(v[5]);
5558 unlock_user(v, arg1, 0);
5559 ret = get_errno(target_mmap(v1, v2, v3,
5560 target_to_host_bitmask(v4, mmap_flags_tbl),
5561 v5, v6));
5563 #else
5564 ret = get_errno(target_mmap(arg1, arg2, arg3,
5565 target_to_host_bitmask(arg4, mmap_flags_tbl),
5566 arg5,
5567 arg6));
5568 #endif
5569 break;
5570 #endif
5571 #ifdef TARGET_NR_mmap2
5572 case TARGET_NR_mmap2:
5573 #ifndef MMAP_SHIFT
5574 #define MMAP_SHIFT 12
5575 #endif
5576 ret = get_errno(target_mmap(arg1, arg2, arg3,
5577 target_to_host_bitmask(arg4, mmap_flags_tbl),
5578 arg5,
5579 arg6 << MMAP_SHIFT));
5580 break;
5581 #endif
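        /* Editor's note (illustrative, not part of the original file): mmap2
         * takes its file offset in units of 1 << MMAP_SHIFT bytes (4096
         * unless the target defines otherwise) rather than in bytes, which
         * is what the arg6 << MMAP_SHIFT above converts.  Example: a guest
         * passing arg6 == 0x10 is asking to map from byte offset 0x10000. */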
5582 case TARGET_NR_munmap:
5583 ret = get_errno(target_munmap(arg1, arg2));
5584 break;
5585 case TARGET_NR_mprotect:
5587 TaskState *ts = ((CPUState *)cpu_env)->opaque;
5588 /* Special hack to detect libc making the stack executable. */
5589 if ((arg3 & PROT_GROWSDOWN)
5590 && arg1 >= ts->info->stack_limit
5591 && arg1 <= ts->info->start_stack) {
5592 arg3 &= ~PROT_GROWSDOWN;
5593 arg2 = arg2 + arg1 - ts->info->stack_limit;
5594 arg1 = ts->info->stack_limit;
5597 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5598 break;
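        /* Editor's note (illustrative, not part of the original file): a
         * worked example of the PROT_GROWSDOWN hack above, with hypothetical
         * addresses.  Suppose the guest stack occupies stack_limit =
         * 0x40000000 up to start_stack = 0x40800000 and libc calls
         *   mprotect(0x40400000, 0x1000, PROT_READ|PROT_EXEC|PROT_GROWSDOWN)
         * The flag is stripped and the range is widened downwards, giving
         *   target_mprotect(0x40000000, 0x401000, PROT_READ|PROT_EXEC)
         * i.e. the whole already-mapped stack below the requested page gets
         * the new protection, which is what PROT_GROWSDOWN would have meant
         * on a real grows-down mapping. */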
5599 #ifdef TARGET_NR_mremap
5600 case TARGET_NR_mremap:
5601 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5602 break;
5603 #endif
5604 /* ??? msync/mlock/munlock are broken for softmmu. */
5605 #ifdef TARGET_NR_msync
5606 case TARGET_NR_msync:
5607 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5608 break;
5609 #endif
5610 #ifdef TARGET_NR_mlock
5611 case TARGET_NR_mlock:
5612 ret = get_errno(mlock(g2h(arg1), arg2));
5613 break;
5614 #endif
5615 #ifdef TARGET_NR_munlock
5616 case TARGET_NR_munlock:
5617 ret = get_errno(munlock(g2h(arg1), arg2));
5618 break;
5619 #endif
5620 #ifdef TARGET_NR_mlockall
5621 case TARGET_NR_mlockall:
5622 ret = get_errno(mlockall(arg1));
5623 break;
5624 #endif
5625 #ifdef TARGET_NR_munlockall
5626 case TARGET_NR_munlockall:
5627 ret = get_errno(munlockall());
5628 break;
5629 #endif
5630 case TARGET_NR_truncate:
5631 if (!(p = lock_user_string(arg1)))
5632 goto efault;
5633 ret = get_errno(truncate(p, arg2));
5634 unlock_user(p, arg1, 0);
5635 break;
5636 case TARGET_NR_ftruncate:
5637 ret = get_errno(ftruncate(arg1, arg2));
5638 break;
5639 case TARGET_NR_fchmod:
5640 ret = get_errno(fchmod(arg1, arg2));
5641 break;
5642 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5643 case TARGET_NR_fchmodat:
5644 if (!(p = lock_user_string(arg2)))
5645 goto efault;
5646 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5647 unlock_user(p, arg2, 0);
5648 break;
5649 #endif
5650 case TARGET_NR_getpriority:
5651 /* libc does special remapping of the return value of
5652 * sys_getpriority() so it's just easiest to call
5653 * sys_getpriority() directly rather than through libc. */
5654 ret = get_errno(sys_getpriority(arg1, arg2));
5655 break;
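        /* Editor's note (illustrative, not part of the original file): the
         * raw getpriority syscall is assumed to return 20 - nice (a value in
         * 1..40) so that valid results never look like negative errno
         * values; a glibc-style wrapper in the guest then undoes that.
         * Example: for a process at nice -5 the kernel returns 25, which is
         * handed back to the guest unmodified, and the guest's libc computes
         * 20 - 25 = -5. */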
5656 case TARGET_NR_setpriority:
5657 ret = get_errno(setpriority(arg1, arg2, arg3));
5658 break;
5659 #ifdef TARGET_NR_profil
5660 case TARGET_NR_profil:
5661 goto unimplemented;
5662 #endif
5663 case TARGET_NR_statfs:
5664 if (!(p = lock_user_string(arg1)))
5665 goto efault;
5666 ret = get_errno(statfs(path(p), &stfs));
5667 unlock_user(p, arg1, 0);
5668 convert_statfs:
5669 if (!is_error(ret)) {
5670 struct target_statfs *target_stfs;
5672 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5673 goto efault;
5674 __put_user(stfs.f_type, &target_stfs->f_type);
5675 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5676 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5677 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5678 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5679 __put_user(stfs.f_files, &target_stfs->f_files);
5680 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5681 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5682 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5683 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5684 unlock_user_struct(target_stfs, arg2, 1);
5686 break;
5687 case TARGET_NR_fstatfs:
5688 ret = get_errno(fstatfs(arg1, &stfs));
5689 goto convert_statfs;
5690 #ifdef TARGET_NR_statfs64
5691 case TARGET_NR_statfs64:
5692 if (!(p = lock_user_string(arg1)))
5693 goto efault;
5694 ret = get_errno(statfs(path(p), &stfs));
5695 unlock_user(p, arg1, 0);
5696 convert_statfs64:
5697 if (!is_error(ret)) {
5698 struct target_statfs64 *target_stfs;
5700 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5701 goto efault;
5702 __put_user(stfs.f_type, &target_stfs->f_type);
5703 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5704 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5705 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5706 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5707 __put_user(stfs.f_files, &target_stfs->f_files);
5708 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5709 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5710 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5711 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5712 unlock_user_struct(target_stfs, arg3, 1);
5714 break;
5715 case TARGET_NR_fstatfs64:
5716 ret = get_errno(fstatfs(arg1, &stfs));
5717 goto convert_statfs64;
5718 #endif
5719 #ifdef TARGET_NR_ioperm
5720 case TARGET_NR_ioperm:
5721 goto unimplemented;
5722 #endif
5723 #ifdef TARGET_NR_socketcall
5724 case TARGET_NR_socketcall:
5725 ret = do_socketcall(arg1, arg2);
5726 break;
5727 #endif
5728 #ifdef TARGET_NR_accept
5729 case TARGET_NR_accept:
5730 ret = do_accept(arg1, arg2, arg3);
5731 break;
5732 #endif
5733 #ifdef TARGET_NR_bind
5734 case TARGET_NR_bind:
5735 ret = do_bind(arg1, arg2, arg3);
5736 break;
5737 #endif
5738 #ifdef TARGET_NR_connect
5739 case TARGET_NR_connect:
5740 ret = do_connect(arg1, arg2, arg3);
5741 break;
5742 #endif
5743 #ifdef TARGET_NR_getpeername
5744 case TARGET_NR_getpeername:
5745 ret = do_getpeername(arg1, arg2, arg3);
5746 break;
5747 #endif
5748 #ifdef TARGET_NR_getsockname
5749 case TARGET_NR_getsockname:
5750 ret = do_getsockname(arg1, arg2, arg3);
5751 break;
5752 #endif
5753 #ifdef TARGET_NR_getsockopt
5754 case TARGET_NR_getsockopt:
5755 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
5756 break;
5757 #endif
5758 #ifdef TARGET_NR_listen
5759 case TARGET_NR_listen:
5760 ret = get_errno(listen(arg1, arg2));
5761 break;
5762 #endif
5763 #ifdef TARGET_NR_recv
5764 case TARGET_NR_recv:
5765 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
5766 break;
5767 #endif
5768 #ifdef TARGET_NR_recvfrom
5769 case TARGET_NR_recvfrom:
5770 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
5771 break;
5772 #endif
5773 #ifdef TARGET_NR_recvmsg
5774 case TARGET_NR_recvmsg:
5775 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
5776 break;
5777 #endif
5778 #ifdef TARGET_NR_send
5779 case TARGET_NR_send:
5780 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
5781 break;
5782 #endif
5783 #ifdef TARGET_NR_sendmsg
5784 case TARGET_NR_sendmsg:
5785 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
5786 break;
5787 #endif
5788 #ifdef TARGET_NR_sendto
5789 case TARGET_NR_sendto:
5790 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
5791 break;
5792 #endif
5793 #ifdef TARGET_NR_shutdown
5794 case TARGET_NR_shutdown:
5795 ret = get_errno(shutdown(arg1, arg2));
5796 break;
5797 #endif
5798 #ifdef TARGET_NR_socket
5799 case TARGET_NR_socket:
5800 ret = do_socket(arg1, arg2, arg3);
5801 break;
5802 #endif
5803 #ifdef TARGET_NR_socketpair
5804 case TARGET_NR_socketpair:
5805 ret = do_socketpair(arg1, arg2, arg3, arg4);
5806 break;
5807 #endif
5808 #ifdef TARGET_NR_setsockopt
5809 case TARGET_NR_setsockopt:
5810 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
5811 break;
5812 #endif
5814 case TARGET_NR_syslog:
5815 if (!(p = lock_user_string(arg2)))
5816 goto efault;
5817 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
5818 unlock_user(p, arg2, 0);
5819 break;
5821 case TARGET_NR_setitimer:
5823 struct itimerval value, ovalue, *pvalue;
5825 if (arg2) {
5826 pvalue = &value;
5827 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
5828 || copy_from_user_timeval(&pvalue->it_value,
5829 arg2 + sizeof(struct target_timeval)))
5830 goto efault;
5831 } else {
5832 pvalue = NULL;
5834 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
5835 if (!is_error(ret) && arg3) {
5836 if (copy_to_user_timeval(arg3,
5837 &ovalue.it_interval)
5838 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
5839 &ovalue.it_value))
5840 goto efault;
5843 break;
5844 case TARGET_NR_getitimer:
5846 struct itimerval value;
5848 ret = get_errno(getitimer(arg1, &value));
5849 if (!is_error(ret) && arg2) {
5850 if (copy_to_user_timeval(arg2,
5851 &value.it_interval)
5852 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
5853 &value.it_value))
5854 goto efault;
5857 break;
5858 case TARGET_NR_stat:
5859 if (!(p = lock_user_string(arg1)))
5860 goto efault;
5861 ret = get_errno(stat(path(p), &st));
5862 unlock_user(p, arg1, 0);
5863 goto do_stat;
5864 case TARGET_NR_lstat:
5865 if (!(p = lock_user_string(arg1)))
5866 goto efault;
5867 ret = get_errno(lstat(path(p), &st));
5868 unlock_user(p, arg1, 0);
5869 goto do_stat;
5870 case TARGET_NR_fstat:
5872 ret = get_errno(fstat(arg1, &st));
5873 do_stat:
5874 if (!is_error(ret)) {
5875 struct target_stat *target_st;
5877 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
5878 goto efault;
5879 memset(target_st, 0, sizeof(*target_st));
5880 __put_user(st.st_dev, &target_st->st_dev);
5881 __put_user(st.st_ino, &target_st->st_ino);
5882 __put_user(st.st_mode, &target_st->st_mode);
5883 __put_user(st.st_uid, &target_st->st_uid);
5884 __put_user(st.st_gid, &target_st->st_gid);
5885 __put_user(st.st_nlink, &target_st->st_nlink);
5886 __put_user(st.st_rdev, &target_st->st_rdev);
5887 __put_user(st.st_size, &target_st->st_size);
5888 __put_user(st.st_blksize, &target_st->st_blksize);
5889 __put_user(st.st_blocks, &target_st->st_blocks);
5890 __put_user(st.st_atime, &target_st->target_st_atime);
5891 __put_user(st.st_mtime, &target_st->target_st_mtime);
5892 __put_user(st.st_ctime, &target_st->target_st_ctime);
5893 unlock_user_struct(target_st, arg2, 1);
5896 break;
5897 #ifdef TARGET_NR_olduname
5898 case TARGET_NR_olduname:
5899 goto unimplemented;
5900 #endif
5901 #ifdef TARGET_NR_iopl
5902 case TARGET_NR_iopl:
5903 goto unimplemented;
5904 #endif
5905 case TARGET_NR_vhangup:
5906 ret = get_errno(vhangup());
5907 break;
5908 #ifdef TARGET_NR_idle
5909 case TARGET_NR_idle:
5910 goto unimplemented;
5911 #endif
5912 #ifdef TARGET_NR_syscall
5913 case TARGET_NR_syscall:
5914 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
5915 break;
5916 #endif
5917 case TARGET_NR_wait4:
5919 int status;
5920 abi_long status_ptr = arg2;
5921 struct rusage rusage, *rusage_ptr;
5922 abi_ulong target_rusage = arg4;
5923 if (target_rusage)
5924 rusage_ptr = &rusage;
5925 else
5926 rusage_ptr = NULL;
5927 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
5928 if (!is_error(ret)) {
5929 if (status_ptr) {
5930 status = host_to_target_waitstatus(status);
5931 if (put_user_s32(status, status_ptr))
5932 goto efault;
5934 if (target_rusage)
5935 host_to_target_rusage(target_rusage, &rusage);
5938 break;
5939 #ifdef TARGET_NR_swapoff
5940 case TARGET_NR_swapoff:
5941 if (!(p = lock_user_string(arg1)))
5942 goto efault;
5943 ret = get_errno(swapoff(p));
5944 unlock_user(p, arg1, 0);
5945 break;
5946 #endif
5947 case TARGET_NR_sysinfo:
5949 struct target_sysinfo *target_value;
5950 struct sysinfo value;
5951 ret = get_errno(sysinfo(&value));
5952 if (!is_error(ret) && arg1)
5954 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
5955 goto efault;
5956 __put_user(value.uptime, &target_value->uptime);
5957 __put_user(value.loads[0], &target_value->loads[0]);
5958 __put_user(value.loads[1], &target_value->loads[1]);
5959 __put_user(value.loads[2], &target_value->loads[2]);
5960 __put_user(value.totalram, &target_value->totalram);
5961 __put_user(value.freeram, &target_value->freeram);
5962 __put_user(value.sharedram, &target_value->sharedram);
5963 __put_user(value.bufferram, &target_value->bufferram);
5964 __put_user(value.totalswap, &target_value->totalswap);
5965 __put_user(value.freeswap, &target_value->freeswap);
5966 __put_user(value.procs, &target_value->procs);
5967 __put_user(value.totalhigh, &target_value->totalhigh);
5968 __put_user(value.freehigh, &target_value->freehigh);
5969 __put_user(value.mem_unit, &target_value->mem_unit);
5970 unlock_user_struct(target_value, arg1, 1);
5973 break;
5974 #ifdef TARGET_NR_ipc
5975 case TARGET_NR_ipc:
5976 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
5977 break;
5978 #endif
5979 #ifdef TARGET_NR_semget
5980 case TARGET_NR_semget:
5981 ret = get_errno(semget(arg1, arg2, arg3));
5982 break;
5983 #endif
5984 #ifdef TARGET_NR_semop
5985 case TARGET_NR_semop:
5986 ret = get_errno(do_semop(arg1, arg2, arg3));
5987 break;
5988 #endif
5989 #ifdef TARGET_NR_semctl
5990 case TARGET_NR_semctl:
5991 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
5992 break;
5993 #endif
5994 #ifdef TARGET_NR_msgctl
5995 case TARGET_NR_msgctl:
5996 ret = do_msgctl(arg1, arg2, arg3);
5997 break;
5998 #endif
5999 #ifdef TARGET_NR_msgget
6000 case TARGET_NR_msgget:
6001 ret = get_errno(msgget(arg1, arg2));
6002 break;
6003 #endif
6004 #ifdef TARGET_NR_msgrcv
6005 case TARGET_NR_msgrcv:
6006 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6007 break;
6008 #endif
6009 #ifdef TARGET_NR_msgsnd
6010 case TARGET_NR_msgsnd:
6011 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6012 break;
6013 #endif
6014 #ifdef TARGET_NR_shmget
6015 case TARGET_NR_shmget:
6016 ret = get_errno(shmget(arg1, arg2, arg3));
6017 break;
6018 #endif
6019 #ifdef TARGET_NR_shmctl
6020 case TARGET_NR_shmctl:
6021 ret = do_shmctl(arg1, arg2, arg3);
6022 break;
6023 #endif
6024 #ifdef TARGET_NR_shmat
6025 case TARGET_NR_shmat:
6026 ret = do_shmat(arg1, arg2, arg3);
6027 break;
6028 #endif
6029 #ifdef TARGET_NR_shmdt
6030 case TARGET_NR_shmdt:
6031 ret = do_shmdt(arg1);
6032 break;
6033 #endif
6034 case TARGET_NR_fsync:
6035 ret = get_errno(fsync(arg1));
6036 break;
6037 case TARGET_NR_clone:
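/* The guest's clone() argument order is architecture dependent: SH4 and
   Alpha swap the last two arguments and CRIS swaps the first two, so
   reorder them into do_fork()'s convention. */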
6038 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6039 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6040 #elif defined(TARGET_CRIS)
6041 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6042 #else
6043 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6044 #endif
6045 break;
6046 #ifdef __NR_exit_group
6047 /* new thread calls */
6048 case TARGET_NR_exit_group:
6049 #ifdef TARGET_GPROF
6050 _mcleanup();
6051 #endif
6052 gdb_exit(cpu_env, arg1);
6053 ret = get_errno(exit_group(arg1));
6054 break;
6055 #endif
6056 case TARGET_NR_setdomainname:
6057 if (!(p = lock_user_string(arg1)))
6058 goto efault;
6059 ret = get_errno(setdomainname(p, arg2));
6060 unlock_user(p, arg1, 0);
6061 break;
6062 case TARGET_NR_uname:
6063 /* no need to transcode because we use the linux syscall */
6065 struct new_utsname * buf;
6067 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6068 goto efault;
6069 ret = get_errno(sys_uname(buf));
6070 if (!is_error(ret)) {
6071 /* Overwrite the native machine name with whatever is being
6072 emulated. */
6073 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6074 /* Allow the user to override the reported release. */
6075 if (qemu_uname_release && *qemu_uname_release)
6076 strcpy (buf->release, qemu_uname_release);
6078 unlock_user_struct(buf, arg1, 1);
6080 break;
6081 #ifdef TARGET_I386
6082 case TARGET_NR_modify_ldt:
6083 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6084 break;
6085 #if !defined(TARGET_X86_64)
6086 case TARGET_NR_vm86old:
6087 goto unimplemented;
6088 case TARGET_NR_vm86:
6089 ret = do_vm86(cpu_env, arg1, arg2);
6090 break;
6091 #endif
6092 #endif
6093 case TARGET_NR_adjtimex:
6094 goto unimplemented;
6095 #ifdef TARGET_NR_create_module
6096 case TARGET_NR_create_module:
6097 #endif
6098 case TARGET_NR_init_module:
6099 case TARGET_NR_delete_module:
6100 #ifdef TARGET_NR_get_kernel_syms
6101 case TARGET_NR_get_kernel_syms:
6102 #endif
6103 goto unimplemented;
6104 case TARGET_NR_quotactl:
6105 goto unimplemented;
6106 case TARGET_NR_getpgid:
6107 ret = get_errno(getpgid(arg1));
6108 break;
6109 case TARGET_NR_fchdir:
6110 ret = get_errno(fchdir(arg1));
6111 break;
6112 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6113 case TARGET_NR_bdflush:
6114 goto unimplemented;
6115 #endif
6116 #ifdef TARGET_NR_sysfs
6117 case TARGET_NR_sysfs:
6118 goto unimplemented;
6119 #endif
6120 case TARGET_NR_personality:
6121 ret = get_errno(personality(arg1));
6122 break;
6123 #ifdef TARGET_NR_afs_syscall
6124 case TARGET_NR_afs_syscall:
6125 goto unimplemented;
6126 #endif
6127 #ifdef TARGET_NR__llseek /* Not on alpha */
6128 case TARGET_NR__llseek:
6130 int64_t res;
6131 #if !defined(__NR_llseek)
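/* Hosts without an llseek syscall take a full 64-bit offset in lseek(),
   so combine the two halves and seek directly. */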
6132 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6133 if (res == -1) {
6134 ret = get_errno(res);
6135 } else {
6136 ret = 0;
6138 #else
6139 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6140 #endif
6141 if ((ret == 0) && put_user_s64(res, arg4)) {
6142 goto efault;
6145 break;
6146 #endif
6147 case TARGET_NR_getdents:
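/* struct linux_dirent contains host 'long' fields, so when the host long
   is wider than the target ABI long each record must be repacked into the
   target layout; otherwise byteswapping the records in place is enough. */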
6148 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6150 struct target_dirent *target_dirp;
6151 struct linux_dirent *dirp;
6152 abi_long count = arg3;
6154 dirp = malloc(count);
6155 if (!dirp) {
6156 ret = -TARGET_ENOMEM;
6157 goto fail;
6160 ret = get_errno(sys_getdents(arg1, dirp, count));
6161 if (!is_error(ret)) {
6162 struct linux_dirent *de;
6163 struct target_dirent *tde;
6164 int len = ret;
6165 int reclen, treclen;
6166 int count1, tnamelen;
6168 count1 = 0;
6169 de = dirp;
6170 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6171 goto efault;
6172 tde = target_dirp;
6173 while (len > 0) {
6174 reclen = de->d_reclen;
6175 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6176 tde->d_reclen = tswap16(treclen);
6177 tde->d_ino = tswapl(de->d_ino);
6178 tde->d_off = tswapl(de->d_off);
6179 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6180 if (tnamelen > 256)
6181 tnamelen = 256;
6182 /* XXX: may not be correct */
6183 pstrcpy(tde->d_name, tnamelen, de->d_name);
6184 de = (struct linux_dirent *)((char *)de + reclen);
6185 len -= reclen;
6186 tde = (struct target_dirent *)((char *)tde + treclen);
6187 count1 += treclen;
6189 ret = count1;
6190 unlock_user(target_dirp, arg2, ret);
6192 free(dirp);
6194 #else
6196 struct linux_dirent *dirp;
6197 abi_long count = arg3;
6199 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6200 goto efault;
6201 ret = get_errno(sys_getdents(arg1, dirp, count));
6202 if (!is_error(ret)) {
6203 struct linux_dirent *de;
6204 int len = ret;
6205 int reclen;
6206 de = dirp;
6207 while (len > 0) {
6208 reclen = de->d_reclen;
6209 if (reclen > len)
6210 break;
6211 de->d_reclen = tswap16(reclen);
6212 tswapls(&de->d_ino);
6213 tswapls(&de->d_off);
6214 de = (struct linux_dirent *)((char *)de + reclen);
6215 len -= reclen;
6218 unlock_user(dirp, arg2, ret);
6220 #endif
6221 break;
6222 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6223 case TARGET_NR_getdents64:
6225 struct linux_dirent64 *dirp;
6226 abi_long count = arg3;
6227 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6228 goto efault;
6229 ret = get_errno(sys_getdents64(arg1, dirp, count));
6230 if (!is_error(ret)) {
6231 struct linux_dirent64 *de;
6232 int len = ret;
6233 int reclen;
6234 de = dirp;
6235 while (len > 0) {
6236 reclen = de->d_reclen;
6237 if (reclen > len)
6238 break;
6239 de->d_reclen = tswap16(reclen);
6240 tswap64s((uint64_t *)&de->d_ino);
6241 tswap64s((uint64_t *)&de->d_off);
6242 de = (struct linux_dirent64 *)((char *)de + reclen);
6243 len -= reclen;
6246 unlock_user(dirp, arg2, ret);
6248 break;
6249 #endif /* TARGET_NR_getdents64 */
6250 #ifdef TARGET_NR__newselect
6251 case TARGET_NR__newselect:
6252 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6253 break;
6254 #endif
6255 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6256 # ifdef TARGET_NR_poll
6257 case TARGET_NR_poll:
6258 # endif
6259 # ifdef TARGET_NR_ppoll
6260 case TARGET_NR_ppoll:
6261 # endif
6263 struct target_pollfd *target_pfd;
6264 unsigned int nfds = arg2;
6265 int timeout = arg3;
6266 struct pollfd *pfd;
6267 unsigned int i;
6269 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6270 if (!target_pfd)
6271 goto efault;
6273 pfd = alloca(sizeof(struct pollfd) * nfds);
6274 for(i = 0; i < nfds; i++) {
6275 pfd[i].fd = tswap32(target_pfd[i].fd);
6276 pfd[i].events = tswap16(target_pfd[i].events);
6279 # ifdef TARGET_NR_ppoll
6280 if (num == TARGET_NR_ppoll) {
6281 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6282 target_sigset_t *target_set;
6283 sigset_t _set, *set = &_set;
6285 if (arg3) {
6286 if (target_to_host_timespec(timeout_ts, arg3)) {
6287 unlock_user(target_pfd, arg1, 0);
6288 goto efault;
6290 } else {
6291 timeout_ts = NULL;
6294 if (arg4) {
6295 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6296 if (!target_set) {
6297 unlock_user(target_pfd, arg1, 0);
6298 goto efault;
6300 target_to_host_sigset(set, target_set);
6301 } else {
6302 set = NULL;
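/* The final argument is the size of the kernel's sigset_t,
   which is _NSIG / 8 bytes. */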
6305 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6307 if (!is_error(ret) && arg3) {
6308 host_to_target_timespec(arg3, timeout_ts);
6310 if (arg4) {
6311 unlock_user(target_set, arg4, 0);
6313 } else
6314 # endif
6315 ret = get_errno(poll(pfd, nfds, timeout));
6317 if (!is_error(ret)) {
6318 for(i = 0; i < nfds; i++) {
6319 target_pfd[i].revents = tswap16(pfd[i].revents);
6322 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6324 break;
6325 #endif
6326 case TARGET_NR_flock:
6327 /* NOTE: the flock constant seems to be the same for every
6328 Linux platform */
6329 ret = get_errno(flock(arg1, arg2));
6330 break;
6331 case TARGET_NR_readv:
6333 int count = arg3;
6334 struct iovec *vec;
6336 vec = alloca(count * sizeof(struct iovec));
6337 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6338 goto efault;
6339 ret = get_errno(readv(arg1, vec, count));
6340 unlock_iovec(vec, arg2, count, 1);
6342 break;
6343 case TARGET_NR_writev:
6345 int count = arg3;
6346 struct iovec *vec;
6348 vec = alloca(count * sizeof(struct iovec));
6349 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6350 goto efault;
6351 ret = get_errno(writev(arg1, vec, count));
6352 unlock_iovec(vec, arg2, count, 0);
6354 break;
6355 case TARGET_NR_getsid:
6356 ret = get_errno(getsid(arg1));
6357 break;
6358 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6359 case TARGET_NR_fdatasync:
6360 ret = get_errno(fdatasync(arg1));
6361 break;
6362 #endif
6363 case TARGET_NR__sysctl:
6364 /* We don't implement this, but ENOTDIR is always a safe
6365 return value. */
6366 ret = -TARGET_ENOTDIR;
6367 break;
6368 case TARGET_NR_sched_getaffinity:
6370 unsigned int mask_size;
6371 unsigned long *mask;
6373 /*
6374 * sched_getaffinity needs multiples of ulong, so need to take
6375 * care of mismatches between target ulong and host ulong sizes.
6376 */
6377 if (arg2 & (sizeof(abi_ulong) - 1)) {
6378 ret = -TARGET_EINVAL;
6379 break;
6381 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6383 mask = alloca(mask_size);
6384 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6386 if (!is_error(ret)) {
6387 if (arg2 > ret) {
6388 /* Zero out any extra space kernel didn't fill */
6389 unsigned long zero = arg2 - ret;
6390 p = alloca(zero);
6391 memset(p, 0, zero);
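/* The kernel filled only the first 'ret' bytes, so the unfilled
   tail starts at arg3 + ret. */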
6392 if (copy_to_user(arg3 + ret, p, zero)) {
6393 goto efault;
6395 arg2 = ret;
6397 if (copy_to_user(arg3, mask, arg2)) {
6398 goto efault;
6400 ret = arg2;
6403 break;
6404 case TARGET_NR_sched_setaffinity:
6406 unsigned int mask_size;
6407 unsigned long *mask;
6409 /*
6410 * sched_setaffinity needs multiples of ulong, so need to take
6411 * care of mismatches between target ulong and host ulong sizes.
6412 */
6413 if (arg2 & (sizeof(abi_ulong) - 1)) {
6414 ret = -TARGET_EINVAL;
6415 break;
6417 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6419 mask = alloca(mask_size);
6420 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6421 goto efault;
6423 memcpy(mask, p, arg2);
6424 unlock_user_struct(p, arg3, 0);
6426 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6428 break;
6429 case TARGET_NR_sched_setparam:
6431 struct sched_param *target_schp;
6432 struct sched_param schp;
6434 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6435 goto efault;
6436 schp.sched_priority = tswap32(target_schp->sched_priority);
6437 unlock_user_struct(target_schp, arg2, 0);
6438 ret = get_errno(sched_setparam(arg1, &schp));
6440 break;
6441 case TARGET_NR_sched_getparam:
6443 struct sched_param *target_schp;
6444 struct sched_param schp;
6445 ret = get_errno(sched_getparam(arg1, &schp));
6446 if (!is_error(ret)) {
6447 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6448 goto efault;
6449 target_schp->sched_priority = tswap32(schp.sched_priority);
6450 unlock_user_struct(target_schp, arg2, 1);
6453 break;
6454 case TARGET_NR_sched_setscheduler:
6456 struct sched_param *target_schp;
6457 struct sched_param schp;
6458 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6459 goto efault;
6460 schp.sched_priority = tswap32(target_schp->sched_priority);
6461 unlock_user_struct(target_schp, arg3, 0);
6462 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6464 break;
6465 case TARGET_NR_sched_getscheduler:
6466 ret = get_errno(sched_getscheduler(arg1));
6467 break;
6468 case TARGET_NR_sched_yield:
6469 ret = get_errno(sched_yield());
6470 break;
6471 case TARGET_NR_sched_get_priority_max:
6472 ret = get_errno(sched_get_priority_max(arg1));
6473 break;
6474 case TARGET_NR_sched_get_priority_min:
6475 ret = get_errno(sched_get_priority_min(arg1));
6476 break;
6477 case TARGET_NR_sched_rr_get_interval:
6479 struct timespec ts;
6480 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6481 if (!is_error(ret)) {
6482 host_to_target_timespec(arg2, &ts);
6485 break;
6486 case TARGET_NR_nanosleep:
6488 struct timespec req, rem;
6489 target_to_host_timespec(&req, arg1);
6490 ret = get_errno(nanosleep(&req, &rem));
6491 if (is_error(ret) && arg2) {
6492 host_to_target_timespec(arg2, &rem);
6495 break;
6496 #ifdef TARGET_NR_query_module
6497 case TARGET_NR_query_module:
6498 goto unimplemented;
6499 #endif
6500 #ifdef TARGET_NR_nfsservctl
6501 case TARGET_NR_nfsservctl:
6502 goto unimplemented;
6503 #endif
6504 case TARGET_NR_prctl:
6505 switch (arg1)
6507 case PR_GET_PDEATHSIG:
6509 int deathsig;
6510 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6511 if (!is_error(ret) && arg2
6512 && put_user_ual(deathsig, arg2))
6513 goto efault;
6515 break;
6516 default:
6517 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6518 break;
6520 break;
6521 #ifdef TARGET_NR_arch_prctl
6522 case TARGET_NR_arch_prctl:
6523 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6524 ret = do_arch_prctl(cpu_env, arg1, arg2);
6525 break;
6526 #else
6527 goto unimplemented;
6528 #endif
6529 #endif
6530 #ifdef TARGET_NR_pread
6531 case TARGET_NR_pread:
6532 #ifdef TARGET_ARM
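/* The ARM EABI aligns the 64-bit offset to an even register pair,
   inserting a padding word, so the offset arrives in arg5. */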
6533 if (((CPUARMState *)cpu_env)->eabi)
6534 arg4 = arg5;
6535 #endif
6536 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6537 goto efault;
6538 ret = get_errno(pread(arg1, p, arg3, arg4));
6539 unlock_user(p, arg2, ret);
6540 break;
6541 case TARGET_NR_pwrite:
6542 #ifdef TARGET_ARM
6543 if (((CPUARMState *)cpu_env)->eabi)
6544 arg4 = arg5;
6545 #endif
6546 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6547 goto efault;
6548 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6549 unlock_user(p, arg2, 0);
6550 break;
6551 #endif
6552 #ifdef TARGET_NR_pread64
6553 case TARGET_NR_pread64:
6554 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6555 goto efault;
6556 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6557 unlock_user(p, arg2, ret);
6558 break;
6559 case TARGET_NR_pwrite64:
6560 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6561 goto efault;
6562 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6563 unlock_user(p, arg2, 0);
6564 break;
6565 #endif
6566 case TARGET_NR_getcwd:
6567 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6568 goto efault;
6569 ret = get_errno(sys_getcwd1(p, arg2));
6570 unlock_user(p, arg1, ret);
6571 break;
6572 case TARGET_NR_capget:
6573 goto unimplemented;
6574 case TARGET_NR_capset:
6575 goto unimplemented;
6576 case TARGET_NR_sigaltstack:
6577 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6578 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6579 defined(TARGET_M68K)
6580 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6581 break;
6582 #else
6583 goto unimplemented;
6584 #endif
6585 case TARGET_NR_sendfile:
6586 goto unimplemented;
6587 #ifdef TARGET_NR_getpmsg
6588 case TARGET_NR_getpmsg:
6589 goto unimplemented;
6590 #endif
6591 #ifdef TARGET_NR_putpmsg
6592 case TARGET_NR_putpmsg:
6593 goto unimplemented;
6594 #endif
6595 #ifdef TARGET_NR_vfork
6596 case TARGET_NR_vfork:
6597 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6598 0, 0, 0, 0));
6599 break;
6600 #endif
6601 #ifdef TARGET_NR_ugetrlimit
6602 case TARGET_NR_ugetrlimit:
6604 struct rlimit rlim;
6605 ret = get_errno(getrlimit(arg1, &rlim));
6606 if (!is_error(ret)) {
6607 struct target_rlimit *target_rlim;
6608 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6609 goto efault;
6610 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6611 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6612 unlock_user_struct(target_rlim, arg2, 1);
6614 break;
6616 #endif
6617 #ifdef TARGET_NR_truncate64
6618 case TARGET_NR_truncate64:
6619 if (!(p = lock_user_string(arg1)))
6620 goto efault;
6621 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6622 unlock_user(p, arg1, 0);
6623 break;
6624 #endif
6625 #ifdef TARGET_NR_ftruncate64
6626 case TARGET_NR_ftruncate64:
6627 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6628 break;
6629 #endif
6630 #ifdef TARGET_NR_stat64
6631 case TARGET_NR_stat64:
6632 if (!(p = lock_user_string(arg1)))
6633 goto efault;
6634 ret = get_errno(stat(path(p), &st));
6635 unlock_user(p, arg1, 0);
6636 if (!is_error(ret))
6637 ret = host_to_target_stat64(cpu_env, arg2, &st);
6638 break;
6639 #endif
6640 #ifdef TARGET_NR_lstat64
6641 case TARGET_NR_lstat64:
6642 if (!(p = lock_user_string(arg1)))
6643 goto efault;
6644 ret = get_errno(lstat(path(p), &st));
6645 unlock_user(p, arg1, 0);
6646 if (!is_error(ret))
6647 ret = host_to_target_stat64(cpu_env, arg2, &st);
6648 break;
6649 #endif
6650 #ifdef TARGET_NR_fstat64
6651 case TARGET_NR_fstat64:
6652 ret = get_errno(fstat(arg1, &st));
6653 if (!is_error(ret))
6654 ret = host_to_target_stat64(cpu_env, arg2, &st);
6655 break;
6656 #endif
6657 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6658 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6659 #ifdef TARGET_NR_fstatat64
6660 case TARGET_NR_fstatat64:
6661 #endif
6662 #ifdef TARGET_NR_newfstatat
6663 case TARGET_NR_newfstatat:
6664 #endif
6665 if (!(p = lock_user_string(arg2)))
6666 goto efault;
6667 #ifdef __NR_fstatat64
6668 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6669 #else
6670 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6671 #endif
6672 if (!is_error(ret))
6673 ret = host_to_target_stat64(cpu_env, arg3, &st);
6674 break;
6675 #endif
6676 #ifdef USE_UID16
6677 case TARGET_NR_lchown:
6678 if (!(p = lock_user_string(arg1)))
6679 goto efault;
6680 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6681 unlock_user(p, arg1, 0);
6682 break;
6683 case TARGET_NR_getuid:
6684 ret = get_errno(high2lowuid(getuid()));
6685 break;
6686 case TARGET_NR_getgid:
6687 ret = get_errno(high2lowgid(getgid()));
6688 break;
6689 case TARGET_NR_geteuid:
6690 ret = get_errno(high2lowuid(geteuid()));
6691 break;
6692 case TARGET_NR_getegid:
6693 ret = get_errno(high2lowgid(getegid()));
6694 break;
6695 case TARGET_NR_setreuid:
6696 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6697 break;
6698 case TARGET_NR_setregid:
6699 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6700 break;
6701 case TARGET_NR_getgroups:
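/* Legacy 16-bit group list: each host gid_t is truncated into a
   16-bit entry in the target buffer. */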
6703 int gidsetsize = arg1;
6704 uint16_t *target_grouplist;
6705 gid_t *grouplist;
6706 int i;
6708 grouplist = alloca(gidsetsize * sizeof(gid_t));
6709 ret = get_errno(getgroups(gidsetsize, grouplist));
6710 if (gidsetsize == 0)
6711 break;
6712 if (!is_error(ret)) {
6713 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6714 if (!target_grouplist)
6715 goto efault;
6716 for(i = 0;i < ret; i++)
6717 target_grouplist[i] = tswap16(grouplist[i]);
6718 unlock_user(target_grouplist, arg2, gidsetsize * 2);
6721 break;
6722 case TARGET_NR_setgroups:
6724 int gidsetsize = arg1;
6725 uint16_t *target_grouplist;
6726 gid_t *grouplist;
6727 int i;
6729 grouplist = alloca(gidsetsize * sizeof(gid_t));
6730 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
6731 if (!target_grouplist) {
6732 ret = -TARGET_EFAULT;
6733 goto fail;
6735 for(i = 0;i < gidsetsize; i++)
6736 grouplist[i] = tswap16(target_grouplist[i]);
6737 unlock_user(target_grouplist, arg2, 0);
6738 ret = get_errno(setgroups(gidsetsize, grouplist));
6740 break;
6741 case TARGET_NR_fchown:
6742 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
6743 break;
6744 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6745 case TARGET_NR_fchownat:
6746 if (!(p = lock_user_string(arg2)))
6747 goto efault;
6748 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
6749 unlock_user(p, arg2, 0);
6750 break;
6751 #endif
6752 #ifdef TARGET_NR_setresuid
6753 case TARGET_NR_setresuid:
6754 ret = get_errno(setresuid(low2highuid(arg1),
6755 low2highuid(arg2),
6756 low2highuid(arg3)));
6757 break;
6758 #endif
6759 #ifdef TARGET_NR_getresuid
6760 case TARGET_NR_getresuid:
6762 uid_t ruid, euid, suid;
6763 ret = get_errno(getresuid(&ruid, &euid, &suid));
6764 if (!is_error(ret)) {
6765 if (put_user_u16(high2lowuid(ruid), arg1)
6766 || put_user_u16(high2lowuid(euid), arg2)
6767 || put_user_u16(high2lowuid(suid), arg3))
6768 goto efault;
6771 break;
6772 #endif
6773 #ifdef TARGET_NR_getresgid
6774 case TARGET_NR_setresgid:
6775 ret = get_errno(setresgid(low2highgid(arg1),
6776 low2highgid(arg2),
6777 low2highgid(arg3)));
6778 break;
6779 #endif
6780 #ifdef TARGET_NR_getresgid
6781 case TARGET_NR_getresgid:
6783 gid_t rgid, egid, sgid;
6784 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6785 if (!is_error(ret)) {
6786 if (put_user_u16(high2lowgid(rgid), arg1)
6787 || put_user_u16(high2lowgid(egid), arg2)
6788 || put_user_u16(high2lowgid(sgid), arg3))
6789 goto efault;
6792 break;
6793 #endif
6794 case TARGET_NR_chown:
6795 if (!(p = lock_user_string(arg1)))
6796 goto efault;
6797 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
6798 unlock_user(p, arg1, 0);
6799 break;
6800 case TARGET_NR_setuid:
6801 ret = get_errno(setuid(low2highuid(arg1)));
6802 break;
6803 case TARGET_NR_setgid:
6804 ret = get_errno(setgid(low2highgid(arg1)));
6805 break;
6806 case TARGET_NR_setfsuid:
6807 ret = get_errno(setfsuid(arg1));
6808 break;
6809 case TARGET_NR_setfsgid:
6810 ret = get_errno(setfsgid(arg1));
6811 break;
6812 #endif /* USE_UID16 */
6814 #ifdef TARGET_NR_lchown32
6815 case TARGET_NR_lchown32:
6816 if (!(p = lock_user_string(arg1)))
6817 goto efault;
6818 ret = get_errno(lchown(p, arg2, arg3));
6819 unlock_user(p, arg1, 0);
6820 break;
6821 #endif
6822 #ifdef TARGET_NR_getuid32
6823 case TARGET_NR_getuid32:
6824 ret = get_errno(getuid());
6825 break;
6826 #endif
6828 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6829 /* Alpha specific */
6830 case TARGET_NR_getxuid:
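/* Alpha's getxuid returns the real uid as the syscall result and
   the effective uid in register a4. */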
6832 uid_t euid;
6833 euid=geteuid();
6834 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
6836 ret = get_errno(getuid());
6837 break;
6838 #endif
6839 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6840 /* Alpha specific */
6841 case TARGET_NR_getxgid:
6843 uid_t egid;
6844 egid=getegid();
6845 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
6847 ret = get_errno(getgid());
6848 break;
6849 #endif
6850 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
6851 /* Alpha specific */
6852 case TARGET_NR_osf_getsysinfo:
6853 ret = -TARGET_EOPNOTSUPP;
6854 switch (arg1) {
6855 case TARGET_GSI_IEEE_FP_CONTROL:
6857 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
6859 /* Copied from linux ieee_fpcr_to_swcr. */
6860 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
6861 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
6862 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
6863 | SWCR_TRAP_ENABLE_DZE
6864 | SWCR_TRAP_ENABLE_OVF);
6865 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
6866 | SWCR_TRAP_ENABLE_INE);
6867 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
6868 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
6870 if (put_user_u64 (swcr, arg2))
6871 goto efault;
6872 ret = 0;
6874 break;
6876 /* case GSI_IEEE_STATE_AT_SIGNAL:
6877 -- Not implemented in linux kernel.
6878 case GSI_UACPROC:
6879 -- Retrieves current unaligned access state; not much used.
6880 case GSI_PROC_TYPE:
6881 -- Retrieves implver information; surely not used.
6882 case GSI_GET_HWRPB:
6883 -- Grabs a copy of the HWRPB; surely not used.
6884 */
6886 break;
6887 #endif
6888 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
6889 /* Alpha specific */
6890 case TARGET_NR_osf_setsysinfo:
6891 ret = -TARGET_EOPNOTSUPP;
6892 switch (arg1) {
6893 case TARGET_SSI_IEEE_FP_CONTROL:
6894 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
6896 uint64_t swcr, fpcr, orig_fpcr;
6898 if (get_user_u64 (swcr, arg2))
6899 goto efault;
6900 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
6901 fpcr = orig_fpcr & FPCR_DYN_MASK;
6903 /* Copied from linux ieee_swcr_to_fpcr. */
6904 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
6905 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
6906 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
6907 | SWCR_TRAP_ENABLE_DZE
6908 | SWCR_TRAP_ENABLE_OVF)) << 48;
6909 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
6910 | SWCR_TRAP_ENABLE_INE)) << 57;
6911 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
6912 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
6914 cpu_alpha_store_fpcr (cpu_env, fpcr);
6915 ret = 0;
6917 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
6918 /* Old exceptions are not signaled. */
6919 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
6921 /* If any exceptions were set by this call and are unmasked,
6922 send a signal. */
6923 /* ??? FIXME */
6926 break;
6928 /* case SSI_NVPAIRS:
6929 -- Used with SSIN_UACPROC to enable unaligned accesses.
6930 case SSI_IEEE_STATE_AT_SIGNAL:
6931 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
6932 -- Not implemented in linux kernel
6933 */
6935 break;
6936 #endif
6937 #ifdef TARGET_NR_osf_sigprocmask
6938 /* Alpha specific. */
6939 case TARGET_NR_osf_sigprocmask:
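/* Alpha's osf_sigprocmask takes the old-style mask by value and
   returns the previous mask as the syscall result. */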
6941 abi_ulong mask;
6942 int how = arg1;
6943 sigset_t set, oldset;
6945 switch(arg1) {
6946 case TARGET_SIG_BLOCK:
6947 how = SIG_BLOCK;
6948 break;
6949 case TARGET_SIG_UNBLOCK:
6950 how = SIG_UNBLOCK;
6951 break;
6952 case TARGET_SIG_SETMASK:
6953 how = SIG_SETMASK;
6954 break;
6955 default:
6956 ret = -TARGET_EINVAL;
6957 goto fail;
6959 mask = arg2;
6960 target_to_host_old_sigset(&set, &mask);
6961 sigprocmask(how, &set, &oldset);
6962 host_to_target_old_sigset(&mask, &oldset);
6963 ret = mask;
6965 break;
6966 #endif
6968 #ifdef TARGET_NR_getgid32
6969 case TARGET_NR_getgid32:
6970 ret = get_errno(getgid());
6971 break;
6972 #endif
6973 #ifdef TARGET_NR_geteuid32
6974 case TARGET_NR_geteuid32:
6975 ret = get_errno(geteuid());
6976 break;
6977 #endif
6978 #ifdef TARGET_NR_getegid32
6979 case TARGET_NR_getegid32:
6980 ret = get_errno(getegid());
6981 break;
6982 #endif
6983 #ifdef TARGET_NR_setreuid32
6984 case TARGET_NR_setreuid32:
6985 ret = get_errno(setreuid(arg1, arg2));
6986 break;
6987 #endif
6988 #ifdef TARGET_NR_setregid32
6989 case TARGET_NR_setregid32:
6990 ret = get_errno(setregid(arg1, arg2));
6991 break;
6992 #endif
6993 #ifdef TARGET_NR_getgroups32
6994 case TARGET_NR_getgroups32:
6996 int gidsetsize = arg1;
6997 uint32_t *target_grouplist;
6998 gid_t *grouplist;
6999 int i;
7001 grouplist = alloca(gidsetsize * sizeof(gid_t));
7002 ret = get_errno(getgroups(gidsetsize, grouplist));
7003 if (gidsetsize == 0)
7004 break;
7005 if (!is_error(ret)) {
7006 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7007 if (!target_grouplist) {
7008 ret = -TARGET_EFAULT;
7009 goto fail;
7011 for(i = 0;i < ret; i++)
7012 target_grouplist[i] = tswap32(grouplist[i]);
7013 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7016 break;
7017 #endif
7018 #ifdef TARGET_NR_setgroups32
7019 case TARGET_NR_setgroups32:
7021 int gidsetsize = arg1;
7022 uint32_t *target_grouplist;
7023 gid_t *grouplist;
7024 int i;
7026 grouplist = alloca(gidsetsize * sizeof(gid_t));
7027 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7028 if (!target_grouplist) {
7029 ret = -TARGET_EFAULT;
7030 goto fail;
7032 for(i = 0;i < gidsetsize; i++)
7033 grouplist[i] = tswap32(target_grouplist[i]);
7034 unlock_user(target_grouplist, arg2, 0);
7035 ret = get_errno(setgroups(gidsetsize, grouplist));
7037 break;
7038 #endif
7039 #ifdef TARGET_NR_fchown32
7040 case TARGET_NR_fchown32:
7041 ret = get_errno(fchown(arg1, arg2, arg3));
7042 break;
7043 #endif
7044 #ifdef TARGET_NR_setresuid32
7045 case TARGET_NR_setresuid32:
7046 ret = get_errno(setresuid(arg1, arg2, arg3));
7047 break;
7048 #endif
7049 #ifdef TARGET_NR_getresuid32
7050 case TARGET_NR_getresuid32:
7052 uid_t ruid, euid, suid;
7053 ret = get_errno(getresuid(&ruid, &euid, &suid));
7054 if (!is_error(ret)) {
7055 if (put_user_u32(ruid, arg1)
7056 || put_user_u32(euid, arg2)
7057 || put_user_u32(suid, arg3))
7058 goto efault;
7061 break;
7062 #endif
7063 #ifdef TARGET_NR_setresgid32
7064 case TARGET_NR_setresgid32:
7065 ret = get_errno(setresgid(arg1, arg2, arg3));
7066 break;
7067 #endif
7068 #ifdef TARGET_NR_getresgid32
7069 case TARGET_NR_getresgid32:
7071 gid_t rgid, egid, sgid;
7072 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7073 if (!is_error(ret)) {
7074 if (put_user_u32(rgid, arg1)
7075 || put_user_u32(egid, arg2)
7076 || put_user_u32(sgid, arg3))
7077 goto efault;
7080 break;
7081 #endif
7082 #ifdef TARGET_NR_chown32
7083 case TARGET_NR_chown32:
7084 if (!(p = lock_user_string(arg1)))
7085 goto efault;
7086 ret = get_errno(chown(p, arg2, arg3));
7087 unlock_user(p, arg1, 0);
7088 break;
7089 #endif
7090 #ifdef TARGET_NR_setuid32
7091 case TARGET_NR_setuid32:
7092 ret = get_errno(setuid(arg1));
7093 break;
7094 #endif
7095 #ifdef TARGET_NR_setgid32
7096 case TARGET_NR_setgid32:
7097 ret = get_errno(setgid(arg1));
7098 break;
7099 #endif
7100 #ifdef TARGET_NR_setfsuid32
7101 case TARGET_NR_setfsuid32:
7102 ret = get_errno(setfsuid(arg1));
7103 break;
7104 #endif
7105 #ifdef TARGET_NR_setfsgid32
7106 case TARGET_NR_setfsgid32:
7107 ret = get_errno(setfsgid(arg1));
7108 break;
7109 #endif
7111 case TARGET_NR_pivot_root:
7112 goto unimplemented;
7113 #ifdef TARGET_NR_mincore
7114 case TARGET_NR_mincore:
7116 void *a;
7117 ret = -TARGET_EFAULT;
7118 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7119 goto efault;
7120 if (!(p = lock_user_string(arg3)))
7121 goto mincore_fail;
7122 ret = get_errno(mincore(a, arg2, p));
7123 unlock_user(p, arg3, ret);
7124 mincore_fail:
7125 unlock_user(a, arg1, 0);
7127 break;
7128 #endif
7129 #ifdef TARGET_NR_arm_fadvise64_64
7130 case TARGET_NR_arm_fadvise64_64:
7132 /*
7133 * arm_fadvise64_64 looks like fadvise64_64 but
7134 * with different argument order
7135 */
7136 abi_long temp;
7137 temp = arg3;
7138 arg3 = arg4;
7139 arg4 = temp;
7141 #endif
7142 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7143 #ifdef TARGET_NR_fadvise64_64
7144 case TARGET_NR_fadvise64_64:
7145 #endif
7146 #ifdef TARGET_NR_fadvise64
7147 case TARGET_NR_fadvise64:
7148 #endif
7149 #ifdef TARGET_S390X
7150 switch (arg4) {
7151 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7152 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7153 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7154 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7155 default: break;
7157 #endif
7158 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7159 break;
7160 #endif
7161 #ifdef TARGET_NR_madvise
7162 case TARGET_NR_madvise:
7163 /* A straight passthrough may not be safe because qemu sometimes
7164 turns private file-backed mappings into anonymous mappings.
7165 This will break MADV_DONTNEED.
7166 This is a hint, so ignoring and returning success is ok. */
7167 ret = get_errno(0);
7168 break;
7169 #endif
7170 #if TARGET_ABI_BITS == 32
7171 case TARGET_NR_fcntl64:
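/* 64-bit file locks: the target's flock64 layout (and the ARM EABI
   variant) differs from the host's, so convert each field explicitly. */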
7173 int cmd;
7174 struct flock64 fl;
7175 struct target_flock64 *target_fl;
7176 #ifdef TARGET_ARM
7177 struct target_eabi_flock64 *target_efl;
7178 #endif
7180 cmd = target_to_host_fcntl_cmd(arg2);
7181 if (cmd == -TARGET_EINVAL)
7182 return cmd;
7184 switch(arg2) {
7185 case TARGET_F_GETLK64:
7186 #ifdef TARGET_ARM
7187 if (((CPUARMState *)cpu_env)->eabi) {
7188 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7189 goto efault;
7190 fl.l_type = tswap16(target_efl->l_type);
7191 fl.l_whence = tswap16(target_efl->l_whence);
7192 fl.l_start = tswap64(target_efl->l_start);
7193 fl.l_len = tswap64(target_efl->l_len);
7194 fl.l_pid = tswap32(target_efl->l_pid);
7195 unlock_user_struct(target_efl, arg3, 0);
7196 } else
7197 #endif
7199 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7200 goto efault;
7201 fl.l_type = tswap16(target_fl->l_type);
7202 fl.l_whence = tswap16(target_fl->l_whence);
7203 fl.l_start = tswap64(target_fl->l_start);
7204 fl.l_len = tswap64(target_fl->l_len);
7205 fl.l_pid = tswap32(target_fl->l_pid);
7206 unlock_user_struct(target_fl, arg3, 0);
7208 ret = get_errno(fcntl(arg1, cmd, &fl));
7209 if (ret == 0) {
7210 #ifdef TARGET_ARM
7211 if (((CPUARMState *)cpu_env)->eabi) {
7212 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7213 goto efault;
7214 target_efl->l_type = tswap16(fl.l_type);
7215 target_efl->l_whence = tswap16(fl.l_whence);
7216 target_efl->l_start = tswap64(fl.l_start);
7217 target_efl->l_len = tswap64(fl.l_len);
7218 target_efl->l_pid = tswap32(fl.l_pid);
7219 unlock_user_struct(target_efl, arg3, 1);
7220 } else
7221 #endif
7223 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7224 goto efault;
7225 target_fl->l_type = tswap16(fl.l_type);
7226 target_fl->l_whence = tswap16(fl.l_whence);
7227 target_fl->l_start = tswap64(fl.l_start);
7228 target_fl->l_len = tswap64(fl.l_len);
7229 target_fl->l_pid = tswap32(fl.l_pid);
7230 unlock_user_struct(target_fl, arg3, 1);
7233 break;
7235 case TARGET_F_SETLK64:
7236 case TARGET_F_SETLKW64:
7237 #ifdef TARGET_ARM
7238 if (((CPUARMState *)cpu_env)->eabi) {
7239 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7240 goto efault;
7241 fl.l_type = tswap16(target_efl->l_type);
7242 fl.l_whence = tswap16(target_efl->l_whence);
7243 fl.l_start = tswap64(target_efl->l_start);
7244 fl.l_len = tswap64(target_efl->l_len);
7245 fl.l_pid = tswap32(target_efl->l_pid);
7246 unlock_user_struct(target_efl, arg3, 0);
7247 } else
7248 #endif
7250 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7251 goto efault;
7252 fl.l_type = tswap16(target_fl->l_type);
7253 fl.l_whence = tswap16(target_fl->l_whence);
7254 fl.l_start = tswap64(target_fl->l_start);
7255 fl.l_len = tswap64(target_fl->l_len);
7256 fl.l_pid = tswap32(target_fl->l_pid);
7257 unlock_user_struct(target_fl, arg3, 0);
7259 ret = get_errno(fcntl(arg1, cmd, &fl));
7260 break;
7261 default:
7262 ret = do_fcntl(arg1, arg2, arg3);
7263 break;
7265 break;
7267 #endif
7268 #ifdef TARGET_NR_cacheflush
7269 case TARGET_NR_cacheflush:
7270 /* self-modifying code is handled automatically, so nothing needed */
7271 ret = 0;
7272 break;
7273 #endif
7274 #ifdef TARGET_NR_security
7275 case TARGET_NR_security:
7276 goto unimplemented;
7277 #endif
7278 #ifdef TARGET_NR_getpagesize
7279 case TARGET_NR_getpagesize:
7280 ret = TARGET_PAGE_SIZE;
7281 break;
7282 #endif
7283 case TARGET_NR_gettid:
7284 ret = get_errno(gettid());
7285 break;
7286 #ifdef TARGET_NR_readahead
7287 case TARGET_NR_readahead:
7288 #if TARGET_ABI_BITS == 32
7289 #ifdef TARGET_ARM
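/* The ARM EABI aligns the 64-bit offset to an even register pair,
   so shift the arguments down by one register. */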
7290 if (((CPUARMState *)cpu_env)->eabi)
7292 arg2 = arg3;
7293 arg3 = arg4;
7294 arg4 = arg5;
7296 #endif
7297 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7298 #else
7299 ret = get_errno(readahead(arg1, arg2, arg3));
7300 #endif
7301 break;
7302 #endif
7303 #ifdef TARGET_NR_setxattr
7304 case TARGET_NR_setxattr:
7305 case TARGET_NR_lsetxattr:
7306 case TARGET_NR_fsetxattr:
7307 case TARGET_NR_getxattr:
7308 case TARGET_NR_lgetxattr:
7309 case TARGET_NR_fgetxattr:
7310 case TARGET_NR_listxattr:
7311 case TARGET_NR_llistxattr:
7312 case TARGET_NR_flistxattr:
7313 case TARGET_NR_removexattr:
7314 case TARGET_NR_lremovexattr:
7315 case TARGET_NR_fremovexattr:
7316 ret = -TARGET_EOPNOTSUPP;
7317 break;
7318 #endif
7319 #ifdef TARGET_NR_set_thread_area
7320 case TARGET_NR_set_thread_area:
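/* Where the TLS pointer lives is target specific: MIPS keeps it in the
   CPU state, CRIS stores it in the PID special register and 32-bit x86
   installs a GDT entry. */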
7321 #if defined(TARGET_MIPS)
7322 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7323 ret = 0;
7324 break;
7325 #elif defined(TARGET_CRIS)
7326 if (arg1 & 0xff)
7327 ret = -TARGET_EINVAL;
7328 else {
7329 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7330 ret = 0;
7332 break;
7333 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7334 ret = do_set_thread_area(cpu_env, arg1);
7335 break;
7336 #else
7337 goto unimplemented_nowarn;
7338 #endif
7339 #endif
7340 #ifdef TARGET_NR_get_thread_area
7341 case TARGET_NR_get_thread_area:
7342 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7343 ret = do_get_thread_area(cpu_env, arg1);
7344 #else
7345 goto unimplemented_nowarn;
7346 #endif
7347 #endif
7348 #ifdef TARGET_NR_getdomainname
7349 case TARGET_NR_getdomainname:
7350 goto unimplemented_nowarn;
7351 #endif
7353 #ifdef TARGET_NR_clock_gettime
7354 case TARGET_NR_clock_gettime:
7356 struct timespec ts;
7357 ret = get_errno(clock_gettime(arg1, &ts));
7358 if (!is_error(ret)) {
7359 host_to_target_timespec(arg2, &ts);
7361 break;
7363 #endif
7364 #ifdef TARGET_NR_clock_getres
7365 case TARGET_NR_clock_getres:
7367 struct timespec ts;
7368 ret = get_errno(clock_getres(arg1, &ts));
7369 if (!is_error(ret)) {
7370 host_to_target_timespec(arg2, &ts);
7372 break;
7374 #endif
7375 #ifdef TARGET_NR_clock_nanosleep
7376 case TARGET_NR_clock_nanosleep:
7378 struct timespec ts;
7379 target_to_host_timespec(&ts, arg3);
7380 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7381 if (arg4)
7382 host_to_target_timespec(arg4, &ts);
7383 break;
7385 #endif
7387 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7388 case TARGET_NR_set_tid_address:
7389 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7390 break;
7391 #endif
7393 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7394 case TARGET_NR_tkill:
7395 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7396 break;
7397 #endif
7399 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7400 case TARGET_NR_tgkill:
7401 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7402 target_to_host_signal(arg3)));
7403 break;
7404 #endif
7406 #ifdef TARGET_NR_set_robust_list
7407 case TARGET_NR_set_robust_list:
7408 goto unimplemented_nowarn;
7409 #endif
7411 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7412 case TARGET_NR_utimensat:
7414 struct timespec *tsp, ts[2];
7415 if (!arg3) {
7416 tsp = NULL;
7417 } else {
7418 target_to_host_timespec(ts, arg3);
7419 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
7420 tsp = ts;
7422 if (!arg2)
7423 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7424 else {
7425 if (!(p = lock_user_string(arg2))) {
7426 ret = -TARGET_EFAULT;
7427 goto fail;
7429 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7430 unlock_user(p, arg2, 0);
7433 break;
7434 #endif
7435 #if defined(CONFIG_USE_NPTL)
7436 case TARGET_NR_futex:
7437 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7438 break;
7439 #endif
7440 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7441 case TARGET_NR_inotify_init:
7442 ret = get_errno(sys_inotify_init());
7443 break;
7444 #endif
7445 #ifdef CONFIG_INOTIFY1
7446 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7447 case TARGET_NR_inotify_init1:
7448 ret = get_errno(sys_inotify_init1(arg1));
7449 break;
7450 #endif
7451 #endif
7452 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7453 case TARGET_NR_inotify_add_watch:
7454 p = lock_user_string(arg2);
7455 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7456 unlock_user(p, arg2, 0);
7457 break;
7458 #endif
7459 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7460 case TARGET_NR_inotify_rm_watch:
7461 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7462 break;
7463 #endif
7465 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7466 case TARGET_NR_mq_open:
7468 struct mq_attr posix_mq_attr;
7470 p = lock_user_string(arg1 - 1);
7471 if (arg4 != 0)
7472 copy_from_user_mq_attr (&posix_mq_attr, arg4);
7473 ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL));
7474 unlock_user (p, arg1, 0);
7476 break;
7478 case TARGET_NR_mq_unlink:
7479 p = lock_user_string(arg1 - 1);
7480 ret = get_errno(mq_unlink(p));
7481 unlock_user (p, arg1, 0);
7482 break;
7484 case TARGET_NR_mq_timedsend:
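/* With no timeout supplied, fall back to the untimed mq_send(). */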
7486 struct timespec ts;
7488 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7489 if (arg5 != 0) {
7490 target_to_host_timespec(&ts, arg5);
7491 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7492 host_to_target_timespec(arg5, &ts);
7494 else
7495 ret = get_errno(mq_send(arg1, p, arg3, arg4));
7496 unlock_user (p, arg2, arg3);
7498 break;
7500 case TARGET_NR_mq_timedreceive:
7502 struct timespec ts;
7503 unsigned int prio;
7505 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7506 if (arg5 != 0) {
7507 target_to_host_timespec(&ts, arg5);
7508 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
7509 host_to_target_timespec(arg5, &ts);
7511 else
7512 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
7513 unlock_user (p, arg2, arg3);
7514 if (arg4 != 0)
7515 put_user_u32(prio, arg4);
7517 break;
7519 /* Not implemented for now... */
7520 /* case TARGET_NR_mq_notify: */
7521 /* break; */
7523 case TARGET_NR_mq_getsetattr:
7525 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
7526 ret = 0;
7527 if (arg3 != 0) {
7528 ret = mq_getattr(arg1, &posix_mq_attr_out);
7529 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
7531 if (arg2 != 0) {
7532 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
7533 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
7537 break;
7538 #endif
7540 #ifdef CONFIG_SPLICE
7541 #ifdef TARGET_NR_tee
7542 case TARGET_NR_tee:
7544 ret = get_errno(tee(arg1,arg2,arg3,arg4));
7546 break;
7547 #endif
7548 #ifdef TARGET_NR_splice
7549 case TARGET_NR_splice:
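/* Copy the optional 64-bit offsets in from guest memory.
   XXX: the offsets updated by the kernel are not copied back. */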
7551 loff_t loff_in, loff_out;
7552 loff_t *ploff_in = NULL, *ploff_out = NULL;
7553 if(arg2) {
7554 get_user_u64(loff_in, arg2);
7555 ploff_in = &loff_in;
7557 if(arg4) {
7558 get_user_u64(loff_out, arg4);
7559 ploff_out = &loff_out;
7561 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
7563 break;
7564 #endif
7565 #ifdef TARGET_NR_vmsplice
7566 case TARGET_NR_vmsplice:
7568 int count = arg3;
7569 struct iovec *vec;
7571 vec = alloca(count * sizeof(struct iovec));
7572 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7573 goto efault;
7574 ret = get_errno(vmsplice(arg1, vec, count, arg4));
7575 unlock_iovec(vec, arg2, count, 0);
7577 break;
7578 #endif
7579 #endif /* CONFIG_SPLICE */
7580 #ifdef CONFIG_EVENTFD
7581 #if defined(TARGET_NR_eventfd)
7582 case TARGET_NR_eventfd:
7583 ret = get_errno(eventfd(arg1, 0));
7584 break;
7585 #endif
7586 #if defined(TARGET_NR_eventfd2)
7587 case TARGET_NR_eventfd2:
7588 ret = get_errno(eventfd(arg1, arg2));
7589 break;
7590 #endif
7591 #endif /* CONFIG_EVENTFD */
7592 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7593 case TARGET_NR_fallocate:
7594 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
7595 break;
7596 #endif
7597 #if defined(CONFIG_SYNC_FILE_RANGE)
7598 #if defined(TARGET_NR_sync_file_range)
7599 case TARGET_NR_sync_file_range:
7600 #if TARGET_ABI_BITS == 32
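/* 32-bit ABIs pass each 64-bit offset as a pair of registers. */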
7601 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
7602 target_offset64(arg4, arg5), arg6));
7603 #else
7604 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
7605 #endif
7606 break;
7607 #endif
7608 #if defined(TARGET_NR_sync_file_range2)
7609 case TARGET_NR_sync_file_range2:
7610 /* This is like sync_file_range but the arguments are reordered */
7611 #if TARGET_ABI_BITS == 32
7612 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7613 target_offset64(arg5, arg6), arg2));
7614 #else
7615 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
7616 #endif
7617 break;
7618 #endif
7619 #endif
7620 #if defined(CONFIG_EPOLL)
7621 #if defined(TARGET_NR_epoll_create)
7622 case TARGET_NR_epoll_create:
7623 ret = get_errno(epoll_create(arg1));
7624 break;
7625 #endif
7626 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7627 case TARGET_NR_epoll_create1:
7628 ret = get_errno(epoll_create1(arg1));
7629 break;
7630 #endif
7631 #if defined(TARGET_NR_epoll_ctl)
7632 case TARGET_NR_epoll_ctl:
7634 struct epoll_event ep;
7635 struct epoll_event *epp = 0;
7636 if (arg4) {
7637 struct target_epoll_event *target_ep;
7638 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
7639 goto efault;
7641 ep.events = tswap32(target_ep->events);
7642 /* The epoll_data_t union is just opaque data to the kernel,
7643 * so we transfer all 64 bits across and need not worry what
7644 * actual data type it is.
7645 */
7646 ep.data.u64 = tswap64(target_ep->data.u64);
7647 unlock_user_struct(target_ep, arg4, 0);
7648 epp = &ep;
7650 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
7651 break;
7653 #endif
7655 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7656 #define IMPLEMENT_EPOLL_PWAIT
7657 #endif
7658 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7659 #if defined(TARGET_NR_epoll_wait)
7660 case TARGET_NR_epoll_wait:
7661 #endif
7662 #if defined(IMPLEMENT_EPOLL_PWAIT)
7663 case TARGET_NR_epoll_pwait:
7664 #endif
7666 struct target_epoll_event *target_ep;
7667 struct epoll_event *ep;
7668 int epfd = arg1;
7669 int maxevents = arg3;
7670 int timeout = arg4;
7672 target_ep = lock_user(VERIFY_WRITE, arg2,
7673 maxevents * sizeof(struct target_epoll_event), 1);
7674 if (!target_ep) {
7675 goto efault;
7678 ep = alloca(maxevents * sizeof(struct epoll_event));
7680 switch (num) {
7681 #if defined(IMPLEMENT_EPOLL_PWAIT)
7682 case TARGET_NR_epoll_pwait:
7684 target_sigset_t *target_set;
7685 sigset_t _set, *set = &_set;
7687 if (arg5) {
7688 target_set = lock_user(VERIFY_READ, arg5,
7689 sizeof(target_sigset_t), 1);
7690 if (!target_set) {
7691 unlock_user(target_ep, arg2, 0);
7692 goto efault;
7694 target_to_host_sigset(set, target_set);
7695 unlock_user(target_set, arg5, 0);
7696 } else {
7697 set = NULL;
7700 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
7701 break;
7703 #endif
7704 #if defined(TARGET_NR_epoll_wait)
7705 case TARGET_NR_epoll_wait:
7706 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
7707 break;
7708 #endif
7709 default:
7710 ret = -TARGET_ENOSYS;
7712 if (!is_error(ret)) {
7713 int i;
7714 for (i = 0; i < ret; i++) {
7715 target_ep[i].events = tswap32(ep[i].events);
7716 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
7719 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
7720 break;
7722 #endif
7723 #endif
7724 default:
7725 unimplemented:
7726 gemu_log("qemu: Unsupported syscall: %d\n", num);
7727 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
7728 unimplemented_nowarn:
7729 #endif
7730 ret = -TARGET_ENOSYS;
7731 break;
7733 fail:
7734 #ifdef DEBUG
7735 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
7736 #endif
7737 if(do_strace)
7738 print_syscall_ret(num, ret);
7739 return ret;
7740 efault:
7741 ret = -TARGET_EFAULT;
7742 goto fail;