4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
31 #include <sys/types.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
45 int __clone2(int (*fn
)(void *), void *child_stack_base
,
46 size_t stack_size
, int flags
, void *arg
, ...);
48 #include <sys/socket.h>
52 #include <sys/times.h>
55 #include <sys/statfs.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include <qemu-common.h>
68 #include <sys/eventfd.h>
71 #include <sys/epoll.h>
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/utsname.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
88 #include <linux/mtio.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
95 #include "linux_loop.h"
96 #include "cpu-uname.h"
99 #include "qemu-common.h"
101 #if defined(CONFIG_USE_NPTL)
102 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
103 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
105 /* XXX: Hardcode the above values. */
106 #define CLONE_NPTL_FLAGS2 0
111 //#include <linux/msdos_fs.h>
112 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
113 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* _syscallN(type, name, ...): expand to a static function 'name' that
 * invokes the raw host system call __NR_<name> with N arguments via
 * syscall(2).  Used for calls the host libc does not (reliably) wrap. */
#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
		  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}
171 #define __NR_sys_uname __NR_uname
172 #define __NR_sys_faccessat __NR_faccessat
173 #define __NR_sys_fchmodat __NR_fchmodat
174 #define __NR_sys_fchownat __NR_fchownat
175 #define __NR_sys_fstatat64 __NR_fstatat64
176 #define __NR_sys_futimesat __NR_futimesat
177 #define __NR_sys_getcwd1 __NR_getcwd
178 #define __NR_sys_getdents __NR_getdents
179 #define __NR_sys_getdents64 __NR_getdents64
180 #define __NR_sys_getpriority __NR_getpriority
181 #define __NR_sys_linkat __NR_linkat
182 #define __NR_sys_mkdirat __NR_mkdirat
183 #define __NR_sys_mknodat __NR_mknodat
184 #define __NR_sys_newfstatat __NR_newfstatat
185 #define __NR_sys_openat __NR_openat
186 #define __NR_sys_readlinkat __NR_readlinkat
187 #define __NR_sys_renameat __NR_renameat
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_symlinkat __NR_symlinkat
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_unlinkat __NR_unlinkat
194 #define __NR_sys_utimensat __NR_utimensat
195 #define __NR_sys_futex __NR_futex
196 #define __NR_sys_inotify_init __NR_inotify_init
197 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
198 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
200 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
202 #define __NR__llseek __NR_lseek
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
214 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
215 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
216 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
218 _syscall2(int, sys_getpriority
, int, which
, int, who
);
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
221 loff_t
*, res
, uint
, wh
);
223 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
224 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
225 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
226 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
228 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
229 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group
,int,error_code
)
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address
,int *,tidptr
)
237 #if defined(CONFIG_USE_NPTL)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
240 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
245 unsigned long *, user_mask_ptr
);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
248 unsigned long *, user_mask_ptr
);
250 static bitmask_transtbl fcntl_flags_tbl
[] = {
251 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
252 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
253 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
254 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
255 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
256 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
257 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
258 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
259 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
260 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
261 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
262 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
263 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
264 #if defined(O_DIRECT)
265 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
/* Copy one utsname string field, guaranteeing NUL termination. */
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

/* Emulate uname(2) for the target: fill a kernel-layout struct
 * new_utsname from the host's struct utsname.
 * Returns 0 on success, -1 on error (errno set by uname()). */
static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  /* domainname is a GNU extension of the host struct utsname. */
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);
#undef COPY_UTSNAME_FIELD
}
/* getcwd wrapper following kernel semantics: on success return the
 * number of bytes placed in buf including the trailing NUL; on failure
 * return -1 (getcwd() sets errno, e.g. ERANGE if size is too small). */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
315 * Host system seems to have atfile syscall stubs available. We
316 * now enable them one by one as specified by target syscall_nr.h.
319 #ifdef TARGET_NR_faccessat
/* faccessat(2) relative to dirfd; the 'flags' argument is fixed to 0. */
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
  return (faccessat(dirfd, pathname, mode, 0));
}
325 #ifdef TARGET_NR_fchmodat
/* fchmodat(2) relative to dirfd; the 'flags' argument is fixed to 0. */
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
  return (fchmodat(dirfd, pathname, mode, 0));
}
331 #if defined(TARGET_NR_fchownat)
/* fchownat(2) relative to dirfd; flags pass straight through
 * (e.g. AT_SYMLINK_NOFOLLOW). */
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
  return (fchownat(dirfd, pathname, owner, group, flags));
}
338 #ifdef __NR_fstatat64
/* fstatat(2) relative to dirfd; flags pass straight through. */
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
345 #ifdef __NR_newfstatat
/* newfstatat: same host-side implementation as sys_fstatat64; only the
 * syscall number differs per architecture. */
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
352 #ifdef TARGET_NR_futimesat
/* futimesat(2) relative to dirfd; times==NULL sets the current time. */
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
  return (futimesat(dirfd, pathname, times));
}
359 #ifdef TARGET_NR_linkat
/* linkat(2): create a hard link, both paths relative to their dirfds. */
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
  return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
366 #ifdef TARGET_NR_mkdirat
/* mkdirat(2) relative to dirfd. */
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
  return (mkdirat(dirfd, pathname, mode));
}
372 #ifdef TARGET_NR_mknodat
373 static int sys_mknodat(int dirfd
, const char *pathname
, mode_t mode
,
376 return (mknodat(dirfd
, pathname
, mode
, dev
));
379 #ifdef TARGET_NR_openat
380 static int sys_openat(int dirfd
, const char *pathname
, int flags
, ...)
383 * open(2) has extra parameter 'mode' when called with
386 if ((flags
& O_CREAT
) != 0) {
391 * Get the 'mode' parameter and translate it to
395 mode
= va_arg(ap
, mode_t
);
396 mode
= target_to_host_bitmask(mode
, fcntl_flags_tbl
);
399 return (openat(dirfd
, pathname
, flags
, mode
));
401 return (openat(dirfd
, pathname
, flags
));
404 #ifdef TARGET_NR_readlinkat
/* readlinkat(2) relative to dirfd; buf is NOT NUL-terminated, the return
 * value is the number of bytes placed in buf (or -1 on error). */
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
  return (readlinkat(dirfd, pathname, buf, bufsiz));
}
410 #ifdef TARGET_NR_renameat
/* renameat(2): atomically rename, both paths relative to their dirfds. */
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
  return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
417 #ifdef TARGET_NR_symlinkat
/* symlinkat(2): create a symlink at newpath (relative to newdirfd)
 * pointing at oldpath. */
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
  return (symlinkat(oldpath, newdirfd, newpath));
}
423 #ifdef TARGET_NR_unlinkat
/* unlinkat(2) relative to dirfd; AT_REMOVEDIR in flags removes a
 * directory instead of a file. */
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
  return (unlinkat(dirfd, pathname, flags));
}
429 #else /* !CONFIG_ATFILE */
432 * Try direct syscalls instead
434 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
435 _syscall3(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
)
437 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
438 _syscall3(int,sys_fchmodat
,int,dirfd
,const char *,pathname
, mode_t
,mode
)
440 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
441 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
442 uid_t
,owner
,gid_t
,group
,int,flags
)
444 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
445 defined(__NR_fstatat64)
446 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
447 struct stat
*,buf
,int,flags
)
449 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
450 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
451 const struct timeval
*,times
)
453 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
454 defined(__NR_newfstatat)
455 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
456 struct stat
*,buf
,int,flags
)
458 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
459 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
460 int,newdirfd
,const char *,newpath
,int,flags
)
462 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
463 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
465 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
466 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
467 mode_t
,mode
,dev_t
,dev
)
469 #if defined(TARGET_NR_openat) && defined(__NR_openat)
470 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
472 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
473 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
474 char *,buf
,size_t,bufsize
)
476 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
477 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
478 int,newdirfd
,const char *,newpath
)
480 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
481 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
482 int,newdirfd
,const char *,newpath
)
484 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
485 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
488 #endif /* CONFIG_ATFILE */
490 #ifdef CONFIG_UTIMENSAT
491 static int sys_utimensat(int dirfd
, const char *pathname
,
492 const struct timespec times
[2], int flags
)
494 if (pathname
== NULL
)
495 return futimens(dirfd
, times
);
497 return utimensat(dirfd
, pathname
, times
, flags
);
500 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
501 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
502 const struct timespec
*,tsp
,int,flags
)
504 #endif /* CONFIG_UTIMENSAT */
506 #ifdef CONFIG_INOTIFY
507 #include <sys/inotify.h>
509 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* inotify_init(2) wrapper; returns a new inotify fd or -1. */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
515 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* inotify_add_watch(2) wrapper; returns a watch descriptor or -1. */
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
521 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* inotify_rm_watch(2) wrapper; removes watch 'wd' from inotify fd 'fd'. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
527 #ifdef CONFIG_INOTIFY1
528 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* inotify_init1(2) wrapper; flags may include IN_NONBLOCK/IN_CLOEXEC. */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
536 /* Userspace can usually survive runtime without inotify */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
/* Force ENOSYS from the kernel if the host lacks ppoll. */
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif
553 extern int personality(int);
554 extern int flock(int, int);
555 extern int setfsuid(int);
556 extern int setfsgid(int);
557 extern int setgroups(int, gid_t
*);
559 #define ERRNO_TABLE_SIZE 1200
561 /* target_to_host_errno_table[] is initialized from
562 * host_to_target_errno_table[] in syscall_init(). */
563 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
567 * This list is the union of errno values overridden in asm-<arch>/errno.h
568 * minus the errnos that are not actually generic to all archs.
570 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
571 [EIDRM
] = TARGET_EIDRM
,
572 [ECHRNG
] = TARGET_ECHRNG
,
573 [EL2NSYNC
] = TARGET_EL2NSYNC
,
574 [EL3HLT
] = TARGET_EL3HLT
,
575 [EL3RST
] = TARGET_EL3RST
,
576 [ELNRNG
] = TARGET_ELNRNG
,
577 [EUNATCH
] = TARGET_EUNATCH
,
578 [ENOCSI
] = TARGET_ENOCSI
,
579 [EL2HLT
] = TARGET_EL2HLT
,
580 [EDEADLK
] = TARGET_EDEADLK
,
581 [ENOLCK
] = TARGET_ENOLCK
,
582 [EBADE
] = TARGET_EBADE
,
583 [EBADR
] = TARGET_EBADR
,
584 [EXFULL
] = TARGET_EXFULL
,
585 [ENOANO
] = TARGET_ENOANO
,
586 [EBADRQC
] = TARGET_EBADRQC
,
587 [EBADSLT
] = TARGET_EBADSLT
,
588 [EBFONT
] = TARGET_EBFONT
,
589 [ENOSTR
] = TARGET_ENOSTR
,
590 [ENODATA
] = TARGET_ENODATA
,
591 [ETIME
] = TARGET_ETIME
,
592 [ENOSR
] = TARGET_ENOSR
,
593 [ENONET
] = TARGET_ENONET
,
594 [ENOPKG
] = TARGET_ENOPKG
,
595 [EREMOTE
] = TARGET_EREMOTE
,
596 [ENOLINK
] = TARGET_ENOLINK
,
597 [EADV
] = TARGET_EADV
,
598 [ESRMNT
] = TARGET_ESRMNT
,
599 [ECOMM
] = TARGET_ECOMM
,
600 [EPROTO
] = TARGET_EPROTO
,
601 [EDOTDOT
] = TARGET_EDOTDOT
,
602 [EMULTIHOP
] = TARGET_EMULTIHOP
,
603 [EBADMSG
] = TARGET_EBADMSG
,
604 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
605 [EOVERFLOW
] = TARGET_EOVERFLOW
,
606 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
607 [EBADFD
] = TARGET_EBADFD
,
608 [EREMCHG
] = TARGET_EREMCHG
,
609 [ELIBACC
] = TARGET_ELIBACC
,
610 [ELIBBAD
] = TARGET_ELIBBAD
,
611 [ELIBSCN
] = TARGET_ELIBSCN
,
612 [ELIBMAX
] = TARGET_ELIBMAX
,
613 [ELIBEXEC
] = TARGET_ELIBEXEC
,
614 [EILSEQ
] = TARGET_EILSEQ
,
615 [ENOSYS
] = TARGET_ENOSYS
,
616 [ELOOP
] = TARGET_ELOOP
,
617 [ERESTART
] = TARGET_ERESTART
,
618 [ESTRPIPE
] = TARGET_ESTRPIPE
,
619 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
620 [EUSERS
] = TARGET_EUSERS
,
621 [ENOTSOCK
] = TARGET_ENOTSOCK
,
622 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
623 [EMSGSIZE
] = TARGET_EMSGSIZE
,
624 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
625 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
626 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
627 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
628 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
629 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
630 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
631 [EADDRINUSE
] = TARGET_EADDRINUSE
,
632 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
633 [ENETDOWN
] = TARGET_ENETDOWN
,
634 [ENETUNREACH
] = TARGET_ENETUNREACH
,
635 [ENETRESET
] = TARGET_ENETRESET
,
636 [ECONNABORTED
] = TARGET_ECONNABORTED
,
637 [ECONNRESET
] = TARGET_ECONNRESET
,
638 [ENOBUFS
] = TARGET_ENOBUFS
,
639 [EISCONN
] = TARGET_EISCONN
,
640 [ENOTCONN
] = TARGET_ENOTCONN
,
641 [EUCLEAN
] = TARGET_EUCLEAN
,
642 [ENOTNAM
] = TARGET_ENOTNAM
,
643 [ENAVAIL
] = TARGET_ENAVAIL
,
644 [EISNAM
] = TARGET_EISNAM
,
645 [EREMOTEIO
] = TARGET_EREMOTEIO
,
646 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
647 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
648 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
649 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
650 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
651 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
652 [EALREADY
] = TARGET_EALREADY
,
653 [EINPROGRESS
] = TARGET_EINPROGRESS
,
654 [ESTALE
] = TARGET_ESTALE
,
655 [ECANCELED
] = TARGET_ECANCELED
,
656 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
657 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
659 [ENOKEY
] = TARGET_ENOKEY
,
662 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
665 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
668 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
671 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
673 #ifdef ENOTRECOVERABLE
674 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
678 static inline int host_to_target_errno(int err
)
680 if(host_to_target_errno_table
[err
])
681 return host_to_target_errno_table
[err
];
685 static inline int target_to_host_errno(int err
)
687 if (target_to_host_errno_table
[err
])
688 return target_to_host_errno_table
[err
];
692 static inline abi_long
get_errno(abi_long ret
)
695 return -host_to_target_errno(errno
);
700 static inline int is_error(abi_long ret
)
702 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
/* strerror() for a *target* errno value: translate to host numbering
 * first, then use the host's message table. */
char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
710 static abi_ulong target_brk
;
711 static abi_ulong target_original_brk
;
713 void target_set_brk(abi_ulong new_brk
)
715 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
718 /* do_brk() must return target values and target errnos. */
719 abi_long
do_brk(abi_ulong new_brk
)
722 abi_long mapped_addr
;
727 if (new_brk
< target_original_brk
)
730 brk_page
= HOST_PAGE_ALIGN(target_brk
);
732 /* If the new brk is less than this, set it and we're done... */
733 if (new_brk
< brk_page
) {
734 target_brk
= new_brk
;
738 /* We need to allocate more memory after the brk... Note that
739 * we don't use MAP_FIXED because that will map over the top of
740 * any existing mapping (like the one with the host libc or qemu
741 * itself); instead we treat "mapped but at wrong address" as
742 * a failure and unmap again.
744 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
+ 1);
745 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
746 PROT_READ
|PROT_WRITE
,
747 MAP_ANON
|MAP_PRIVATE
, 0, 0));
749 if (mapped_addr
== brk_page
) {
750 target_brk
= new_brk
;
752 } else if (mapped_addr
!= -1) {
753 /* Mapped but at wrong address, meaning there wasn't actually
754 * enough space for this brk.
756 target_munmap(mapped_addr
, new_alloc_size
);
760 #if defined(TARGET_ALPHA)
761 /* We (partially) emulate OSF/1 on Alpha, which requires we
762 return a proper errno, not an unchanged brk value. */
763 return -TARGET_ENOMEM
;
765 /* For everything else, return the previous break. */
769 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
770 abi_ulong target_fds_addr
,
774 abi_ulong b
, *target_fds
;
776 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
777 if (!(target_fds
= lock_user(VERIFY_READ
,
779 sizeof(abi_ulong
) * nw
,
781 return -TARGET_EFAULT
;
785 for (i
= 0; i
< nw
; i
++) {
786 /* grab the abi_ulong */
787 __get_user(b
, &target_fds
[i
]);
788 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
789 /* check the bit inside the abi_ulong */
796 unlock_user(target_fds
, target_fds_addr
, 0);
801 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
807 abi_ulong
*target_fds
;
809 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
810 if (!(target_fds
= lock_user(VERIFY_WRITE
,
812 sizeof(abi_ulong
) * nw
,
814 return -TARGET_EFAULT
;
817 for (i
= 0; i
< nw
; i
++) {
819 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
820 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
823 __put_user(v
, &target_fds
[i
]);
826 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
831 #if defined(__alpha__)
837 static inline abi_long
host_to_target_clock_t(long ticks
)
839 #if HOST_HZ == TARGET_HZ
842 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
846 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
847 const struct rusage
*rusage
)
849 struct target_rusage
*target_rusage
;
851 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
852 return -TARGET_EFAULT
;
853 target_rusage
->ru_utime
.tv_sec
= tswapl(rusage
->ru_utime
.tv_sec
);
854 target_rusage
->ru_utime
.tv_usec
= tswapl(rusage
->ru_utime
.tv_usec
);
855 target_rusage
->ru_stime
.tv_sec
= tswapl(rusage
->ru_stime
.tv_sec
);
856 target_rusage
->ru_stime
.tv_usec
= tswapl(rusage
->ru_stime
.tv_usec
);
857 target_rusage
->ru_maxrss
= tswapl(rusage
->ru_maxrss
);
858 target_rusage
->ru_ixrss
= tswapl(rusage
->ru_ixrss
);
859 target_rusage
->ru_idrss
= tswapl(rusage
->ru_idrss
);
860 target_rusage
->ru_isrss
= tswapl(rusage
->ru_isrss
);
861 target_rusage
->ru_minflt
= tswapl(rusage
->ru_minflt
);
862 target_rusage
->ru_majflt
= tswapl(rusage
->ru_majflt
);
863 target_rusage
->ru_nswap
= tswapl(rusage
->ru_nswap
);
864 target_rusage
->ru_inblock
= tswapl(rusage
->ru_inblock
);
865 target_rusage
->ru_oublock
= tswapl(rusage
->ru_oublock
);
866 target_rusage
->ru_msgsnd
= tswapl(rusage
->ru_msgsnd
);
867 target_rusage
->ru_msgrcv
= tswapl(rusage
->ru_msgrcv
);
868 target_rusage
->ru_nsignals
= tswapl(rusage
->ru_nsignals
);
869 target_rusage
->ru_nvcsw
= tswapl(rusage
->ru_nvcsw
);
870 target_rusage
->ru_nivcsw
= tswapl(rusage
->ru_nivcsw
);
871 unlock_user_struct(target_rusage
, target_addr
, 1);
876 static inline rlim_t
target_to_host_rlim(target_ulong target_rlim
)
878 if (target_rlim
== TARGET_RLIM_INFINITY
)
879 return RLIM_INFINITY
;
881 return tswapl(target_rlim
);
884 static inline target_ulong
host_to_target_rlim(rlim_t rlim
)
886 if (rlim
== RLIM_INFINITY
|| rlim
!= (target_long
)rlim
)
887 return TARGET_RLIM_INFINITY
;
892 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
893 abi_ulong target_tv_addr
)
895 struct target_timeval
*target_tv
;
897 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
898 return -TARGET_EFAULT
;
900 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
901 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
903 unlock_user_struct(target_tv
, target_tv_addr
, 0);
908 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
909 const struct timeval
*tv
)
911 struct target_timeval
*target_tv
;
913 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
914 return -TARGET_EFAULT
;
916 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
917 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
919 unlock_user_struct(target_tv
, target_tv_addr
, 1);
924 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
927 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
928 abi_ulong target_mq_attr_addr
)
930 struct target_mq_attr
*target_mq_attr
;
932 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
933 target_mq_attr_addr
, 1))
934 return -TARGET_EFAULT
;
936 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
937 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
938 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
939 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
941 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
946 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
947 const struct mq_attr
*attr
)
949 struct target_mq_attr
*target_mq_attr
;
951 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
952 target_mq_attr_addr
, 0))
953 return -TARGET_EFAULT
;
955 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
956 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
957 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
958 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
960 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
966 /* do_select() must return target values and target errnos. */
967 static abi_long
do_select(int n
,
968 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
969 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
971 fd_set rfds
, wfds
, efds
;
972 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
973 struct timeval tv
, *tv_ptr
;
977 if (copy_from_user_fdset(&rfds
, rfd_addr
, n
))
978 return -TARGET_EFAULT
;
984 if (copy_from_user_fdset(&wfds
, wfd_addr
, n
))
985 return -TARGET_EFAULT
;
991 if (copy_from_user_fdset(&efds
, efd_addr
, n
))
992 return -TARGET_EFAULT
;
998 if (target_tv_addr
) {
999 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1000 return -TARGET_EFAULT
;
1006 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1008 if (!is_error(ret
)) {
1009 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1010 return -TARGET_EFAULT
;
1011 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1012 return -TARGET_EFAULT
;
1013 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1014 return -TARGET_EFAULT
;
1016 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1017 return -TARGET_EFAULT
;
1023 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1026 return pipe2(host_pipe
, flags
);
1032 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1033 int flags
, int is_pipe2
)
1037 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1040 return get_errno(ret
);
1042 /* Several targets have special calling conventions for the original
1043 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1045 #if defined(TARGET_ALPHA)
1046 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1047 return host_pipe
[0];
1048 #elif defined(TARGET_MIPS)
1049 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1050 return host_pipe
[0];
1051 #elif defined(TARGET_SH4)
1052 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1053 return host_pipe
[0];
1057 if (put_user_s32(host_pipe
[0], pipedes
)
1058 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1059 return -TARGET_EFAULT
;
1060 return get_errno(ret
);
1063 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1064 abi_ulong target_addr
,
1067 struct target_ip_mreqn
*target_smreqn
;
1069 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1071 return -TARGET_EFAULT
;
1072 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1073 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1074 if (len
== sizeof(struct target_ip_mreqn
))
1075 mreqn
->imr_ifindex
= tswapl(target_smreqn
->imr_ifindex
);
1076 unlock_user(target_smreqn
, target_addr
, 0);
1081 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1082 abi_ulong target_addr
,
1085 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1086 sa_family_t sa_family
;
1087 struct target_sockaddr
*target_saddr
;
1089 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1091 return -TARGET_EFAULT
;
1093 sa_family
= tswap16(target_saddr
->sa_family
);
1095 /* Oops. The caller might send a incomplete sun_path; sun_path
1096 * must be terminated by \0 (see the manual page), but
1097 * unfortunately it is quite common to specify sockaddr_un
1098 * length as "strlen(x->sun_path)" while it should be
1099 * "strlen(...) + 1". We'll fix that here if needed.
1100 * Linux kernel has a similar feature.
1103 if (sa_family
== AF_UNIX
) {
1104 if (len
< unix_maxlen
&& len
> 0) {
1105 char *cp
= (char*)target_saddr
;
1107 if ( cp
[len
-1] && !cp
[len
] )
1110 if (len
> unix_maxlen
)
1114 memcpy(addr
, target_saddr
, len
);
1115 addr
->sa_family
= sa_family
;
1116 unlock_user(target_saddr
, target_addr
, 0);
1121 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1122 struct sockaddr
*addr
,
1125 struct target_sockaddr
*target_saddr
;
1127 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1129 return -TARGET_EFAULT
;
1130 memcpy(target_saddr
, addr
, len
);
1131 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1132 unlock_user(target_saddr
, target_addr
, len
);
1137 /* ??? Should this also swap msgh->name? */
1138 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1139 struct target_msghdr
*target_msgh
)
1141 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1142 abi_long msg_controllen
;
1143 abi_ulong target_cmsg_addr
;
1144 struct target_cmsghdr
*target_cmsg
;
1145 socklen_t space
= 0;
1147 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1148 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1150 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1151 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1153 return -TARGET_EFAULT
;
1155 while (cmsg
&& target_cmsg
) {
1156 void *data
= CMSG_DATA(cmsg
);
1157 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1159 int len
= tswapl(target_cmsg
->cmsg_len
)
1160 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1162 space
+= CMSG_SPACE(len
);
1163 if (space
> msgh
->msg_controllen
) {
1164 space
-= CMSG_SPACE(len
);
1165 gemu_log("Host cmsg overflow\n");
1169 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1170 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1171 cmsg
->cmsg_len
= CMSG_LEN(len
);
1173 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1174 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1175 memcpy(data
, target_data
, len
);
1177 int *fd
= (int *)data
;
1178 int *target_fd
= (int *)target_data
;
1179 int i
, numfds
= len
/ sizeof(int);
1181 for (i
= 0; i
< numfds
; i
++)
1182 fd
[i
] = tswap32(target_fd
[i
]);
1185 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1186 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1188 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1190 msgh
->msg_controllen
= space
;
1194 /* ??? Should this also swap msgh->name? */
1195 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1196 struct msghdr
*msgh
)
1198 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1199 abi_long msg_controllen
;
1200 abi_ulong target_cmsg_addr
;
1201 struct target_cmsghdr
*target_cmsg
;
1202 socklen_t space
= 0;
1204 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1205 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1207 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1208 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1210 return -TARGET_EFAULT
;
1212 while (cmsg
&& target_cmsg
) {
1213 void *data
= CMSG_DATA(cmsg
);
1214 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1216 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1218 space
+= TARGET_CMSG_SPACE(len
);
1219 if (space
> msg_controllen
) {
1220 space
-= TARGET_CMSG_SPACE(len
);
1221 gemu_log("Target cmsg overflow\n");
1225 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1226 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1227 target_cmsg
->cmsg_len
= tswapl(TARGET_CMSG_LEN(len
));
1229 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1230 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1231 memcpy(target_data
, data
, len
);
1233 int *fd
= (int *)data
;
1234 int *target_fd
= (int *)target_data
;
1235 int i
, numfds
= len
/ sizeof(int);
1237 for (i
= 0; i
< numfds
; i
++)
1238 target_fd
[i
] = tswap32(fd
[i
]);
1241 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1242 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1244 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1246 target_msgh
->msg_controllen
= tswapl(space
);
1250 /* do_setsockopt() Must return target values and target errnos. */
1251 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1252 abi_ulong optval_addr
, socklen_t optlen
)
1256 struct ip_mreqn
*ip_mreq
;
1257 struct ip_mreq_source
*ip_mreq_source
;
1261 /* TCP options all take an 'int' value. */
1262 if (optlen
< sizeof(uint32_t))
1263 return -TARGET_EINVAL
;
1265 if (get_user_u32(val
, optval_addr
))
1266 return -TARGET_EFAULT
;
1267 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1274 case IP_ROUTER_ALERT
:
1278 case IP_MTU_DISCOVER
:
1284 case IP_MULTICAST_TTL
:
1285 case IP_MULTICAST_LOOP
:
1287 if (optlen
>= sizeof(uint32_t)) {
1288 if (get_user_u32(val
, optval_addr
))
1289 return -TARGET_EFAULT
;
1290 } else if (optlen
>= 1) {
1291 if (get_user_u8(val
, optval_addr
))
1292 return -TARGET_EFAULT
;
1294 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1296 case IP_ADD_MEMBERSHIP
:
1297 case IP_DROP_MEMBERSHIP
:
1298 if (optlen
< sizeof (struct target_ip_mreq
) ||
1299 optlen
> sizeof (struct target_ip_mreqn
))
1300 return -TARGET_EINVAL
;
1302 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1303 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1304 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1307 case IP_BLOCK_SOURCE
:
1308 case IP_UNBLOCK_SOURCE
:
1309 case IP_ADD_SOURCE_MEMBERSHIP
:
1310 case IP_DROP_SOURCE_MEMBERSHIP
:
1311 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1312 return -TARGET_EINVAL
;
1314 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1315 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1316 unlock_user (ip_mreq_source
, optval_addr
, 0);
1323 case TARGET_SOL_SOCKET
:
1325 /* Options with 'int' argument. */
1326 case TARGET_SO_DEBUG
:
1329 case TARGET_SO_REUSEADDR
:
1330 optname
= SO_REUSEADDR
;
1332 case TARGET_SO_TYPE
:
1335 case TARGET_SO_ERROR
:
1338 case TARGET_SO_DONTROUTE
:
1339 optname
= SO_DONTROUTE
;
1341 case TARGET_SO_BROADCAST
:
1342 optname
= SO_BROADCAST
;
1344 case TARGET_SO_SNDBUF
:
1345 optname
= SO_SNDBUF
;
1347 case TARGET_SO_RCVBUF
:
1348 optname
= SO_RCVBUF
;
1350 case TARGET_SO_KEEPALIVE
:
1351 optname
= SO_KEEPALIVE
;
1353 case TARGET_SO_OOBINLINE
:
1354 optname
= SO_OOBINLINE
;
1356 case TARGET_SO_NO_CHECK
:
1357 optname
= SO_NO_CHECK
;
1359 case TARGET_SO_PRIORITY
:
1360 optname
= SO_PRIORITY
;
1363 case TARGET_SO_BSDCOMPAT
:
1364 optname
= SO_BSDCOMPAT
;
1367 case TARGET_SO_PASSCRED
:
1368 optname
= SO_PASSCRED
;
1370 case TARGET_SO_TIMESTAMP
:
1371 optname
= SO_TIMESTAMP
;
1373 case TARGET_SO_RCVLOWAT
:
1374 optname
= SO_RCVLOWAT
;
1376 case TARGET_SO_RCVTIMEO
:
1377 optname
= SO_RCVTIMEO
;
1379 case TARGET_SO_SNDTIMEO
:
1380 optname
= SO_SNDTIMEO
;
1386 if (optlen
< sizeof(uint32_t))
1387 return -TARGET_EINVAL
;
1389 if (get_user_u32(val
, optval_addr
))
1390 return -TARGET_EFAULT
;
1391 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1395 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level
, optname
);
1396 ret
= -TARGET_ENOPROTOOPT
;
1401 /* do_getsockopt() Must return target values and target errnos. */
1402 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1403 abi_ulong optval_addr
, abi_ulong optlen
)
1410 case TARGET_SOL_SOCKET
:
1413 /* These don't just return a single integer */
1414 case TARGET_SO_LINGER
:
1415 case TARGET_SO_RCVTIMEO
:
1416 case TARGET_SO_SNDTIMEO
:
1417 case TARGET_SO_PEERCRED
:
1418 case TARGET_SO_PEERNAME
:
1420 /* Options with 'int' argument. */
1421 case TARGET_SO_DEBUG
:
1424 case TARGET_SO_REUSEADDR
:
1425 optname
= SO_REUSEADDR
;
1427 case TARGET_SO_TYPE
:
1430 case TARGET_SO_ERROR
:
1433 case TARGET_SO_DONTROUTE
:
1434 optname
= SO_DONTROUTE
;
1436 case TARGET_SO_BROADCAST
:
1437 optname
= SO_BROADCAST
;
1439 case TARGET_SO_SNDBUF
:
1440 optname
= SO_SNDBUF
;
1442 case TARGET_SO_RCVBUF
:
1443 optname
= SO_RCVBUF
;
1445 case TARGET_SO_KEEPALIVE
:
1446 optname
= SO_KEEPALIVE
;
1448 case TARGET_SO_OOBINLINE
:
1449 optname
= SO_OOBINLINE
;
1451 case TARGET_SO_NO_CHECK
:
1452 optname
= SO_NO_CHECK
;
1454 case TARGET_SO_PRIORITY
:
1455 optname
= SO_PRIORITY
;
1458 case TARGET_SO_BSDCOMPAT
:
1459 optname
= SO_BSDCOMPAT
;
1462 case TARGET_SO_PASSCRED
:
1463 optname
= SO_PASSCRED
;
1465 case TARGET_SO_TIMESTAMP
:
1466 optname
= SO_TIMESTAMP
;
1468 case TARGET_SO_RCVLOWAT
:
1469 optname
= SO_RCVLOWAT
;
1476 /* TCP options all take an 'int' value. */
1478 if (get_user_u32(len
, optlen
))
1479 return -TARGET_EFAULT
;
1481 return -TARGET_EINVAL
;
1483 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1489 if (put_user_u32(val
, optval_addr
))
1490 return -TARGET_EFAULT
;
1492 if (put_user_u8(val
, optval_addr
))
1493 return -TARGET_EFAULT
;
1495 if (put_user_u32(len
, optlen
))
1496 return -TARGET_EFAULT
;
1503 case IP_ROUTER_ALERT
:
1507 case IP_MTU_DISCOVER
:
1513 case IP_MULTICAST_TTL
:
1514 case IP_MULTICAST_LOOP
:
1515 if (get_user_u32(len
, optlen
))
1516 return -TARGET_EFAULT
;
1518 return -TARGET_EINVAL
;
1520 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1523 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1525 if (put_user_u32(len
, optlen
)
1526 || put_user_u8(val
, optval_addr
))
1527 return -TARGET_EFAULT
;
1529 if (len
> sizeof(int))
1531 if (put_user_u32(len
, optlen
)
1532 || put_user_u32(val
, optval_addr
))
1533 return -TARGET_EFAULT
;
1537 ret
= -TARGET_ENOPROTOOPT
;
1543 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1545 ret
= -TARGET_EOPNOTSUPP
;
1552 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1553 * other lock functions have a return code of 0 for failure.
1555 static abi_long
lock_iovec(int type
, struct iovec
*vec
, abi_ulong target_addr
,
1556 int count
, int copy
)
1558 struct target_iovec
*target_vec
;
1562 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1564 return -TARGET_EFAULT
;
1565 for(i
= 0;i
< count
; i
++) {
1566 base
= tswapl(target_vec
[i
].iov_base
);
1567 vec
[i
].iov_len
= tswapl(target_vec
[i
].iov_len
);
1568 if (vec
[i
].iov_len
!= 0) {
1569 vec
[i
].iov_base
= lock_user(type
, base
, vec
[i
].iov_len
, copy
);
1570 /* Don't check lock_user return value. We must call writev even
1571 if a element has invalid base address. */
1573 /* zero length pointer is ignored */
1574 vec
[i
].iov_base
= NULL
;
1577 unlock_user (target_vec
, target_addr
, 0);
1581 static abi_long
unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1582 int count
, int copy
)
1584 struct target_iovec
*target_vec
;
1588 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1590 return -TARGET_EFAULT
;
1591 for(i
= 0;i
< count
; i
++) {
1592 if (target_vec
[i
].iov_base
) {
1593 base
= tswapl(target_vec
[i
].iov_base
);
1594 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1597 unlock_user (target_vec
, target_addr
, 0);
1602 /* do_socket() Must return target values and target errnos. */
1603 static abi_long
do_socket(int domain
, int type
, int protocol
)
1605 #if defined(TARGET_MIPS)
1607 case TARGET_SOCK_DGRAM
:
1610 case TARGET_SOCK_STREAM
:
1613 case TARGET_SOCK_RAW
:
1616 case TARGET_SOCK_RDM
:
1619 case TARGET_SOCK_SEQPACKET
:
1620 type
= SOCK_SEQPACKET
;
1622 case TARGET_SOCK_PACKET
:
1627 if (domain
== PF_NETLINK
)
1628 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1629 return get_errno(socket(domain
, type
, protocol
));
1632 /* do_bind() Must return target values and target errnos. */
1633 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1639 if ((int)addrlen
< 0) {
1640 return -TARGET_EINVAL
;
1643 addr
= alloca(addrlen
+1);
1645 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1649 return get_errno(bind(sockfd
, addr
, addrlen
));
1652 /* do_connect() Must return target values and target errnos. */
1653 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1659 if ((int)addrlen
< 0) {
1660 return -TARGET_EINVAL
;
1663 addr
= alloca(addrlen
);
1665 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1669 return get_errno(connect(sockfd
, addr
, addrlen
));
1672 /* do_sendrecvmsg() Must return target values and target errnos. */
1673 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1674 int flags
, int send
)
1677 struct target_msghdr
*msgp
;
1681 abi_ulong target_vec
;
1684 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1688 return -TARGET_EFAULT
;
1689 if (msgp
->msg_name
) {
1690 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1691 msg
.msg_name
= alloca(msg
.msg_namelen
);
1692 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapl(msgp
->msg_name
),
1695 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1699 msg
.msg_name
= NULL
;
1700 msg
.msg_namelen
= 0;
1702 msg
.msg_controllen
= 2 * tswapl(msgp
->msg_controllen
);
1703 msg
.msg_control
= alloca(msg
.msg_controllen
);
1704 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1706 count
= tswapl(msgp
->msg_iovlen
);
1707 vec
= alloca(count
* sizeof(struct iovec
));
1708 target_vec
= tswapl(msgp
->msg_iov
);
1709 lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
, vec
, target_vec
, count
, send
);
1710 msg
.msg_iovlen
= count
;
1714 ret
= target_to_host_cmsg(&msg
, msgp
);
1716 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1718 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1719 if (!is_error(ret
)) {
1721 ret
= host_to_target_cmsg(msgp
, &msg
);
1726 unlock_iovec(vec
, target_vec
, count
, !send
);
1727 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1731 /* do_accept() Must return target values and target errnos. */
1732 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
1733 abi_ulong target_addrlen_addr
)
1739 if (target_addr
== 0)
1740 return get_errno(accept(fd
, NULL
, NULL
));
1742 /* linux returns EINVAL if addrlen pointer is invalid */
1743 if (get_user_u32(addrlen
, target_addrlen_addr
))
1744 return -TARGET_EINVAL
;
1746 if ((int)addrlen
< 0) {
1747 return -TARGET_EINVAL
;
1750 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1751 return -TARGET_EINVAL
;
1753 addr
= alloca(addrlen
);
1755 ret
= get_errno(accept(fd
, addr
, &addrlen
));
1756 if (!is_error(ret
)) {
1757 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1758 if (put_user_u32(addrlen
, target_addrlen_addr
))
1759 ret
= -TARGET_EFAULT
;
1764 /* do_getpeername() Must return target values and target errnos. */
1765 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
1766 abi_ulong target_addrlen_addr
)
1772 if (get_user_u32(addrlen
, target_addrlen_addr
))
1773 return -TARGET_EFAULT
;
1775 if ((int)addrlen
< 0) {
1776 return -TARGET_EINVAL
;
1779 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1780 return -TARGET_EFAULT
;
1782 addr
= alloca(addrlen
);
1784 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
1785 if (!is_error(ret
)) {
1786 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1787 if (put_user_u32(addrlen
, target_addrlen_addr
))
1788 ret
= -TARGET_EFAULT
;
1793 /* do_getsockname() Must return target values and target errnos. */
1794 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
1795 abi_ulong target_addrlen_addr
)
1801 if (get_user_u32(addrlen
, target_addrlen_addr
))
1802 return -TARGET_EFAULT
;
1804 if ((int)addrlen
< 0) {
1805 return -TARGET_EINVAL
;
1808 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1809 return -TARGET_EFAULT
;
1811 addr
= alloca(addrlen
);
1813 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
1814 if (!is_error(ret
)) {
1815 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1816 if (put_user_u32(addrlen
, target_addrlen_addr
))
1817 ret
= -TARGET_EFAULT
;
1822 /* do_socketpair() Must return target values and target errnos. */
1823 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
1824 abi_ulong target_tab_addr
)
1829 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
1830 if (!is_error(ret
)) {
1831 if (put_user_s32(tab
[0], target_tab_addr
)
1832 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
1833 ret
= -TARGET_EFAULT
;
1838 /* do_sendto() Must return target values and target errnos. */
1839 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
1840 abi_ulong target_addr
, socklen_t addrlen
)
1846 if ((int)addrlen
< 0) {
1847 return -TARGET_EINVAL
;
1850 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
1852 return -TARGET_EFAULT
;
1854 addr
= alloca(addrlen
);
1855 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1857 unlock_user(host_msg
, msg
, 0);
1860 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
1862 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
1864 unlock_user(host_msg
, msg
, 0);
1868 /* do_recvfrom() Must return target values and target errnos. */
1869 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
1870 abi_ulong target_addr
,
1871 abi_ulong target_addrlen
)
1878 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
1880 return -TARGET_EFAULT
;
1882 if (get_user_u32(addrlen
, target_addrlen
)) {
1883 ret
= -TARGET_EFAULT
;
1886 if ((int)addrlen
< 0) {
1887 ret
= -TARGET_EINVAL
;
1890 addr
= alloca(addrlen
);
1891 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
1893 addr
= NULL
; /* To keep compiler quiet. */
1894 ret
= get_errno(recv(fd
, host_msg
, len
, flags
));
1896 if (!is_error(ret
)) {
1898 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1899 if (put_user_u32(addrlen
, target_addrlen
)) {
1900 ret
= -TARGET_EFAULT
;
1904 unlock_user(host_msg
, msg
, len
);
1907 unlock_user(host_msg
, msg
, 0);
1912 #ifdef TARGET_NR_socketcall
1913 /* do_socketcall() Must return target values and target errnos. */
1914 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
1917 const int n
= sizeof(abi_ulong
);
1922 abi_ulong domain
, type
, protocol
;
1924 if (get_user_ual(domain
, vptr
)
1925 || get_user_ual(type
, vptr
+ n
)
1926 || get_user_ual(protocol
, vptr
+ 2 * n
))
1927 return -TARGET_EFAULT
;
1929 ret
= do_socket(domain
, type
, protocol
);
1935 abi_ulong target_addr
;
1938 if (get_user_ual(sockfd
, vptr
)
1939 || get_user_ual(target_addr
, vptr
+ n
)
1940 || get_user_ual(addrlen
, vptr
+ 2 * n
))
1941 return -TARGET_EFAULT
;
1943 ret
= do_bind(sockfd
, target_addr
, addrlen
);
1946 case SOCKOP_connect
:
1949 abi_ulong target_addr
;
1952 if (get_user_ual(sockfd
, vptr
)
1953 || get_user_ual(target_addr
, vptr
+ n
)
1954 || get_user_ual(addrlen
, vptr
+ 2 * n
))
1955 return -TARGET_EFAULT
;
1957 ret
= do_connect(sockfd
, target_addr
, addrlen
);
1962 abi_ulong sockfd
, backlog
;
1964 if (get_user_ual(sockfd
, vptr
)
1965 || get_user_ual(backlog
, vptr
+ n
))
1966 return -TARGET_EFAULT
;
1968 ret
= get_errno(listen(sockfd
, backlog
));
1974 abi_ulong target_addr
, target_addrlen
;
1976 if (get_user_ual(sockfd
, vptr
)
1977 || get_user_ual(target_addr
, vptr
+ n
)
1978 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1979 return -TARGET_EFAULT
;
1981 ret
= do_accept(sockfd
, target_addr
, target_addrlen
);
1984 case SOCKOP_getsockname
:
1987 abi_ulong target_addr
, target_addrlen
;
1989 if (get_user_ual(sockfd
, vptr
)
1990 || get_user_ual(target_addr
, vptr
+ n
)
1991 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1992 return -TARGET_EFAULT
;
1994 ret
= do_getsockname(sockfd
, target_addr
, target_addrlen
);
1997 case SOCKOP_getpeername
:
2000 abi_ulong target_addr
, target_addrlen
;
2002 if (get_user_ual(sockfd
, vptr
)
2003 || get_user_ual(target_addr
, vptr
+ n
)
2004 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2005 return -TARGET_EFAULT
;
2007 ret
= do_getpeername(sockfd
, target_addr
, target_addrlen
);
2010 case SOCKOP_socketpair
:
2012 abi_ulong domain
, type
, protocol
;
2015 if (get_user_ual(domain
, vptr
)
2016 || get_user_ual(type
, vptr
+ n
)
2017 || get_user_ual(protocol
, vptr
+ 2 * n
)
2018 || get_user_ual(tab
, vptr
+ 3 * n
))
2019 return -TARGET_EFAULT
;
2021 ret
= do_socketpair(domain
, type
, protocol
, tab
);
2031 if (get_user_ual(sockfd
, vptr
)
2032 || get_user_ual(msg
, vptr
+ n
)
2033 || get_user_ual(len
, vptr
+ 2 * n
)
2034 || get_user_ual(flags
, vptr
+ 3 * n
))
2035 return -TARGET_EFAULT
;
2037 ret
= do_sendto(sockfd
, msg
, len
, flags
, 0, 0);
2047 if (get_user_ual(sockfd
, vptr
)
2048 || get_user_ual(msg
, vptr
+ n
)
2049 || get_user_ual(len
, vptr
+ 2 * n
)
2050 || get_user_ual(flags
, vptr
+ 3 * n
))
2051 return -TARGET_EFAULT
;
2053 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, 0, 0);
2065 if (get_user_ual(sockfd
, vptr
)
2066 || get_user_ual(msg
, vptr
+ n
)
2067 || get_user_ual(len
, vptr
+ 2 * n
)
2068 || get_user_ual(flags
, vptr
+ 3 * n
)
2069 || get_user_ual(addr
, vptr
+ 4 * n
)
2070 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2071 return -TARGET_EFAULT
;
2073 ret
= do_sendto(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2076 case SOCKOP_recvfrom
:
2085 if (get_user_ual(sockfd
, vptr
)
2086 || get_user_ual(msg
, vptr
+ n
)
2087 || get_user_ual(len
, vptr
+ 2 * n
)
2088 || get_user_ual(flags
, vptr
+ 3 * n
)
2089 || get_user_ual(addr
, vptr
+ 4 * n
)
2090 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2091 return -TARGET_EFAULT
;
2093 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2096 case SOCKOP_shutdown
:
2098 abi_ulong sockfd
, how
;
2100 if (get_user_ual(sockfd
, vptr
)
2101 || get_user_ual(how
, vptr
+ n
))
2102 return -TARGET_EFAULT
;
2104 ret
= get_errno(shutdown(sockfd
, how
));
2107 case SOCKOP_sendmsg
:
2108 case SOCKOP_recvmsg
:
2111 abi_ulong target_msg
;
2114 if (get_user_ual(fd
, vptr
)
2115 || get_user_ual(target_msg
, vptr
+ n
)
2116 || get_user_ual(flags
, vptr
+ 2 * n
))
2117 return -TARGET_EFAULT
;
2119 ret
= do_sendrecvmsg(fd
, target_msg
, flags
,
2120 (num
== SOCKOP_sendmsg
));
2123 case SOCKOP_setsockopt
:
2131 if (get_user_ual(sockfd
, vptr
)
2132 || get_user_ual(level
, vptr
+ n
)
2133 || get_user_ual(optname
, vptr
+ 2 * n
)
2134 || get_user_ual(optval
, vptr
+ 3 * n
)
2135 || get_user_ual(optlen
, vptr
+ 4 * n
))
2136 return -TARGET_EFAULT
;
2138 ret
= do_setsockopt(sockfd
, level
, optname
, optval
, optlen
);
2141 case SOCKOP_getsockopt
:
2149 if (get_user_ual(sockfd
, vptr
)
2150 || get_user_ual(level
, vptr
+ n
)
2151 || get_user_ual(optname
, vptr
+ 2 * n
)
2152 || get_user_ual(optval
, vptr
+ 3 * n
)
2153 || get_user_ual(optlen
, vptr
+ 4 * n
))
2154 return -TARGET_EFAULT
;
2156 ret
= do_getsockopt(sockfd
, level
, optname
, optval
, optlen
);
2160 gemu_log("Unsupported socketcall: %d\n", num
);
2161 ret
= -TARGET_ENOSYS
;
2168 #define N_SHM_REGIONS 32
2170 static struct shm_region
{
2173 } shm_regions
[N_SHM_REGIONS
];
2175 struct target_ipc_perm
2182 unsigned short int mode
;
2183 unsigned short int __pad1
;
2184 unsigned short int __seq
;
2185 unsigned short int __pad2
;
2186 abi_ulong __unused1
;
2187 abi_ulong __unused2
;
2190 struct target_semid_ds
2192 struct target_ipc_perm sem_perm
;
2193 abi_ulong sem_otime
;
2194 abi_ulong __unused1
;
2195 abi_ulong sem_ctime
;
2196 abi_ulong __unused2
;
2197 abi_ulong sem_nsems
;
2198 abi_ulong __unused3
;
2199 abi_ulong __unused4
;
2202 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2203 abi_ulong target_addr
)
2205 struct target_ipc_perm
*target_ip
;
2206 struct target_semid_ds
*target_sd
;
2208 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2209 return -TARGET_EFAULT
;
2210 target_ip
= &(target_sd
->sem_perm
);
2211 host_ip
->__key
= tswapl(target_ip
->__key
);
2212 host_ip
->uid
= tswapl(target_ip
->uid
);
2213 host_ip
->gid
= tswapl(target_ip
->gid
);
2214 host_ip
->cuid
= tswapl(target_ip
->cuid
);
2215 host_ip
->cgid
= tswapl(target_ip
->cgid
);
2216 host_ip
->mode
= tswapl(target_ip
->mode
);
2217 unlock_user_struct(target_sd
, target_addr
, 0);
2221 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2222 struct ipc_perm
*host_ip
)
2224 struct target_ipc_perm
*target_ip
;
2225 struct target_semid_ds
*target_sd
;
2227 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2228 return -TARGET_EFAULT
;
2229 target_ip
= &(target_sd
->sem_perm
);
2230 target_ip
->__key
= tswapl(host_ip
->__key
);
2231 target_ip
->uid
= tswapl(host_ip
->uid
);
2232 target_ip
->gid
= tswapl(host_ip
->gid
);
2233 target_ip
->cuid
= tswapl(host_ip
->cuid
);
2234 target_ip
->cgid
= tswapl(host_ip
->cgid
);
2235 target_ip
->mode
= tswapl(host_ip
->mode
);
2236 unlock_user_struct(target_sd
, target_addr
, 1);
2240 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2241 abi_ulong target_addr
)
2243 struct target_semid_ds
*target_sd
;
2245 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2246 return -TARGET_EFAULT
;
2247 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2248 return -TARGET_EFAULT
;
2249 host_sd
->sem_nsems
= tswapl(target_sd
->sem_nsems
);
2250 host_sd
->sem_otime
= tswapl(target_sd
->sem_otime
);
2251 host_sd
->sem_ctime
= tswapl(target_sd
->sem_ctime
);
2252 unlock_user_struct(target_sd
, target_addr
, 0);
2256 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2257 struct semid_ds
*host_sd
)
2259 struct target_semid_ds
*target_sd
;
2261 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2262 return -TARGET_EFAULT
;
2263 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2264 return -TARGET_EFAULT
;;
2265 target_sd
->sem_nsems
= tswapl(host_sd
->sem_nsems
);
2266 target_sd
->sem_otime
= tswapl(host_sd
->sem_otime
);
2267 target_sd
->sem_ctime
= tswapl(host_sd
->sem_ctime
);
2268 unlock_user_struct(target_sd
, target_addr
, 1);
2272 struct target_seminfo
{
2285 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2286 struct seminfo
*host_seminfo
)
2288 struct target_seminfo
*target_seminfo
;
2289 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2290 return -TARGET_EFAULT
;
2291 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2292 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2293 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2294 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2295 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2296 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2297 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2298 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2299 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2300 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2301 unlock_user_struct(target_seminfo
, target_addr
, 1);
2307 struct semid_ds
*buf
;
2308 unsigned short *array
;
2309 struct seminfo
*__buf
;
2312 union target_semun
{
2319 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2320 abi_ulong target_addr
)
2323 unsigned short *array
;
2325 struct semid_ds semid_ds
;
2328 semun
.buf
= &semid_ds
;
2330 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2332 return get_errno(ret
);
2334 nsems
= semid_ds
.sem_nsems
;
2336 *host_array
= malloc(nsems
*sizeof(unsigned short));
2337 array
= lock_user(VERIFY_READ
, target_addr
,
2338 nsems
*sizeof(unsigned short), 1);
2340 return -TARGET_EFAULT
;
2342 for(i
=0; i
<nsems
; i
++) {
2343 __get_user((*host_array
)[i
], &array
[i
]);
2345 unlock_user(array
, target_addr
, 0);
2350 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2351 unsigned short **host_array
)
2354 unsigned short *array
;
2356 struct semid_ds semid_ds
;
2359 semun
.buf
= &semid_ds
;
2361 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2363 return get_errno(ret
);
2365 nsems
= semid_ds
.sem_nsems
;
2367 array
= lock_user(VERIFY_WRITE
, target_addr
,
2368 nsems
*sizeof(unsigned short), 0);
2370 return -TARGET_EFAULT
;
2372 for(i
=0; i
<nsems
; i
++) {
2373 __put_user((*host_array
)[i
], &array
[i
]);
2376 unlock_user(array
, target_addr
, 1);
2381 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2382 union target_semun target_su
)
2385 struct semid_ds dsarg
;
2386 unsigned short *array
= NULL
;
2387 struct seminfo seminfo
;
2388 abi_long ret
= -TARGET_EINVAL
;
2395 arg
.val
= tswapl(target_su
.val
);
2396 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2397 target_su
.val
= tswapl(arg
.val
);
2401 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2405 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2406 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2413 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2417 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2418 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2424 arg
.__buf
= &seminfo
;
2425 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2426 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2434 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
2441 struct target_sembuf
{
2442 unsigned short sem_num
;
2447 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2448 abi_ulong target_addr
,
2451 struct target_sembuf
*target_sembuf
;
2454 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2455 nsops
*sizeof(struct target_sembuf
), 1);
2457 return -TARGET_EFAULT
;
2459 for(i
=0; i
<nsops
; i
++) {
2460 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2461 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2462 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2465 unlock_user(target_sembuf
, target_addr
, 0);
2470 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2472 struct sembuf sops
[nsops
];
2474 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2475 return -TARGET_EFAULT
;
2477 return semop(semid
, sops
, nsops
);
2480 struct target_msqid_ds
2482 struct target_ipc_perm msg_perm
;
2483 abi_ulong msg_stime
;
2484 #if TARGET_ABI_BITS == 32
2485 abi_ulong __unused1
;
2487 abi_ulong msg_rtime
;
2488 #if TARGET_ABI_BITS == 32
2489 abi_ulong __unused2
;
2491 abi_ulong msg_ctime
;
2492 #if TARGET_ABI_BITS == 32
2493 abi_ulong __unused3
;
2495 abi_ulong __msg_cbytes
;
2497 abi_ulong msg_qbytes
;
2498 abi_ulong msg_lspid
;
2499 abi_ulong msg_lrpid
;
2500 abi_ulong __unused4
;
2501 abi_ulong __unused5
;
2504 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2505 abi_ulong target_addr
)
2507 struct target_msqid_ds
*target_md
;
2509 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2510 return -TARGET_EFAULT
;
2511 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2512 return -TARGET_EFAULT
;
2513 host_md
->msg_stime
= tswapl(target_md
->msg_stime
);
2514 host_md
->msg_rtime
= tswapl(target_md
->msg_rtime
);
2515 host_md
->msg_ctime
= tswapl(target_md
->msg_ctime
);
2516 host_md
->__msg_cbytes
= tswapl(target_md
->__msg_cbytes
);
2517 host_md
->msg_qnum
= tswapl(target_md
->msg_qnum
);
2518 host_md
->msg_qbytes
= tswapl(target_md
->msg_qbytes
);
2519 host_md
->msg_lspid
= tswapl(target_md
->msg_lspid
);
2520 host_md
->msg_lrpid
= tswapl(target_md
->msg_lrpid
);
2521 unlock_user_struct(target_md
, target_addr
, 0);
2525 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2526 struct msqid_ds
*host_md
)
2528 struct target_msqid_ds
*target_md
;
2530 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2531 return -TARGET_EFAULT
;
2532 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2533 return -TARGET_EFAULT
;
2534 target_md
->msg_stime
= tswapl(host_md
->msg_stime
);
2535 target_md
->msg_rtime
= tswapl(host_md
->msg_rtime
);
2536 target_md
->msg_ctime
= tswapl(host_md
->msg_ctime
);
2537 target_md
->__msg_cbytes
= tswapl(host_md
->__msg_cbytes
);
2538 target_md
->msg_qnum
= tswapl(host_md
->msg_qnum
);
2539 target_md
->msg_qbytes
= tswapl(host_md
->msg_qbytes
);
2540 target_md
->msg_lspid
= tswapl(host_md
->msg_lspid
);
2541 target_md
->msg_lrpid
= tswapl(host_md
->msg_lrpid
);
2542 unlock_user_struct(target_md
, target_addr
, 1);
2546 struct target_msginfo
{
2554 unsigned short int msgseg
;
2557 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2558 struct msginfo
*host_msginfo
)
2560 struct target_msginfo
*target_msginfo
;
2561 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2562 return -TARGET_EFAULT
;
2563 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2564 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2565 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2566 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2567 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2568 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2569 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2570 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2571 unlock_user_struct(target_msginfo
, target_addr
, 1);
2575 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2577 struct msqid_ds dsarg
;
2578 struct msginfo msginfo
;
2579 abi_long ret
= -TARGET_EINVAL
;
2587 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2588 return -TARGET_EFAULT
;
2589 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2590 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2591 return -TARGET_EFAULT
;
2594 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2598 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2599 if (host_to_target_msginfo(ptr
, &msginfo
))
2600 return -TARGET_EFAULT
;
2607 struct target_msgbuf
{
2612 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2613 unsigned int msgsz
, int msgflg
)
2615 struct target_msgbuf
*target_mb
;
2616 struct msgbuf
*host_mb
;
2619 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2620 return -TARGET_EFAULT
;
2621 host_mb
= malloc(msgsz
+sizeof(long));
2622 host_mb
->mtype
= (abi_long
) tswapl(target_mb
->mtype
);
2623 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2624 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2626 unlock_user_struct(target_mb
, msgp
, 0);
2631 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2632 unsigned int msgsz
, abi_long msgtyp
,
2635 struct target_msgbuf
*target_mb
;
2637 struct msgbuf
*host_mb
;
2640 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2641 return -TARGET_EFAULT
;
2643 host_mb
= malloc(msgsz
+sizeof(long));
2644 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, tswapl(msgtyp
), msgflg
));
2647 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2648 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2649 if (!target_mtext
) {
2650 ret
= -TARGET_EFAULT
;
2653 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2654 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2657 target_mb
->mtype
= tswapl(host_mb
->mtype
);
2662 unlock_user_struct(target_mb
, msgp
, 1);
2666 struct target_shmid_ds
2668 struct target_ipc_perm shm_perm
;
2669 abi_ulong shm_segsz
;
2670 abi_ulong shm_atime
;
2671 #if TARGET_ABI_BITS == 32
2672 abi_ulong __unused1
;
2674 abi_ulong shm_dtime
;
2675 #if TARGET_ABI_BITS == 32
2676 abi_ulong __unused2
;
2678 abi_ulong shm_ctime
;
2679 #if TARGET_ABI_BITS == 32
2680 abi_ulong __unused3
;
2684 abi_ulong shm_nattch
;
2685 unsigned long int __unused4
;
2686 unsigned long int __unused5
;
2689 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2690 abi_ulong target_addr
)
2692 struct target_shmid_ds
*target_sd
;
2694 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2695 return -TARGET_EFAULT
;
2696 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2697 return -TARGET_EFAULT
;
2698 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2699 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2700 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2701 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2702 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2703 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2704 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2705 unlock_user_struct(target_sd
, target_addr
, 0);
2709 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2710 struct shmid_ds
*host_sd
)
2712 struct target_shmid_ds
*target_sd
;
2714 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2715 return -TARGET_EFAULT
;
2716 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2717 return -TARGET_EFAULT
;
2718 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2719 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2720 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2721 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2722 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2723 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2724 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2725 unlock_user_struct(target_sd
, target_addr
, 1);
2729 struct target_shminfo
{
2737 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
2738 struct shminfo
*host_shminfo
)
2740 struct target_shminfo
*target_shminfo
;
2741 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
2742 return -TARGET_EFAULT
;
2743 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
2744 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
2745 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
2746 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
2747 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
2748 unlock_user_struct(target_shminfo
, target_addr
, 1);
2752 struct target_shm_info
{
2757 abi_ulong swap_attempts
;
2758 abi_ulong swap_successes
;
2761 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
2762 struct shm_info
*host_shm_info
)
2764 struct target_shm_info
*target_shm_info
;
2765 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
2766 return -TARGET_EFAULT
;
2767 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
2768 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
2769 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
2770 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
2771 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
2772 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
2773 unlock_user_struct(target_shm_info
, target_addr
, 1);
2777 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
2779 struct shmid_ds dsarg
;
2780 struct shminfo shminfo
;
2781 struct shm_info shm_info
;
2782 abi_long ret
= -TARGET_EINVAL
;
2790 if (target_to_host_shmid_ds(&dsarg
, buf
))
2791 return -TARGET_EFAULT
;
2792 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
2793 if (host_to_target_shmid_ds(buf
, &dsarg
))
2794 return -TARGET_EFAULT
;
2797 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
2798 if (host_to_target_shminfo(buf
, &shminfo
))
2799 return -TARGET_EFAULT
;
2802 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
2803 if (host_to_target_shm_info(buf
, &shm_info
))
2804 return -TARGET_EFAULT
;
2809 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
2816 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
2820 struct shmid_ds shm_info
;
2823 /* find out the length of the shared memory segment */
2824 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
2825 if (is_error(ret
)) {
2826 /* can't get length, bail out */
2833 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
2835 abi_ulong mmap_start
;
2837 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
2839 if (mmap_start
== -1) {
2841 host_raddr
= (void *)-1;
2843 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
2846 if (host_raddr
== (void *)-1) {
2848 return get_errno((long)host_raddr
);
2850 raddr
=h2g((unsigned long)host_raddr
);
2852 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
2853 PAGE_VALID
| PAGE_READ
|
2854 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
2856 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
2857 if (shm_regions
[i
].start
== 0) {
2858 shm_regions
[i
].start
= raddr
;
2859 shm_regions
[i
].size
= shm_info
.shm_segsz
;
2869 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
2873 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
2874 if (shm_regions
[i
].start
== shmaddr
) {
2875 shm_regions
[i
].start
= 0;
2876 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
2881 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos.
 * Demultiplexes the legacy sys_ipc(2) entry point onto the individual
 * SysV IPC emulation helpers; the call number's high 16 bits carry the
 * interface version.
 */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* version 0 passes msgp/msgtyp indirectly via a kludge struct */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* attach address is returned through *third */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
2985 /* kernel structure types definitions */
2987 #define STRUCT(name, ...) STRUCT_ ## name,
2988 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2990 #include "syscall_types.h"
2993 #undef STRUCT_SPECIAL
2995 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
2996 #define STRUCT_SPECIAL(name)
2997 #include "syscall_types.h"
2999 #undef STRUCT_SPECIAL
3001 typedef struct IOCTLEntry IOCTLEntry
;
3003 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3004 int fd
, abi_long cmd
, abi_long arg
);
3007 unsigned int target_cmd
;
3008 unsigned int host_cmd
;
3011 do_ioctl_fn
*do_ioctl
;
3012 const argtype arg_type
[5];
3015 #define IOC_R 0x0001
3016 #define IOC_W 0x0002
3017 #define IOC_RW (IOC_R | IOC_W)
3019 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Custom handler for FS_IOC_FIEMAP: the argument is a struct fiemap followed
 * by an array of struct fiemap_extent whose size is set in
 * fiemap->fm_extent_count; the kernel fills the array in.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
{
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        free(fm);
    }
    return ret;
}
#endif
3110 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3111 int fd
, abi_long cmd
, abi_long arg
)
3113 const argtype
*arg_type
= ie
->arg_type
;
3117 struct ifconf
*host_ifconf
;
3119 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3120 int target_ifreq_size
;
3125 abi_long target_ifc_buf
;
3129 assert(arg_type
[0] == TYPE_PTR
);
3130 assert(ie
->access
== IOC_RW
);
3133 target_size
= thunk_type_size(arg_type
, 0);
3135 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3137 return -TARGET_EFAULT
;
3138 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3139 unlock_user(argptr
, arg
, 0);
3141 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3142 target_ifc_len
= host_ifconf
->ifc_len
;
3143 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3145 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3146 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3147 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3149 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3150 if (outbufsz
> MAX_STRUCT_SIZE
) {
3151 /* We can't fit all the extents into the fixed size buffer.
3152 * Allocate one that is large enough and use it instead.
3154 host_ifconf
= malloc(outbufsz
);
3156 return -TARGET_ENOMEM
;
3158 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3161 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3163 host_ifconf
->ifc_len
= host_ifc_len
;
3164 host_ifconf
->ifc_buf
= host_ifc_buf
;
3166 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3167 if (!is_error(ret
)) {
3168 /* convert host ifc_len to target ifc_len */
3170 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3171 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3172 host_ifconf
->ifc_len
= target_ifc_len
;
3174 /* restore target ifc_buf */
3176 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3178 /* copy struct ifconf to target user */
3180 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3182 return -TARGET_EFAULT
;
3183 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3184 unlock_user(argptr
, arg
, target_size
);
3186 /* copy ifreq[] to target user */
3188 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3189 for (i
= 0; i
< nb_ifreq
; i
++) {
3190 thunk_convert(argptr
+ i
* target_ifreq_size
,
3191 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3192 ifreq_arg_type
, THUNK_TARGET
);
3194 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3204 static IOCTLEntry ioctl_entries
[] = {
3205 #define IOCTL(cmd, access, ...) \
3206 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3207 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3208 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3213 /* ??? Implement proper locking for ioctls. */
3214 /* do_ioctl() Must return target values and target errnos. */
3215 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3217 const IOCTLEntry
*ie
;
3218 const argtype
*arg_type
;
3220 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3226 if (ie
->target_cmd
== 0) {
3227 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3228 return -TARGET_ENOSYS
;
3230 if (ie
->target_cmd
== cmd
)
3234 arg_type
= ie
->arg_type
;
3236 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3239 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3242 switch(arg_type
[0]) {
3245 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3250 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3254 target_size
= thunk_type_size(arg_type
, 0);
3255 switch(ie
->access
) {
3257 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3258 if (!is_error(ret
)) {
3259 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3261 return -TARGET_EFAULT
;
3262 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3263 unlock_user(argptr
, arg
, target_size
);
3267 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3269 return -TARGET_EFAULT
;
3270 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3271 unlock_user(argptr
, arg
, 0);
3272 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3276 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3278 return -TARGET_EFAULT
;
3279 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3280 unlock_user(argptr
, arg
, 0);
3281 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3282 if (!is_error(ret
)) {
3283 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3285 return -TARGET_EFAULT
;
3286 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3287 unlock_user(argptr
, arg
, target_size
);
3293 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3294 (long)cmd
, arg_type
[0]);
3295 ret
= -TARGET_ENOSYS
;
3301 static const bitmask_transtbl iflag_tbl
[] = {
3302 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3303 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3304 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3305 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3306 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3307 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3308 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3309 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3310 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3311 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3312 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3313 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3314 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3315 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3319 static const bitmask_transtbl oflag_tbl
[] = {
3320 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3321 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3322 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3323 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3324 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3325 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3326 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3327 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3328 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3329 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3330 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3331 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3332 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3333 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3334 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3335 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3336 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3337 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3338 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3339 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3340 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3341 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3342 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3343 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3347 static const bitmask_transtbl cflag_tbl
[] = {
3348 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3349 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3350 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3351 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3352 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3353 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3354 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3355 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3356 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3357 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3358 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3359 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3360 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3361 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3362 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3363 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3364 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3365 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3366 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3367 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3368 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3369 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3370 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3371 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3372 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3373 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3374 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3375 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3376 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3377 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3378 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3382 static const bitmask_transtbl lflag_tbl
[] = {
3383 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3384 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3385 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3386 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3387 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3388 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3389 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3390 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3391 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3392 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3393 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3394 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3395 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3396 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3397 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3401 static void target_to_host_termios (void *dst
, const void *src
)
3403 struct host_termios
*host
= dst
;
3404 const struct target_termios
*target
= src
;
3407 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3409 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3411 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3413 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3414 host
->c_line
= target
->c_line
;
3416 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3417 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3418 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3419 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3420 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3421 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3422 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3423 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3424 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3425 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3426 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3427 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3428 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3429 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3430 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3431 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3432 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3433 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3436 static void host_to_target_termios (void *dst
, const void *src
)
3438 struct target_termios
*target
= dst
;
3439 const struct host_termios
*host
= src
;
3442 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3444 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3446 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3448 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3449 target
->c_line
= host
->c_line
;
3451 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3452 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3453 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3454 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3455 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3456 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3457 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3458 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3459 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3460 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3461 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3462 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3463 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3464 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3465 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3466 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3467 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3468 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3471 static const StructEntry struct_termios_def
= {
3472 .convert
= { host_to_target_termios
, target_to_host_termios
},
3473 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3474 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3477 static bitmask_transtbl mmap_flags_tbl
[] = {
3478 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3479 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3480 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3481 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3482 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3483 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3484 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3485 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3489 #if defined(TARGET_I386)
3491 /* NOTE: there is really one LDT for all the threads */
3492 static uint8_t *ldt_table
;
3494 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3501 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3502 if (size
> bytecount
)
3504 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3506 return -TARGET_EFAULT
;
3507 /* ??? Should this by byteswapped? */
3508 memcpy(p
, ldt_table
, size
);
3509 unlock_user(p
, ptr
, size
);
3513 /* XXX: add locking support */
3514 static abi_long
write_ldt(CPUX86State
*env
,
3515 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3517 struct target_modify_ldt_ldt_s ldt_info
;
3518 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3519 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3520 int seg_not_present
, useable
, lm
;
3521 uint32_t *lp
, entry_1
, entry_2
;
3523 if (bytecount
!= sizeof(ldt_info
))
3524 return -TARGET_EINVAL
;
3525 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3526 return -TARGET_EFAULT
;
3527 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3528 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3529 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3530 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3531 unlock_user_struct(target_ldt_info
, ptr
, 0);
3533 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3534 return -TARGET_EINVAL
;
3535 seg_32bit
= ldt_info
.flags
& 1;
3536 contents
= (ldt_info
.flags
>> 1) & 3;
3537 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3538 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3539 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3540 useable
= (ldt_info
.flags
>> 6) & 1;
3544 lm
= (ldt_info
.flags
>> 7) & 1;
3546 if (contents
== 3) {
3548 return -TARGET_EINVAL
;
3549 if (seg_not_present
== 0)
3550 return -TARGET_EINVAL
;
3552 /* allocate the LDT */
3554 env
->ldt
.base
= target_mmap(0,
3555 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3556 PROT_READ
|PROT_WRITE
,
3557 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3558 if (env
->ldt
.base
== -1)
3559 return -TARGET_ENOMEM
;
3560 memset(g2h(env
->ldt
.base
), 0,
3561 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3562 env
->ldt
.limit
= 0xffff;
3563 ldt_table
= g2h(env
->ldt
.base
);
3566 /* NOTE: same code as Linux kernel */
3567 /* Allow LDTs to be cleared by the user. */
3568 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3571 read_exec_only
== 1 &&
3573 limit_in_pages
== 0 &&
3574 seg_not_present
== 1 &&
3582 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3583 (ldt_info
.limit
& 0x0ffff);
3584 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3585 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3586 (ldt_info
.limit
& 0xf0000) |
3587 ((read_exec_only
^ 1) << 9) |
3589 ((seg_not_present
^ 1) << 15) |
3591 (limit_in_pages
<< 23) |
3595 entry_2
|= (useable
<< 20);
3597 /* Install the new entry ... */
3599 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3600 lp
[0] = tswap32(entry_1
);
3601 lp
[1] = tswap32(entry_2
);
3605 /* specific and weird i386 syscalls */
3606 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3607 unsigned long bytecount
)
3613 ret
= read_ldt(ptr
, bytecount
);
3616 ret
= write_ldt(env
, ptr
, bytecount
, 1);
3619 ret
= write_ldt(env
, ptr
, bytecount
, 0);
3622 ret
= -TARGET_ENOSYS
;
3628 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3629 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3631 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3632 struct target_modify_ldt_ldt_s ldt_info
;
3633 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3634 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3635 int seg_not_present
, useable
, lm
;
3636 uint32_t *lp
, entry_1
, entry_2
;
3639 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3640 if (!target_ldt_info
)
3641 return -TARGET_EFAULT
;
3642 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3643 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3644 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3645 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3646 if (ldt_info
.entry_number
== -1) {
3647 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
3648 if (gdt_table
[i
] == 0) {
3649 ldt_info
.entry_number
= i
;
3650 target_ldt_info
->entry_number
= tswap32(i
);
3655 unlock_user_struct(target_ldt_info
, ptr
, 1);
3657 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
3658 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
3659 return -TARGET_EINVAL
;
3660 seg_32bit
= ldt_info
.flags
& 1;
3661 contents
= (ldt_info
.flags
>> 1) & 3;
3662 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3663 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3664 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3665 useable
= (ldt_info
.flags
>> 6) & 1;
3669 lm
= (ldt_info
.flags
>> 7) & 1;
3672 if (contents
== 3) {
3673 if (seg_not_present
== 0)
3674 return -TARGET_EINVAL
;
3677 /* NOTE: same code as Linux kernel */
3678 /* Allow LDTs to be cleared by the user. */
3679 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3680 if ((contents
== 0 &&
3681 read_exec_only
== 1 &&
3683 limit_in_pages
== 0 &&
3684 seg_not_present
== 1 &&
3692 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3693 (ldt_info
.limit
& 0x0ffff);
3694 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3695 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3696 (ldt_info
.limit
& 0xf0000) |
3697 ((read_exec_only
^ 1) << 9) |
3699 ((seg_not_present
^ 1) << 15) |
3701 (limit_in_pages
<< 23) |
3706 /* Install the new entry ... */
3708 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
3709 lp
[0] = tswap32(entry_1
);
3710 lp
[1] = tswap32(entry_2
);
3714 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3716 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3717 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3718 uint32_t base_addr
, limit
, flags
;
3719 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
3720 int seg_not_present
, useable
, lm
;
3721 uint32_t *lp
, entry_1
, entry_2
;
3723 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3724 if (!target_ldt_info
)
3725 return -TARGET_EFAULT
;
3726 idx
= tswap32(target_ldt_info
->entry_number
);
3727 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
3728 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
3729 unlock_user_struct(target_ldt_info
, ptr
, 1);
3730 return -TARGET_EINVAL
;
3732 lp
= (uint32_t *)(gdt_table
+ idx
);
3733 entry_1
= tswap32(lp
[0]);
3734 entry_2
= tswap32(lp
[1]);
3736 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
3737 contents
= (entry_2
>> 10) & 3;
3738 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
3739 seg_32bit
= (entry_2
>> 22) & 1;
3740 limit_in_pages
= (entry_2
>> 23) & 1;
3741 useable
= (entry_2
>> 20) & 1;
3745 lm
= (entry_2
>> 21) & 1;
3747 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
3748 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
3749 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
3750 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
3751 base_addr
= (entry_1
>> 16) |
3752 (entry_2
& 0xff000000) |
3753 ((entry_2
& 0xff) << 16);
3754 target_ldt_info
->base_addr
= tswapl(base_addr
);
3755 target_ldt_info
->limit
= tswap32(limit
);
3756 target_ldt_info
->flags
= tswap32(flags
);
3757 unlock_user_struct(target_ldt_info
, ptr
, 1);
3760 #endif /* TARGET_I386 && TARGET_ABI32 */
3762 #ifndef TARGET_ABI32
3763 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
3770 case TARGET_ARCH_SET_GS
:
3771 case TARGET_ARCH_SET_FS
:
3772 if (code
== TARGET_ARCH_SET_GS
)
3776 cpu_x86_load_seg(env
, idx
, 0);
3777 env
->segs
[idx
].base
= addr
;
3779 case TARGET_ARCH_GET_GS
:
3780 case TARGET_ARCH_GET_FS
:
3781 if (code
== TARGET_ARCH_GET_GS
)
3785 val
= env
->segs
[idx
].base
;
3786 if (put_user(val
, addr
, abi_ulong
))
3787 return -TARGET_EFAULT
;
3790 ret
= -TARGET_EINVAL
;
3797 #endif /* defined(TARGET_I386) */
3799 #define NEW_STACK_SIZE 0x40000
3801 #if defined(CONFIG_USE_NPTL)
3803 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
3806 pthread_mutex_t mutex
;
3807 pthread_cond_t cond
;
3810 abi_ulong child_tidptr
;
3811 abi_ulong parent_tidptr
;
3815 static void *clone_func(void *arg
)
3817 new_thread_info
*info
= arg
;
3823 ts
= (TaskState
*)thread_env
->opaque
;
3824 info
->tid
= gettid();
3825 env
->host_tid
= info
->tid
;
3827 if (info
->child_tidptr
)
3828 put_user_u32(info
->tid
, info
->child_tidptr
);
3829 if (info
->parent_tidptr
)
3830 put_user_u32(info
->tid
, info
->parent_tidptr
);
3831 /* Enable signals. */
3832 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
3833 /* Signal to the parent that we're ready. */
3834 pthread_mutex_lock(&info
->mutex
);
3835 pthread_cond_broadcast(&info
->cond
);
3836 pthread_mutex_unlock(&info
->mutex
);
3837 /* Wait until the parent has finshed initializing the tls state. */
3838 pthread_mutex_lock(&clone_lock
);
3839 pthread_mutex_unlock(&clone_lock
);
3846 static int clone_func(void *arg
)
3848 CPUState
*env
= arg
;
3855 /* do_fork() Must return host values and target errnos (unlike most
3856 do_*() functions). */
3857 static int do_fork(CPUState
*env
, unsigned int flags
, abi_ulong newsp
,
3858 abi_ulong parent_tidptr
, target_ulong newtls
,
3859 abi_ulong child_tidptr
)
3864 #if defined(CONFIG_USE_NPTL)
3865 unsigned int nptl_flags
;
3871 /* Emulate vfork() with fork() */
3872 if (flags
& CLONE_VFORK
)
3873 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
3875 if (flags
& CLONE_VM
) {
3876 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
3877 #if defined(CONFIG_USE_NPTL)
3878 new_thread_info info
;
3879 pthread_attr_t attr
;
3881 ts
= qemu_mallocz(sizeof(TaskState
));
3882 init_task_state(ts
);
3883 /* we create a new CPU instance. */
3884 new_env
= cpu_copy(env
);
3885 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3888 /* Init regs that differ from the parent. */
3889 cpu_clone_regs(new_env
, newsp
);
3890 new_env
->opaque
= ts
;
3891 ts
->bprm
= parent_ts
->bprm
;
3892 ts
->info
= parent_ts
->info
;
3893 #if defined(CONFIG_USE_NPTL)
3895 flags
&= ~CLONE_NPTL_FLAGS2
;
3897 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
3898 ts
->child_tidptr
= child_tidptr
;
3901 if (nptl_flags
& CLONE_SETTLS
)
3902 cpu_set_tls (new_env
, newtls
);
3904 /* Grab a mutex so that thread setup appears atomic. */
3905 pthread_mutex_lock(&clone_lock
);
3907 memset(&info
, 0, sizeof(info
));
3908 pthread_mutex_init(&info
.mutex
, NULL
);
3909 pthread_mutex_lock(&info
.mutex
);
3910 pthread_cond_init(&info
.cond
, NULL
);
3912 if (nptl_flags
& CLONE_CHILD_SETTID
)
3913 info
.child_tidptr
= child_tidptr
;
3914 if (nptl_flags
& CLONE_PARENT_SETTID
)
3915 info
.parent_tidptr
= parent_tidptr
;
3917 ret
= pthread_attr_init(&attr
);
3918 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
3919 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
3920 /* It is not safe to deliver signals until the child has finished
3921 initializing, so temporarily block all signals. */
3922 sigfillset(&sigmask
);
3923 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
3925 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
3926 /* TODO: Free new CPU state if thread creation failed. */
3928 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
3929 pthread_attr_destroy(&attr
);
3931 /* Wait for the child to initialize. */
3932 pthread_cond_wait(&info
.cond
, &info
.mutex
);
3934 if (flags
& CLONE_PARENT_SETTID
)
3935 put_user_u32(ret
, parent_tidptr
);
3939 pthread_mutex_unlock(&info
.mutex
);
3940 pthread_cond_destroy(&info
.cond
);
3941 pthread_mutex_destroy(&info
.mutex
);
3942 pthread_mutex_unlock(&clone_lock
);
3944 if (flags
& CLONE_NPTL_FLAGS2
)
3946 /* This is probably going to die very quickly, but do it anyway. */
3947 new_stack
= qemu_mallocz (NEW_STACK_SIZE
);
3949 ret
= __clone2(clone_func
, new_stack
, NEW_STACK_SIZE
, flags
, new_env
);
3951 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
3955 /* if no CLONE_VM, we consider it is a fork */
3956 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
3961 /* Child Process. */
3962 cpu_clone_regs(env
, newsp
);
3964 #if defined(CONFIG_USE_NPTL)
3965 /* There is a race condition here. The parent process could
3966 theoretically read the TID in the child process before the child
3967 tid is set. This would require using either ptrace
3968 (not implemented) or having *_tidptr to point at a shared memory
3969 mapping. We can't repeat the spinlock hack used above because
3970 the child process gets its own copy of the lock. */
3971 if (flags
& CLONE_CHILD_SETTID
)
3972 put_user_u32(gettid(), child_tidptr
);
3973 if (flags
& CLONE_PARENT_SETTID
)
3974 put_user_u32(gettid(), parent_tidptr
);
3975 ts
= (TaskState
*)env
->opaque
;
3976 if (flags
& CLONE_SETTLS
)
3977 cpu_set_tls (env
, newtls
);
3978 if (flags
& CLONE_CHILD_CLEARTID
)
3979 ts
->child_tidptr
= child_tidptr
;
3988 /* warning : doesn't handle linux specific flags... */
3989 static int target_to_host_fcntl_cmd(int cmd
)
3992 case TARGET_F_DUPFD
:
3993 case TARGET_F_GETFD
:
3994 case TARGET_F_SETFD
:
3995 case TARGET_F_GETFL
:
3996 case TARGET_F_SETFL
:
3998 case TARGET_F_GETLK
:
4000 case TARGET_F_SETLK
:
4002 case TARGET_F_SETLKW
:
4004 case TARGET_F_GETOWN
:
4006 case TARGET_F_SETOWN
:
4008 case TARGET_F_GETSIG
:
4010 case TARGET_F_SETSIG
:
4012 #if TARGET_ABI_BITS == 32
4013 case TARGET_F_GETLK64
:
4015 case TARGET_F_SETLK64
:
4017 case TARGET_F_SETLKW64
:
4020 case TARGET_F_SETLEASE
:
4022 case TARGET_F_GETLEASE
:
4024 #ifdef F_DUPFD_CLOEXEC
4025 case TARGET_F_DUPFD_CLOEXEC
:
4026 return F_DUPFD_CLOEXEC
;
4028 case TARGET_F_NOTIFY
:
4031 return -TARGET_EINVAL
;
4033 return -TARGET_EINVAL
;
4036 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4039 struct target_flock
*target_fl
;
4040 struct flock64 fl64
;
4041 struct target_flock64
*target_fl64
;
4043 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4045 if (host_cmd
== -TARGET_EINVAL
)
4049 case TARGET_F_GETLK
:
4050 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4051 return -TARGET_EFAULT
;
4052 fl
.l_type
= tswap16(target_fl
->l_type
);
4053 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4054 fl
.l_start
= tswapl(target_fl
->l_start
);
4055 fl
.l_len
= tswapl(target_fl
->l_len
);
4056 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4057 unlock_user_struct(target_fl
, arg
, 0);
4058 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4060 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4061 return -TARGET_EFAULT
;
4062 target_fl
->l_type
= tswap16(fl
.l_type
);
4063 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4064 target_fl
->l_start
= tswapl(fl
.l_start
);
4065 target_fl
->l_len
= tswapl(fl
.l_len
);
4066 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4067 unlock_user_struct(target_fl
, arg
, 1);
4071 case TARGET_F_SETLK
:
4072 case TARGET_F_SETLKW
:
4073 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4074 return -TARGET_EFAULT
;
4075 fl
.l_type
= tswap16(target_fl
->l_type
);
4076 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4077 fl
.l_start
= tswapl(target_fl
->l_start
);
4078 fl
.l_len
= tswapl(target_fl
->l_len
);
4079 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4080 unlock_user_struct(target_fl
, arg
, 0);
4081 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4084 case TARGET_F_GETLK64
:
4085 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4086 return -TARGET_EFAULT
;
4087 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4088 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4089 fl64
.l_start
= tswapl(target_fl64
->l_start
);
4090 fl64
.l_len
= tswapl(target_fl64
->l_len
);
4091 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4092 unlock_user_struct(target_fl64
, arg
, 0);
4093 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4095 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4096 return -TARGET_EFAULT
;
4097 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
4098 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4099 target_fl64
->l_start
= tswapl(fl64
.l_start
);
4100 target_fl64
->l_len
= tswapl(fl64
.l_len
);
4101 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4102 unlock_user_struct(target_fl64
, arg
, 1);
4105 case TARGET_F_SETLK64
:
4106 case TARGET_F_SETLKW64
:
4107 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4108 return -TARGET_EFAULT
;
4109 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4110 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4111 fl64
.l_start
= tswapl(target_fl64
->l_start
);
4112 fl64
.l_len
= tswapl(target_fl64
->l_len
);
4113 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4114 unlock_user_struct(target_fl64
, arg
, 0);
4115 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4118 case TARGET_F_GETFL
:
4119 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4121 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4125 case TARGET_F_SETFL
:
4126 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4129 case TARGET_F_SETOWN
:
4130 case TARGET_F_GETOWN
:
4131 case TARGET_F_SETSIG
:
4132 case TARGET_F_GETSIG
:
4133 case TARGET_F_SETLEASE
:
4134 case TARGET_F_GETLEASE
:
4135 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4139 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4147 static inline int high2lowuid(int uid
)
4155 static inline int high2lowgid(int gid
)
4163 static inline int low2highuid(int uid
)
4165 if ((int16_t)uid
== -1)
4171 static inline int low2highgid(int gid
)
4173 if ((int16_t)gid
== -1)
4178 static inline int tswapid(int id
)
4182 #else /* !USE_UID16 */
4183 static inline int high2lowuid(int uid
)
4187 static inline int high2lowgid(int gid
)
4191 static inline int low2highuid(int uid
)
4195 static inline int low2highgid(int gid
)
4199 static inline int tswapid(int id
)
4203 #endif /* USE_UID16 */
4205 void syscall_init(void)
4208 const argtype
*arg_type
;
4212 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4213 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4214 #include "syscall_types.h"
4216 #undef STRUCT_SPECIAL
4218 /* we patch the ioctl size if necessary. We rely on the fact that
4219 no ioctl has all the bits at '1' in the size field */
4221 while (ie
->target_cmd
!= 0) {
4222 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4223 TARGET_IOC_SIZEMASK
) {
4224 arg_type
= ie
->arg_type
;
4225 if (arg_type
[0] != TYPE_PTR
) {
4226 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4231 size
= thunk_type_size(arg_type
, 0);
4232 ie
->target_cmd
= (ie
->target_cmd
&
4233 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4234 (size
<< TARGET_IOC_SIZESHIFT
);
4237 /* Build target_to_host_errno_table[] table from
4238 * host_to_target_errno_table[]. */
4239 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
4240 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4242 /* automatic consistency check if same arch */
4243 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4244 (defined(__x86_64__) && defined(TARGET_X86_64))
4245 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4246 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4247 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
4254 #if TARGET_ABI_BITS == 32
4255 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
4257 #ifdef TARGET_WORDS_BIGENDIAN
4258 return ((uint64_t)word0
<< 32) | word1
;
4260 return ((uint64_t)word1
<< 32) | word0
;
4263 #else /* TARGET_ABI_BITS == 32 */
4264 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
4268 #endif /* TARGET_ABI_BITS != 32 */
4270 #ifdef TARGET_NR_truncate64
4271 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
4277 if (((CPUARMState
*)cpu_env
)->eabi
)
4283 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
4287 #ifdef TARGET_NR_ftruncate64
4288 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
4294 if (((CPUARMState
*)cpu_env
)->eabi
)
4300 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
4304 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4305 abi_ulong target_addr
)
4307 struct target_timespec
*target_ts
;
4309 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4310 return -TARGET_EFAULT
;
4311 host_ts
->tv_sec
= tswapl(target_ts
->tv_sec
);
4312 host_ts
->tv_nsec
= tswapl(target_ts
->tv_nsec
);
4313 unlock_user_struct(target_ts
, target_addr
, 0);
4317 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4318 struct timespec
*host_ts
)
4320 struct target_timespec
*target_ts
;
4322 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4323 return -TARGET_EFAULT
;
4324 target_ts
->tv_sec
= tswapl(host_ts
->tv_sec
);
4325 target_ts
->tv_nsec
= tswapl(host_ts
->tv_nsec
);
4326 unlock_user_struct(target_ts
, target_addr
, 1);
4330 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4331 static inline abi_long
host_to_target_stat64(void *cpu_env
,
4332 abi_ulong target_addr
,
4333 struct stat
*host_st
)
4336 if (((CPUARMState
*)cpu_env
)->eabi
) {
4337 struct target_eabi_stat64
*target_st
;
4339 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4340 return -TARGET_EFAULT
;
4341 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
4342 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4343 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4344 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4345 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4347 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4348 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4349 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4350 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4351 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4352 __put_user(host_st
->st_size
, &target_st
->st_size
);
4353 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4354 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4355 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4356 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4357 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4358 unlock_user_struct(target_st
, target_addr
, 1);
4362 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4363 struct target_stat
*target_st
;
4365 struct target_stat64
*target_st
;
4368 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4369 return -TARGET_EFAULT
;
4370 memset(target_st
, 0, sizeof(*target_st
));
4371 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4372 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4373 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4374 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4376 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4377 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4378 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4379 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4380 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4381 /* XXX: better use of kernel struct */
4382 __put_user(host_st
->st_size
, &target_st
->st_size
);
4383 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4384 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4385 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4386 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4387 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4388 unlock_user_struct(target_st
, target_addr
, 1);
4395 #if defined(CONFIG_USE_NPTL)
4396 /* ??? Using host futex calls even when target atomic operations
4397 are not really atomic probably breaks things. However implementing
4398 futexes locally would make futexes shared between multiple processes
4399 tricky. However they're probably useless because guest atomic
4400 operations won't work either. */
4401 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
4402 target_ulong uaddr2
, int val3
)
4404 struct timespec ts
, *pts
;
4407 /* ??? We assume FUTEX_* constants are the same on both host
4409 #ifdef FUTEX_CMD_MASK
4410 base_op
= op
& FUTEX_CMD_MASK
;
4418 target_to_host_timespec(pts
, timeout
);
4422 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
4425 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4427 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4429 case FUTEX_CMP_REQUEUE
:
4431 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4432 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4433 But the prototype takes a `struct timespec *'; insert casts
4434 to satisfy the compiler. We do not need to tswap TIMEOUT
4435 since it's not compared to guest memory. */
4436 pts
= (struct timespec
*)(uintptr_t) timeout
;
4437 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
4439 (base_op
== FUTEX_CMP_REQUEUE
4443 return -TARGET_ENOSYS
;
4448 /* Map host to target signal numbers for the wait family of syscalls.
4449 Assume all other status bits are the same. */
4450 static int host_to_target_waitstatus(int status
)
4452 if (WIFSIGNALED(status
)) {
4453 return host_to_target_signal(WTERMSIG(status
)) | (status
& ~0x7f);
4455 if (WIFSTOPPED(status
)) {
4456 return (host_to_target_signal(WSTOPSIG(status
)) << 8)
4462 int get_osversion(void)
4464 static int osversion
;
4465 struct new_utsname buf
;
4470 if (qemu_uname_release
&& *qemu_uname_release
) {
4471 s
= qemu_uname_release
;
4473 if (sys_uname(&buf
))
4478 for (i
= 0; i
< 3; i
++) {
4480 while (*s
>= '0' && *s
<= '9') {
4485 tmp
= (tmp
<< 8) + n
;
4493 /* do_syscall() should always have a single exit point at the end so
4494 that actions, such as logging of syscall results, can be performed.
4495 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4496 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
4497 abi_long arg2
, abi_long arg3
, abi_long arg4
,
4498 abi_long arg5
, abi_long arg6
)
4506 gemu_log("syscall %d", num
);
4509 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
4512 case TARGET_NR_exit
:
4513 #ifdef CONFIG_USE_NPTL
4514 /* In old applications this may be used to implement _exit(2).
4515 However in threaded applictions it is used for thread termination,
4516 and _exit_group is used for application termination.
4517 Do thread termination if we have more then one thread. */
4518 /* FIXME: This probably breaks if a signal arrives. We should probably
4519 be disabling signals. */
4520 if (first_cpu
->next_cpu
) {
4528 while (p
&& p
!= (CPUState
*)cpu_env
) {
4529 lastp
= &p
->next_cpu
;
4532 /* If we didn't find the CPU for this thread then something is
4536 /* Remove the CPU from the list. */
4537 *lastp
= p
->next_cpu
;
4539 ts
= ((CPUState
*)cpu_env
)->opaque
;
4540 if (ts
->child_tidptr
) {
4541 put_user_u32(0, ts
->child_tidptr
);
4542 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
4554 gdb_exit(cpu_env
, arg1
);
4556 ret
= 0; /* avoid warning */
4558 case TARGET_NR_read
:
4562 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
4564 ret
= get_errno(read(arg1
, p
, arg3
));
4565 unlock_user(p
, arg2
, ret
);
4568 case TARGET_NR_write
:
4569 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
4571 ret
= get_errno(write(arg1
, p
, arg3
));
4572 unlock_user(p
, arg2
, 0);
4574 case TARGET_NR_open
:
4575 if (!(p
= lock_user_string(arg1
)))
4577 ret
= get_errno(open(path(p
),
4578 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
4580 unlock_user(p
, arg1
, 0);
4582 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4583 case TARGET_NR_openat
:
4584 if (!(p
= lock_user_string(arg2
)))
4586 ret
= get_errno(sys_openat(arg1
,
4588 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
4590 unlock_user(p
, arg2
, 0);
4593 case TARGET_NR_close
:
4594 ret
= get_errno(close(arg1
));
4599 case TARGET_NR_fork
:
4600 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
4602 #ifdef TARGET_NR_waitpid
4603 case TARGET_NR_waitpid
:
4606 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
4607 if (!is_error(ret
) && arg2
4608 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
4613 #ifdef TARGET_NR_waitid
4614 case TARGET_NR_waitid
:
4618 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
4619 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
4620 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
4622 host_to_target_siginfo(p
, &info
);
4623 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
4628 #ifdef TARGET_NR_creat /* not on alpha */
4629 case TARGET_NR_creat
:
4630 if (!(p
= lock_user_string(arg1
)))
4632 ret
= get_errno(creat(p
, arg2
));
4633 unlock_user(p
, arg1
, 0);
4636 case TARGET_NR_link
:
4639 p
= lock_user_string(arg1
);
4640 p2
= lock_user_string(arg2
);
4642 ret
= -TARGET_EFAULT
;
4644 ret
= get_errno(link(p
, p2
));
4645 unlock_user(p2
, arg2
, 0);
4646 unlock_user(p
, arg1
, 0);
4649 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4650 case TARGET_NR_linkat
:
4655 p
= lock_user_string(arg2
);
4656 p2
= lock_user_string(arg4
);
4658 ret
= -TARGET_EFAULT
;
4660 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
4661 unlock_user(p
, arg2
, 0);
4662 unlock_user(p2
, arg4
, 0);
4666 case TARGET_NR_unlink
:
4667 if (!(p
= lock_user_string(arg1
)))
4669 ret
= get_errno(unlink(p
));
4670 unlock_user(p
, arg1
, 0);
4672 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4673 case TARGET_NR_unlinkat
:
4674 if (!(p
= lock_user_string(arg2
)))
4676 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
4677 unlock_user(p
, arg2
, 0);
4680 case TARGET_NR_execve
:
4682 char **argp
, **envp
;
4685 abi_ulong guest_argp
;
4686 abi_ulong guest_envp
;
4692 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
4693 if (get_user_ual(addr
, gp
))
4701 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
4702 if (get_user_ual(addr
, gp
))
4709 argp
= alloca((argc
+ 1) * sizeof(void *));
4710 envp
= alloca((envc
+ 1) * sizeof(void *));
4712 for (gp
= guest_argp
, q
= argp
; gp
;
4713 gp
+= sizeof(abi_ulong
), q
++) {
4714 if (get_user_ual(addr
, gp
))
4718 if (!(*q
= lock_user_string(addr
)))
4723 for (gp
= guest_envp
, q
= envp
; gp
;
4724 gp
+= sizeof(abi_ulong
), q
++) {
4725 if (get_user_ual(addr
, gp
))
4729 if (!(*q
= lock_user_string(addr
)))
4734 if (!(p
= lock_user_string(arg1
)))
4736 ret
= get_errno(execve(p
, argp
, envp
));
4737 unlock_user(p
, arg1
, 0);
4742 ret
= -TARGET_EFAULT
;
4745 for (gp
= guest_argp
, q
= argp
; *q
;
4746 gp
+= sizeof(abi_ulong
), q
++) {
4747 if (get_user_ual(addr
, gp
)
4750 unlock_user(*q
, addr
, 0);
4752 for (gp
= guest_envp
, q
= envp
; *q
;
4753 gp
+= sizeof(abi_ulong
), q
++) {
4754 if (get_user_ual(addr
, gp
)
4757 unlock_user(*q
, addr
, 0);
4761 case TARGET_NR_chdir
:
4762 if (!(p
= lock_user_string(arg1
)))
4764 ret
= get_errno(chdir(p
));
4765 unlock_user(p
, arg1
, 0);
4767 #ifdef TARGET_NR_time
4768 case TARGET_NR_time
:
4771 ret
= get_errno(time(&host_time
));
4774 && put_user_sal(host_time
, arg1
))
4779 case TARGET_NR_mknod
:
4780 if (!(p
= lock_user_string(arg1
)))
4782 ret
= get_errno(mknod(p
, arg2
, arg3
));
4783 unlock_user(p
, arg1
, 0);
4785 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4786 case TARGET_NR_mknodat
:
4787 if (!(p
= lock_user_string(arg2
)))
4789 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
4790 unlock_user(p
, arg2
, 0);
4793 case TARGET_NR_chmod
:
4794 if (!(p
= lock_user_string(arg1
)))
4796 ret
= get_errno(chmod(p
, arg2
));
4797 unlock_user(p
, arg1
, 0);
4799 #ifdef TARGET_NR_break
4800 case TARGET_NR_break
:
4803 #ifdef TARGET_NR_oldstat
4804 case TARGET_NR_oldstat
:
4807 case TARGET_NR_lseek
:
4808 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
4810 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4811 /* Alpha specific */
4812 case TARGET_NR_getxpid
:
4813 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
4814 ret
= get_errno(getpid());
4817 #ifdef TARGET_NR_getpid
4818 case TARGET_NR_getpid
:
4819 ret
= get_errno(getpid());
4822 case TARGET_NR_mount
:
4824 /* need to look at the data field */
4826 p
= lock_user_string(arg1
);
4827 p2
= lock_user_string(arg2
);
4828 p3
= lock_user_string(arg3
);
4829 if (!p
|| !p2
|| !p3
)
4830 ret
= -TARGET_EFAULT
;
4832 /* FIXME - arg5 should be locked, but it isn't clear how to
4833 * do that since it's not guaranteed to be a NULL-terminated
4837 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
4839 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
4841 unlock_user(p
, arg1
, 0);
4842 unlock_user(p2
, arg2
, 0);
4843 unlock_user(p3
, arg3
, 0);
4846 #ifdef TARGET_NR_umount
4847 case TARGET_NR_umount
:
4848 if (!(p
= lock_user_string(arg1
)))
4850 ret
= get_errno(umount(p
));
4851 unlock_user(p
, arg1
, 0);
4854 #ifdef TARGET_NR_stime /* not on alpha */
4855 case TARGET_NR_stime
:
4858 if (get_user_sal(host_time
, arg1
))
4860 ret
= get_errno(stime(&host_time
));
4864 case TARGET_NR_ptrace
:
4866 #ifdef TARGET_NR_alarm /* not on alpha */
4867 case TARGET_NR_alarm
:
4871 #ifdef TARGET_NR_oldfstat
4872 case TARGET_NR_oldfstat
:
4875 #ifdef TARGET_NR_pause /* not on alpha */
4876 case TARGET_NR_pause
:
4877 ret
= get_errno(pause());
4880 #ifdef TARGET_NR_utime
4881 case TARGET_NR_utime
:
4883 struct utimbuf tbuf
, *host_tbuf
;
4884 struct target_utimbuf
*target_tbuf
;
4886 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
4888 tbuf
.actime
= tswapl(target_tbuf
->actime
);
4889 tbuf
.modtime
= tswapl(target_tbuf
->modtime
);
4890 unlock_user_struct(target_tbuf
, arg2
, 0);
4895 if (!(p
= lock_user_string(arg1
)))
4897 ret
= get_errno(utime(p
, host_tbuf
));
4898 unlock_user(p
, arg1
, 0);
4902 case TARGET_NR_utimes
:
4904 struct timeval
*tvp
, tv
[2];
4906 if (copy_from_user_timeval(&tv
[0], arg2
)
4907 || copy_from_user_timeval(&tv
[1],
4908 arg2
+ sizeof(struct target_timeval
)))
4914 if (!(p
= lock_user_string(arg1
)))
4916 ret
= get_errno(utimes(p
, tvp
));
4917 unlock_user(p
, arg1
, 0);
4920 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4921 case TARGET_NR_futimesat
:
4923 struct timeval
*tvp
, tv
[2];
4925 if (copy_from_user_timeval(&tv
[0], arg3
)
4926 || copy_from_user_timeval(&tv
[1],
4927 arg3
+ sizeof(struct target_timeval
)))
4933 if (!(p
= lock_user_string(arg2
)))
4935 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
4936 unlock_user(p
, arg2
, 0);
4940 #ifdef TARGET_NR_stty
4941 case TARGET_NR_stty
:
4944 #ifdef TARGET_NR_gtty
4945 case TARGET_NR_gtty
:
4948 case TARGET_NR_access
:
4949 if (!(p
= lock_user_string(arg1
)))
4951 ret
= get_errno(access(path(p
), arg2
));
4952 unlock_user(p
, arg1
, 0);
4954 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4955 case TARGET_NR_faccessat
:
4956 if (!(p
= lock_user_string(arg2
)))
4958 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
4959 unlock_user(p
, arg2
, 0);
4962 #ifdef TARGET_NR_nice /* not on alpha */
4963 case TARGET_NR_nice
:
4964 ret
= get_errno(nice(arg1
));
4967 #ifdef TARGET_NR_ftime
4968 case TARGET_NR_ftime
:
4971 case TARGET_NR_sync
:
4975 case TARGET_NR_kill
:
4976 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
4978 case TARGET_NR_rename
:
4981 p
= lock_user_string(arg1
);
4982 p2
= lock_user_string(arg2
);
4984 ret
= -TARGET_EFAULT
;
4986 ret
= get_errno(rename(p
, p2
));
4987 unlock_user(p2
, arg2
, 0);
4988 unlock_user(p
, arg1
, 0);
4991 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4992 case TARGET_NR_renameat
:
4995 p
= lock_user_string(arg2
);
4996 p2
= lock_user_string(arg4
);
4998 ret
= -TARGET_EFAULT
;
5000 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
5001 unlock_user(p2
, arg4
, 0);
5002 unlock_user(p
, arg2
, 0);
5006 case TARGET_NR_mkdir
:
5007 if (!(p
= lock_user_string(arg1
)))
5009 ret
= get_errno(mkdir(p
, arg2
));
5010 unlock_user(p
, arg1
, 0);
5012 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5013 case TARGET_NR_mkdirat
:
5014 if (!(p
= lock_user_string(arg2
)))
5016 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
5017 unlock_user(p
, arg2
, 0);
5020 case TARGET_NR_rmdir
:
5021 if (!(p
= lock_user_string(arg1
)))
5023 ret
= get_errno(rmdir(p
));
5024 unlock_user(p
, arg1
, 0);
5027 ret
= get_errno(dup(arg1
));
5029 case TARGET_NR_pipe
:
5030 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5032 #ifdef TARGET_NR_pipe2
5033 case TARGET_NR_pipe2
:
5034 ret
= do_pipe(cpu_env
, arg1
, arg2
, 1);
5037 case TARGET_NR_times
:
5039 struct target_tms
*tmsp
;
5041 ret
= get_errno(times(&tms
));
5043 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5046 tmsp
->tms_utime
= tswapl(host_to_target_clock_t(tms
.tms_utime
));
5047 tmsp
->tms_stime
= tswapl(host_to_target_clock_t(tms
.tms_stime
));
5048 tmsp
->tms_cutime
= tswapl(host_to_target_clock_t(tms
.tms_cutime
));
5049 tmsp
->tms_cstime
= tswapl(host_to_target_clock_t(tms
.tms_cstime
));
5052 ret
= host_to_target_clock_t(ret
);
5055 #ifdef TARGET_NR_prof
5056 case TARGET_NR_prof
:
5059 #ifdef TARGET_NR_signal
5060 case TARGET_NR_signal
:
5063 case TARGET_NR_acct
:
5065 ret
= get_errno(acct(NULL
));
5067 if (!(p
= lock_user_string(arg1
)))
5069 ret
= get_errno(acct(path(p
)));
5070 unlock_user(p
, arg1
, 0);
5073 #ifdef TARGET_NR_umount2 /* not on alpha */
5074 case TARGET_NR_umount2
:
5075 if (!(p
= lock_user_string(arg1
)))
5077 ret
= get_errno(umount2(p
, arg2
));
5078 unlock_user(p
, arg1
, 0);
5081 #ifdef TARGET_NR_lock
5082 case TARGET_NR_lock
:
5085 case TARGET_NR_ioctl
:
5086 ret
= do_ioctl(arg1
, arg2
, arg3
);
5088 case TARGET_NR_fcntl
:
5089 ret
= do_fcntl(arg1
, arg2
, arg3
);
5091 #ifdef TARGET_NR_mpx
5095 case TARGET_NR_setpgid
:
5096 ret
= get_errno(setpgid(arg1
, arg2
));
5098 #ifdef TARGET_NR_ulimit
5099 case TARGET_NR_ulimit
:
5102 #ifdef TARGET_NR_oldolduname
5103 case TARGET_NR_oldolduname
:
5106 case TARGET_NR_umask
:
5107 ret
= get_errno(umask(arg1
));
5109 case TARGET_NR_chroot
:
5110 if (!(p
= lock_user_string(arg1
)))
5112 ret
= get_errno(chroot(p
));
5113 unlock_user(p
, arg1
, 0);
5115 case TARGET_NR_ustat
:
5117 case TARGET_NR_dup2
:
5118 ret
= get_errno(dup2(arg1
, arg2
));
5120 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5121 case TARGET_NR_dup3
:
5122 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5125 #ifdef TARGET_NR_getppid /* not on alpha */
5126 case TARGET_NR_getppid
:
5127 ret
= get_errno(getppid());
5130 case TARGET_NR_getpgrp
:
5131 ret
= get_errno(getpgrp());
5133 case TARGET_NR_setsid
:
5134 ret
= get_errno(setsid());
5136 #ifdef TARGET_NR_sigaction
5137 case TARGET_NR_sigaction
:
5139 #if defined(TARGET_ALPHA)
5140 struct target_sigaction act
, oact
, *pact
= 0;
5141 struct target_old_sigaction
*old_act
;
5143 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5145 act
._sa_handler
= old_act
->_sa_handler
;
5146 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5147 act
.sa_flags
= old_act
->sa_flags
;
5148 act
.sa_restorer
= 0;
5149 unlock_user_struct(old_act
, arg2
, 0);
5152 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5153 if (!is_error(ret
) && arg3
) {
5154 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5156 old_act
->_sa_handler
= oact
._sa_handler
;
5157 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5158 old_act
->sa_flags
= oact
.sa_flags
;
5159 unlock_user_struct(old_act
, arg3
, 1);
5161 #elif defined(TARGET_MIPS)
5162 struct target_sigaction act
, oact
, *pact
, *old_act
;
5165 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5167 act
._sa_handler
= old_act
->_sa_handler
;
5168 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5169 act
.sa_flags
= old_act
->sa_flags
;
5170 unlock_user_struct(old_act
, arg2
, 0);
5176 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5178 if (!is_error(ret
) && arg3
) {
5179 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5181 old_act
->_sa_handler
= oact
._sa_handler
;
5182 old_act
->sa_flags
= oact
.sa_flags
;
5183 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5184 old_act
->sa_mask
.sig
[1] = 0;
5185 old_act
->sa_mask
.sig
[2] = 0;
5186 old_act
->sa_mask
.sig
[3] = 0;
5187 unlock_user_struct(old_act
, arg3
, 1);
5190 struct target_old_sigaction
*old_act
;
5191 struct target_sigaction act
, oact
, *pact
;
5193 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5195 act
._sa_handler
= old_act
->_sa_handler
;
5196 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5197 act
.sa_flags
= old_act
->sa_flags
;
5198 act
.sa_restorer
= old_act
->sa_restorer
;
5199 unlock_user_struct(old_act
, arg2
, 0);
5204 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5205 if (!is_error(ret
) && arg3
) {
5206 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5208 old_act
->_sa_handler
= oact
._sa_handler
;
5209 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5210 old_act
->sa_flags
= oact
.sa_flags
;
5211 old_act
->sa_restorer
= oact
.sa_restorer
;
5212 unlock_user_struct(old_act
, arg3
, 1);
5218 case TARGET_NR_rt_sigaction
:
5220 #if defined(TARGET_ALPHA)
5221 struct target_sigaction act
, oact
, *pact
= 0;
5222 struct target_rt_sigaction
*rt_act
;
5223 /* ??? arg4 == sizeof(sigset_t). */
5225 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5227 act
._sa_handler
= rt_act
->_sa_handler
;
5228 act
.sa_mask
= rt_act
->sa_mask
;
5229 act
.sa_flags
= rt_act
->sa_flags
;
5230 act
.sa_restorer
= arg5
;
5231 unlock_user_struct(rt_act
, arg2
, 0);
5234 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5235 if (!is_error(ret
) && arg3
) {
5236 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5238 rt_act
->_sa_handler
= oact
._sa_handler
;
5239 rt_act
->sa_mask
= oact
.sa_mask
;
5240 rt_act
->sa_flags
= oact
.sa_flags
;
5241 unlock_user_struct(rt_act
, arg3
, 1);
5244 struct target_sigaction
*act
;
5245 struct target_sigaction
*oact
;
5248 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5253 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5254 ret
= -TARGET_EFAULT
;
5255 goto rt_sigaction_fail
;
5259 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5262 unlock_user_struct(act
, arg2
, 0);
5264 unlock_user_struct(oact
, arg3
, 1);
5268 #ifdef TARGET_NR_sgetmask /* not on alpha */
5269 case TARGET_NR_sgetmask
:
5272 abi_ulong target_set
;
5273 sigprocmask(0, NULL
, &cur_set
);
5274 host_to_target_old_sigset(&target_set
, &cur_set
);
5279 #ifdef TARGET_NR_ssetmask /* not on alpha */
5280 case TARGET_NR_ssetmask
:
5282 sigset_t set
, oset
, cur_set
;
5283 abi_ulong target_set
= arg1
;
5284 sigprocmask(0, NULL
, &cur_set
);
5285 target_to_host_old_sigset(&set
, &target_set
);
5286 sigorset(&set
, &set
, &cur_set
);
5287 sigprocmask(SIG_SETMASK
, &set
, &oset
);
5288 host_to_target_old_sigset(&target_set
, &oset
);
5293 #ifdef TARGET_NR_sigprocmask
5294 case TARGET_NR_sigprocmask
:
5296 #if defined(TARGET_ALPHA)
5297 sigset_t set
, oldset
;
5302 case TARGET_SIG_BLOCK
:
5305 case TARGET_SIG_UNBLOCK
:
5308 case TARGET_SIG_SETMASK
:
5312 ret
= -TARGET_EINVAL
;
5316 target_to_host_old_sigset(&set
, &mask
);
5318 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
5320 if (!is_error(ret
)) {
5321 host_to_target_old_sigset(&mask
, &oldset
);
5323 ((CPUAlphaState
*)cpu_env
)->[IR_V0
] = 0; /* force no error */
5326 sigset_t set
, oldset
, *set_ptr
;
5331 case TARGET_SIG_BLOCK
:
5334 case TARGET_SIG_UNBLOCK
:
5337 case TARGET_SIG_SETMASK
:
5341 ret
= -TARGET_EINVAL
;
5344 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5346 target_to_host_old_sigset(&set
, p
);
5347 unlock_user(p
, arg2
, 0);
5353 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5354 if (!is_error(ret
) && arg3
) {
5355 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5357 host_to_target_old_sigset(p
, &oldset
);
5358 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5364 case TARGET_NR_rt_sigprocmask
:
5367 sigset_t set
, oldset
, *set_ptr
;
5371 case TARGET_SIG_BLOCK
:
5374 case TARGET_SIG_UNBLOCK
:
5377 case TARGET_SIG_SETMASK
:
5381 ret
= -TARGET_EINVAL
;
5384 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5386 target_to_host_sigset(&set
, p
);
5387 unlock_user(p
, arg2
, 0);
5393 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5394 if (!is_error(ret
) && arg3
) {
5395 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5397 host_to_target_sigset(p
, &oldset
);
5398 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5402 #ifdef TARGET_NR_sigpending
5403 case TARGET_NR_sigpending
:
5406 ret
= get_errno(sigpending(&set
));
5407 if (!is_error(ret
)) {
5408 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5410 host_to_target_old_sigset(p
, &set
);
5411 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5416 case TARGET_NR_rt_sigpending
:
5419 ret
= get_errno(sigpending(&set
));
5420 if (!is_error(ret
)) {
5421 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5423 host_to_target_sigset(p
, &set
);
5424 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5428 #ifdef TARGET_NR_sigsuspend
5429 case TARGET_NR_sigsuspend
:
5432 #if defined(TARGET_ALPHA)
5433 abi_ulong mask
= arg1
;
5434 target_to_host_old_sigset(&set
, &mask
);
5436 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5438 target_to_host_old_sigset(&set
, p
);
5439 unlock_user(p
, arg1
, 0);
5441 ret
= get_errno(sigsuspend(&set
));
5445 case TARGET_NR_rt_sigsuspend
:
5448 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5450 target_to_host_sigset(&set
, p
);
5451 unlock_user(p
, arg1
, 0);
5452 ret
= get_errno(sigsuspend(&set
));
5455 case TARGET_NR_rt_sigtimedwait
:
5458 struct timespec uts
, *puts
;
5461 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5463 target_to_host_sigset(&set
, p
);
5464 unlock_user(p
, arg1
, 0);
5467 target_to_host_timespec(puts
, arg3
);
5471 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
5472 if (!is_error(ret
) && arg2
) {
5473 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
5475 host_to_target_siginfo(p
, &uinfo
);
5476 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
5480 case TARGET_NR_rt_sigqueueinfo
:
5483 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
5485 target_to_host_siginfo(&uinfo
, p
);
5486 unlock_user(p
, arg1
, 0);
5487 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
5490 #ifdef TARGET_NR_sigreturn
5491 case TARGET_NR_sigreturn
:
5492 /* NOTE: ret is eax, so no transcoding needs to be done */
5493 ret
= do_sigreturn(cpu_env
);
5496 case TARGET_NR_rt_sigreturn
:
5497 /* NOTE: ret is eax, so no transcoding needs to be done */
5498 ret
= do_rt_sigreturn(cpu_env
);
5500 case TARGET_NR_sethostname
:
5501 if (!(p
= lock_user_string(arg1
)))
5503 ret
= get_errno(sethostname(p
, arg2
));
5504 unlock_user(p
, arg1
, 0);
5506 case TARGET_NR_setrlimit
:
5508 int resource
= arg1
;
5509 struct target_rlimit
*target_rlim
;
5511 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
5513 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
5514 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
5515 unlock_user_struct(target_rlim
, arg2
, 0);
5516 ret
= get_errno(setrlimit(resource
, &rlim
));
5519 case TARGET_NR_getrlimit
:
5521 int resource
= arg1
;
5522 struct target_rlimit
*target_rlim
;
5525 ret
= get_errno(getrlimit(resource
, &rlim
));
5526 if (!is_error(ret
)) {
5527 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5529 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
5530 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
5531 unlock_user_struct(target_rlim
, arg2
, 1);
5535 case TARGET_NR_getrusage
:
5537 struct rusage rusage
;
5538 ret
= get_errno(getrusage(arg1
, &rusage
));
5539 if (!is_error(ret
)) {
5540 host_to_target_rusage(arg2
, &rusage
);
5544 case TARGET_NR_gettimeofday
:
5547 ret
= get_errno(gettimeofday(&tv
, NULL
));
5548 if (!is_error(ret
)) {
5549 if (copy_to_user_timeval(arg1
, &tv
))
5554 case TARGET_NR_settimeofday
:
5557 if (copy_from_user_timeval(&tv
, arg1
))
5559 ret
= get_errno(settimeofday(&tv
, NULL
));
5562 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5563 case TARGET_NR_select
:
5565 struct target_sel_arg_struct
*sel
;
5566 abi_ulong inp
, outp
, exp
, tvp
;
5569 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
5571 nsel
= tswapl(sel
->n
);
5572 inp
= tswapl(sel
->inp
);
5573 outp
= tswapl(sel
->outp
);
5574 exp
= tswapl(sel
->exp
);
5575 tvp
= tswapl(sel
->tvp
);
5576 unlock_user_struct(sel
, arg1
, 0);
5577 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
5581 #ifdef TARGET_NR_pselect6
5582 case TARGET_NR_pselect6
:
5583 goto unimplemented_nowarn
;
5585 case TARGET_NR_symlink
:
5588 p
= lock_user_string(arg1
);
5589 p2
= lock_user_string(arg2
);
5591 ret
= -TARGET_EFAULT
;
5593 ret
= get_errno(symlink(p
, p2
));
5594 unlock_user(p2
, arg2
, 0);
5595 unlock_user(p
, arg1
, 0);
5598 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5599 case TARGET_NR_symlinkat
:
5602 p
= lock_user_string(arg1
);
5603 p2
= lock_user_string(arg3
);
5605 ret
= -TARGET_EFAULT
;
5607 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
5608 unlock_user(p2
, arg3
, 0);
5609 unlock_user(p
, arg1
, 0);
5613 #ifdef TARGET_NR_oldlstat
5614 case TARGET_NR_oldlstat
:
5617 case TARGET_NR_readlink
:
5620 p
= lock_user_string(arg1
);
5621 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
5623 ret
= -TARGET_EFAULT
;
5625 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
5626 char real
[PATH_MAX
];
5627 temp
= realpath(exec_path
,real
);
5628 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
5629 snprintf((char *)p2
, arg3
, "%s", real
);
5632 ret
= get_errno(readlink(path(p
), p2
, arg3
));
5634 unlock_user(p2
, arg2
, ret
);
5635 unlock_user(p
, arg1
, 0);
5638 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5639 case TARGET_NR_readlinkat
:
5642 p
= lock_user_string(arg2
);
5643 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
5645 ret
= -TARGET_EFAULT
;
5647 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
5648 unlock_user(p2
, arg3
, ret
);
5649 unlock_user(p
, arg2
, 0);
5653 #ifdef TARGET_NR_uselib
5654 case TARGET_NR_uselib
:
5657 #ifdef TARGET_NR_swapon
5658 case TARGET_NR_swapon
:
5659 if (!(p
= lock_user_string(arg1
)))
5661 ret
= get_errno(swapon(p
, arg2
));
5662 unlock_user(p
, arg1
, 0);
5665 case TARGET_NR_reboot
:
5667 #ifdef TARGET_NR_readdir
5668 case TARGET_NR_readdir
:
5671 #ifdef TARGET_NR_mmap
5672 case TARGET_NR_mmap
:
5673 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5674 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5675 || defined(TARGET_S390X)
5678 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
5679 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
5687 unlock_user(v
, arg1
, 0);
5688 ret
= get_errno(target_mmap(v1
, v2
, v3
,
5689 target_to_host_bitmask(v4
, mmap_flags_tbl
),
5693 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5694 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5700 #ifdef TARGET_NR_mmap2
5701 case TARGET_NR_mmap2
:
5703 #define MMAP_SHIFT 12
5705 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5706 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5708 arg6
<< MMAP_SHIFT
));
5711 case TARGET_NR_munmap
:
5712 ret
= get_errno(target_munmap(arg1
, arg2
));
5714 case TARGET_NR_mprotect
:
5716 TaskState
*ts
= ((CPUState
*)cpu_env
)->opaque
;
5717 /* Special hack to detect libc making the stack executable. */
5718 if ((arg3
& PROT_GROWSDOWN
)
5719 && arg1
>= ts
->info
->stack_limit
5720 && arg1
<= ts
->info
->start_stack
) {
5721 arg3
&= ~PROT_GROWSDOWN
;
5722 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
5723 arg1
= ts
->info
->stack_limit
;
5726 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
5728 #ifdef TARGET_NR_mremap
5729 case TARGET_NR_mremap
:
5730 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
5733 /* ??? msync/mlock/munlock are broken for softmmu. */
5734 #ifdef TARGET_NR_msync
5735 case TARGET_NR_msync
:
5736 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
5739 #ifdef TARGET_NR_mlock
5740 case TARGET_NR_mlock
:
5741 ret
= get_errno(mlock(g2h(arg1
), arg2
));
5744 #ifdef TARGET_NR_munlock
5745 case TARGET_NR_munlock
:
5746 ret
= get_errno(munlock(g2h(arg1
), arg2
));
5749 #ifdef TARGET_NR_mlockall
5750 case TARGET_NR_mlockall
:
5751 ret
= get_errno(mlockall(arg1
));
5754 #ifdef TARGET_NR_munlockall
5755 case TARGET_NR_munlockall
:
5756 ret
= get_errno(munlockall());
5759 case TARGET_NR_truncate
:
5760 if (!(p
= lock_user_string(arg1
)))
5762 ret
= get_errno(truncate(p
, arg2
));
5763 unlock_user(p
, arg1
, 0);
5765 case TARGET_NR_ftruncate
:
5766 ret
= get_errno(ftruncate(arg1
, arg2
));
5768 case TARGET_NR_fchmod
:
5769 ret
= get_errno(fchmod(arg1
, arg2
));
5771 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5772 case TARGET_NR_fchmodat
:
5773 if (!(p
= lock_user_string(arg2
)))
5775 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
5776 unlock_user(p
, arg2
, 0);
5779 case TARGET_NR_getpriority
:
5780 /* libc does special remapping of the return value of
5781 * sys_getpriority() so it's just easiest to call
5782 * sys_getpriority() directly rather than through libc. */
5783 ret
= get_errno(sys_getpriority(arg1
, arg2
));
5785 case TARGET_NR_setpriority
:
5786 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
5788 #ifdef TARGET_NR_profil
5789 case TARGET_NR_profil
:
5792 case TARGET_NR_statfs
:
5793 if (!(p
= lock_user_string(arg1
)))
5795 ret
= get_errno(statfs(path(p
), &stfs
));
5796 unlock_user(p
, arg1
, 0);
5798 if (!is_error(ret
)) {
5799 struct target_statfs
*target_stfs
;
5801 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
5803 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5804 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5805 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5806 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5807 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5808 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5809 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5810 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5811 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5812 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5813 unlock_user_struct(target_stfs
, arg2
, 1);
5816 case TARGET_NR_fstatfs
:
5817 ret
= get_errno(fstatfs(arg1
, &stfs
));
5818 goto convert_statfs
;
5819 #ifdef TARGET_NR_statfs64
5820 case TARGET_NR_statfs64
:
5821 if (!(p
= lock_user_string(arg1
)))
5823 ret
= get_errno(statfs(path(p
), &stfs
));
5824 unlock_user(p
, arg1
, 0);
5826 if (!is_error(ret
)) {
5827 struct target_statfs64
*target_stfs
;
5829 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
5831 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5832 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5833 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5834 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5835 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5836 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5837 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5838 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5839 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5840 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5841 unlock_user_struct(target_stfs
, arg3
, 1);
5844 case TARGET_NR_fstatfs64
:
5845 ret
= get_errno(fstatfs(arg1
, &stfs
));
5846 goto convert_statfs64
;
5848 #ifdef TARGET_NR_ioperm
5849 case TARGET_NR_ioperm
:
5852 #ifdef TARGET_NR_socketcall
5853 case TARGET_NR_socketcall
:
5854 ret
= do_socketcall(arg1
, arg2
);
5857 #ifdef TARGET_NR_accept
5858 case TARGET_NR_accept
:
5859 ret
= do_accept(arg1
, arg2
, arg3
);
5862 #ifdef TARGET_NR_bind
5863 case TARGET_NR_bind
:
5864 ret
= do_bind(arg1
, arg2
, arg3
);
5867 #ifdef TARGET_NR_connect
5868 case TARGET_NR_connect
:
5869 ret
= do_connect(arg1
, arg2
, arg3
);
5872 #ifdef TARGET_NR_getpeername
5873 case TARGET_NR_getpeername
:
5874 ret
= do_getpeername(arg1
, arg2
, arg3
);
5877 #ifdef TARGET_NR_getsockname
5878 case TARGET_NR_getsockname
:
5879 ret
= do_getsockname(arg1
, arg2
, arg3
);
5882 #ifdef TARGET_NR_getsockopt
5883 case TARGET_NR_getsockopt
:
5884 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
5887 #ifdef TARGET_NR_listen
5888 case TARGET_NR_listen
:
5889 ret
= get_errno(listen(arg1
, arg2
));
5892 #ifdef TARGET_NR_recv
5893 case TARGET_NR_recv
:
5894 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
5897 #ifdef TARGET_NR_recvfrom
5898 case TARGET_NR_recvfrom
:
5899 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5902 #ifdef TARGET_NR_recvmsg
5903 case TARGET_NR_recvmsg
:
5904 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
5907 #ifdef TARGET_NR_send
5908 case TARGET_NR_send
:
5909 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
5912 #ifdef TARGET_NR_sendmsg
5913 case TARGET_NR_sendmsg
:
5914 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
5917 #ifdef TARGET_NR_sendto
5918 case TARGET_NR_sendto
:
5919 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5922 #ifdef TARGET_NR_shutdown
5923 case TARGET_NR_shutdown
:
5924 ret
= get_errno(shutdown(arg1
, arg2
));
5927 #ifdef TARGET_NR_socket
5928 case TARGET_NR_socket
:
5929 ret
= do_socket(arg1
, arg2
, arg3
);
5932 #ifdef TARGET_NR_socketpair
5933 case TARGET_NR_socketpair
:
5934 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
5937 #ifdef TARGET_NR_setsockopt
5938 case TARGET_NR_setsockopt
:
5939 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
5943 case TARGET_NR_syslog
:
5944 if (!(p
= lock_user_string(arg2
)))
5946 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
5947 unlock_user(p
, arg2
, 0);
5950 case TARGET_NR_setitimer
:
5952 struct itimerval value
, ovalue
, *pvalue
;
5956 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
5957 || copy_from_user_timeval(&pvalue
->it_value
,
5958 arg2
+ sizeof(struct target_timeval
)))
5963 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
5964 if (!is_error(ret
) && arg3
) {
5965 if (copy_to_user_timeval(arg3
,
5966 &ovalue
.it_interval
)
5967 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
5973 case TARGET_NR_getitimer
:
5975 struct itimerval value
;
5977 ret
= get_errno(getitimer(arg1
, &value
));
5978 if (!is_error(ret
) && arg2
) {
5979 if (copy_to_user_timeval(arg2
,
5981 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
5987 case TARGET_NR_stat
:
5988 if (!(p
= lock_user_string(arg1
)))
5990 ret
= get_errno(stat(path(p
), &st
));
5991 unlock_user(p
, arg1
, 0);
5993 case TARGET_NR_lstat
:
5994 if (!(p
= lock_user_string(arg1
)))
5996 ret
= get_errno(lstat(path(p
), &st
));
5997 unlock_user(p
, arg1
, 0);
5999 case TARGET_NR_fstat
:
6001 ret
= get_errno(fstat(arg1
, &st
));
6003 if (!is_error(ret
)) {
6004 struct target_stat
*target_st
;
6006 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
6008 memset(target_st
, 0, sizeof(*target_st
));
6009 __put_user(st
.st_dev
, &target_st
->st_dev
);
6010 __put_user(st
.st_ino
, &target_st
->st_ino
);
6011 __put_user(st
.st_mode
, &target_st
->st_mode
);
6012 __put_user(st
.st_uid
, &target_st
->st_uid
);
6013 __put_user(st
.st_gid
, &target_st
->st_gid
);
6014 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
6015 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
6016 __put_user(st
.st_size
, &target_st
->st_size
);
6017 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
6018 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
6019 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
6020 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
6021 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
6022 unlock_user_struct(target_st
, arg2
, 1);
6026 #ifdef TARGET_NR_olduname
6027 case TARGET_NR_olduname
:
6030 #ifdef TARGET_NR_iopl
6031 case TARGET_NR_iopl
:
6034 case TARGET_NR_vhangup
:
6035 ret
= get_errno(vhangup());
6037 #ifdef TARGET_NR_idle
6038 case TARGET_NR_idle
:
6041 #ifdef TARGET_NR_syscall
6042 case TARGET_NR_syscall
:
6043 ret
= do_syscall(cpu_env
,arg1
& 0xffff,arg2
,arg3
,arg4
,arg5
,arg6
,0);
6046 case TARGET_NR_wait4
:
6049 abi_long status_ptr
= arg2
;
6050 struct rusage rusage
, *rusage_ptr
;
6051 abi_ulong target_rusage
= arg4
;
6053 rusage_ptr
= &rusage
;
6056 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6057 if (!is_error(ret
)) {
6059 status
= host_to_target_waitstatus(status
);
6060 if (put_user_s32(status
, status_ptr
))
6064 host_to_target_rusage(target_rusage
, &rusage
);
6068 #ifdef TARGET_NR_swapoff
6069 case TARGET_NR_swapoff
:
6070 if (!(p
= lock_user_string(arg1
)))
6072 ret
= get_errno(swapoff(p
));
6073 unlock_user(p
, arg1
, 0);
6076 case TARGET_NR_sysinfo
:
6078 struct target_sysinfo
*target_value
;
6079 struct sysinfo value
;
6080 ret
= get_errno(sysinfo(&value
));
6081 if (!is_error(ret
) && arg1
)
6083 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
6085 __put_user(value
.uptime
, &target_value
->uptime
);
6086 __put_user(value
.loads
[0], &target_value
->loads
[0]);
6087 __put_user(value
.loads
[1], &target_value
->loads
[1]);
6088 __put_user(value
.loads
[2], &target_value
->loads
[2]);
6089 __put_user(value
.totalram
, &target_value
->totalram
);
6090 __put_user(value
.freeram
, &target_value
->freeram
);
6091 __put_user(value
.sharedram
, &target_value
->sharedram
);
6092 __put_user(value
.bufferram
, &target_value
->bufferram
);
6093 __put_user(value
.totalswap
, &target_value
->totalswap
);
6094 __put_user(value
.freeswap
, &target_value
->freeswap
);
6095 __put_user(value
.procs
, &target_value
->procs
);
6096 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
6097 __put_user(value
.freehigh
, &target_value
->freehigh
);
6098 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
6099 unlock_user_struct(target_value
, arg1
, 1);
6103 #ifdef TARGET_NR_ipc
6105 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6108 #ifdef TARGET_NR_semget
6109 case TARGET_NR_semget
:
6110 ret
= get_errno(semget(arg1
, arg2
, arg3
));
6113 #ifdef TARGET_NR_semop
6114 case TARGET_NR_semop
:
6115 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
6118 #ifdef TARGET_NR_semctl
6119 case TARGET_NR_semctl
:
6120 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
6123 #ifdef TARGET_NR_msgctl
6124 case TARGET_NR_msgctl
:
6125 ret
= do_msgctl(arg1
, arg2
, arg3
);
6128 #ifdef TARGET_NR_msgget
6129 case TARGET_NR_msgget
:
6130 ret
= get_errno(msgget(arg1
, arg2
));
6133 #ifdef TARGET_NR_msgrcv
6134 case TARGET_NR_msgrcv
:
6135 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
6138 #ifdef TARGET_NR_msgsnd
6139 case TARGET_NR_msgsnd
:
6140 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
6143 #ifdef TARGET_NR_shmget
6144 case TARGET_NR_shmget
:
6145 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
6148 #ifdef TARGET_NR_shmctl
6149 case TARGET_NR_shmctl
:
6150 ret
= do_shmctl(arg1
, arg2
, arg3
);
6153 #ifdef TARGET_NR_shmat
6154 case TARGET_NR_shmat
:
6155 ret
= do_shmat(arg1
, arg2
, arg3
);
6158 #ifdef TARGET_NR_shmdt
6159 case TARGET_NR_shmdt
:
6160 ret
= do_shmdt(arg1
);
6163 case TARGET_NR_fsync
:
6164 ret
= get_errno(fsync(arg1
));
6166 case TARGET_NR_clone
:
6167 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6168 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
6169 #elif defined(TARGET_CRIS)
6170 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
6171 #elif defined(TARGET_S390X)
6172 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
6174 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
6177 #ifdef __NR_exit_group
6178 /* new thread calls */
6179 case TARGET_NR_exit_group
:
6183 gdb_exit(cpu_env
, arg1
);
6184 ret
= get_errno(exit_group(arg1
));
6187 case TARGET_NR_setdomainname
:
6188 if (!(p
= lock_user_string(arg1
)))
6190 ret
= get_errno(setdomainname(p
, arg2
));
6191 unlock_user(p
, arg1
, 0);
6193 case TARGET_NR_uname
:
6194 /* no need to transcode because we use the linux syscall */
6196 struct new_utsname
* buf
;
6198 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
6200 ret
= get_errno(sys_uname(buf
));
6201 if (!is_error(ret
)) {
6202 /* Overwrite the native machine name with whatever is being
6204 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
6205 /* Allow the user to override the reported release. */
6206 if (qemu_uname_release
&& *qemu_uname_release
)
6207 strcpy (buf
->release
, qemu_uname_release
);
6209 unlock_user_struct(buf
, arg1
, 1);
6213 case TARGET_NR_modify_ldt
:
6214 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
6216 #if !defined(TARGET_X86_64)
6217 case TARGET_NR_vm86old
:
6219 case TARGET_NR_vm86
:
6220 ret
= do_vm86(cpu_env
, arg1
, arg2
);
6224 case TARGET_NR_adjtimex
:
6226 #ifdef TARGET_NR_create_module
6227 case TARGET_NR_create_module
:
6229 case TARGET_NR_init_module
:
6230 case TARGET_NR_delete_module
:
6231 #ifdef TARGET_NR_get_kernel_syms
6232 case TARGET_NR_get_kernel_syms
:
6235 case TARGET_NR_quotactl
:
6237 case TARGET_NR_getpgid
:
6238 ret
= get_errno(getpgid(arg1
));
6240 case TARGET_NR_fchdir
:
6241 ret
= get_errno(fchdir(arg1
));
6243 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6244 case TARGET_NR_bdflush
:
6247 #ifdef TARGET_NR_sysfs
6248 case TARGET_NR_sysfs
:
6251 case TARGET_NR_personality
:
6252 ret
= get_errno(personality(arg1
));
6254 #ifdef TARGET_NR_afs_syscall
6255 case TARGET_NR_afs_syscall
:
6258 #ifdef TARGET_NR__llseek /* Not on alpha */
6259 case TARGET_NR__llseek
:
6262 #if !defined(__NR_llseek)
6263 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
6265 ret
= get_errno(res
);
6270 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
6272 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
6278 case TARGET_NR_getdents
:
6279 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6281 struct target_dirent
*target_dirp
;
6282 struct linux_dirent
*dirp
;
6283 abi_long count
= arg3
;
6285 dirp
= malloc(count
);
6287 ret
= -TARGET_ENOMEM
;
6291 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6292 if (!is_error(ret
)) {
6293 struct linux_dirent
*de
;
6294 struct target_dirent
*tde
;
6296 int reclen
, treclen
;
6297 int count1
, tnamelen
;
6301 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6305 reclen
= de
->d_reclen
;
6306 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
6307 tde
->d_reclen
= tswap16(treclen
);
6308 tde
->d_ino
= tswapl(de
->d_ino
);
6309 tde
->d_off
= tswapl(de
->d_off
);
6310 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
6313 /* XXX: may not be correct */
6314 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
6315 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6317 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
6321 unlock_user(target_dirp
, arg2
, ret
);
6327 struct linux_dirent
*dirp
;
6328 abi_long count
= arg3
;
6330 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6332 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6333 if (!is_error(ret
)) {
6334 struct linux_dirent
*de
;
6339 reclen
= de
->d_reclen
;
6342 de
->d_reclen
= tswap16(reclen
);
6343 tswapls(&de
->d_ino
);
6344 tswapls(&de
->d_off
);
6345 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6349 unlock_user(dirp
, arg2
, ret
);
6353 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6354 case TARGET_NR_getdents64
:
6356 struct linux_dirent64
*dirp
;
6357 abi_long count
= arg3
;
6358 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6360 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
6361 if (!is_error(ret
)) {
6362 struct linux_dirent64
*de
;
6367 reclen
= de
->d_reclen
;
6370 de
->d_reclen
= tswap16(reclen
);
6371 tswap64s((uint64_t *)&de
->d_ino
);
6372 tswap64s((uint64_t *)&de
->d_off
);
6373 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
6377 unlock_user(dirp
, arg2
, ret
);
6380 #endif /* TARGET_NR_getdents64 */
6381 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6383 case TARGET_NR_select
:
6385 case TARGET_NR__newselect
:
6387 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6390 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6391 # ifdef TARGET_NR_poll
6392 case TARGET_NR_poll
:
6394 # ifdef TARGET_NR_ppoll
6395 case TARGET_NR_ppoll
:
6398 struct target_pollfd
*target_pfd
;
6399 unsigned int nfds
= arg2
;
6404 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
6408 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
6409 for(i
= 0; i
< nfds
; i
++) {
6410 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
6411 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
6414 # ifdef TARGET_NR_ppoll
6415 if (num
== TARGET_NR_ppoll
) {
6416 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
6417 target_sigset_t
*target_set
;
6418 sigset_t _set
, *set
= &_set
;
6421 if (target_to_host_timespec(timeout_ts
, arg3
)) {
6422 unlock_user(target_pfd
, arg1
, 0);
6430 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
6432 unlock_user(target_pfd
, arg1
, 0);
6435 target_to_host_sigset(set
, target_set
);
6440 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
6442 if (!is_error(ret
) && arg3
) {
6443 host_to_target_timespec(arg3
, timeout_ts
);
6446 unlock_user(target_set
, arg4
, 0);
6450 ret
= get_errno(poll(pfd
, nfds
, timeout
));
6452 if (!is_error(ret
)) {
6453 for(i
= 0; i
< nfds
; i
++) {
6454 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
6457 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
6461 case TARGET_NR_flock
:
6462 /* NOTE: the flock constant seems to be the same for every
6464 ret
= get_errno(flock(arg1
, arg2
));
6466 case TARGET_NR_readv
:
6471 vec
= alloca(count
* sizeof(struct iovec
));
6472 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
6474 ret
= get_errno(readv(arg1
, vec
, count
));
6475 unlock_iovec(vec
, arg2
, count
, 1);
6478 case TARGET_NR_writev
:
6483 vec
= alloca(count
* sizeof(struct iovec
));
6484 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
6486 ret
= get_errno(writev(arg1
, vec
, count
));
6487 unlock_iovec(vec
, arg2
, count
, 0);
6490 case TARGET_NR_getsid
:
6491 ret
= get_errno(getsid(arg1
));
6493 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6494 case TARGET_NR_fdatasync
:
6495 ret
= get_errno(fdatasync(arg1
));
6498 case TARGET_NR__sysctl
:
6499 /* We don't implement this, but ENOTDIR is always a safe
6501 ret
= -TARGET_ENOTDIR
;
6503 case TARGET_NR_sched_getaffinity
:
6505 unsigned int mask_size
;
6506 unsigned long *mask
;
6509 * sched_getaffinity needs multiples of ulong, so need to take
6510 * care of mismatches between target ulong and host ulong sizes.
6512 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6513 ret
= -TARGET_EINVAL
;
6516 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6518 mask
= alloca(mask_size
);
6519 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
6521 if (!is_error(ret
)) {
6522 if (copy_to_user(arg3
, mask
, ret
)) {
6528 case TARGET_NR_sched_setaffinity
:
6530 unsigned int mask_size
;
6531 unsigned long *mask
;
6534 * sched_setaffinity needs multiples of ulong, so need to take
6535 * care of mismatches between target ulong and host ulong sizes.
6537 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6538 ret
= -TARGET_EINVAL
;
6541 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6543 mask
= alloca(mask_size
);
6544 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
6547 memcpy(mask
, p
, arg2
);
6548 unlock_user_struct(p
, arg2
, 0);
6550 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
6553 case TARGET_NR_sched_setparam
:
6555 struct sched_param
*target_schp
;
6556 struct sched_param schp
;
6558 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
6560 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6561 unlock_user_struct(target_schp
, arg2
, 0);
6562 ret
= get_errno(sched_setparam(arg1
, &schp
));
6565 case TARGET_NR_sched_getparam
:
6567 struct sched_param
*target_schp
;
6568 struct sched_param schp
;
6569 ret
= get_errno(sched_getparam(arg1
, &schp
));
6570 if (!is_error(ret
)) {
6571 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
6573 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
6574 unlock_user_struct(target_schp
, arg2
, 1);
6578 case TARGET_NR_sched_setscheduler
:
6580 struct sched_param
*target_schp
;
6581 struct sched_param schp
;
6582 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
6584 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6585 unlock_user_struct(target_schp
, arg3
, 0);
6586 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
6589 case TARGET_NR_sched_getscheduler
:
6590 ret
= get_errno(sched_getscheduler(arg1
));
6592 case TARGET_NR_sched_yield
:
6593 ret
= get_errno(sched_yield());
6595 case TARGET_NR_sched_get_priority_max
:
6596 ret
= get_errno(sched_get_priority_max(arg1
));
6598 case TARGET_NR_sched_get_priority_min
:
6599 ret
= get_errno(sched_get_priority_min(arg1
));
6601 case TARGET_NR_sched_rr_get_interval
:
6604 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
6605 if (!is_error(ret
)) {
6606 host_to_target_timespec(arg2
, &ts
);
6610 case TARGET_NR_nanosleep
:
6612 struct timespec req
, rem
;
6613 target_to_host_timespec(&req
, arg1
);
6614 ret
= get_errno(nanosleep(&req
, &rem
));
6615 if (is_error(ret
) && arg2
) {
6616 host_to_target_timespec(arg2
, &rem
);
6620 #ifdef TARGET_NR_query_module
6621 case TARGET_NR_query_module
:
6624 #ifdef TARGET_NR_nfsservctl
6625 case TARGET_NR_nfsservctl
:
6628 case TARGET_NR_prctl
:
6631 case PR_GET_PDEATHSIG
:
6634 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
6635 if (!is_error(ret
) && arg2
6636 && put_user_ual(deathsig
, arg2
))
6641 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
6645 #ifdef TARGET_NR_arch_prctl
6646 case TARGET_NR_arch_prctl
:
6647 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6648 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
6654 #ifdef TARGET_NR_pread
6655 case TARGET_NR_pread
:
6657 if (((CPUARMState
*)cpu_env
)->eabi
)
6660 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6662 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
6663 unlock_user(p
, arg2
, ret
);
6665 case TARGET_NR_pwrite
:
6667 if (((CPUARMState
*)cpu_env
)->eabi
)
6670 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6672 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
6673 unlock_user(p
, arg2
, 0);
6676 #ifdef TARGET_NR_pread64
6677 case TARGET_NR_pread64
:
6678 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6680 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6681 unlock_user(p
, arg2
, ret
);
6683 case TARGET_NR_pwrite64
:
6684 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6686 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6687 unlock_user(p
, arg2
, 0);
6690 case TARGET_NR_getcwd
:
6691 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
6693 ret
= get_errno(sys_getcwd1(p
, arg2
));
6694 unlock_user(p
, arg1
, ret
);
6696 case TARGET_NR_capget
:
6698 case TARGET_NR_capset
:
6700 case TARGET_NR_sigaltstack
:
6701 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6702 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6703 defined(TARGET_M68K) || defined(TARGET_S390X)
6704 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUState
*)cpu_env
));
6709 case TARGET_NR_sendfile
:
6711 #ifdef TARGET_NR_getpmsg
6712 case TARGET_NR_getpmsg
:
6715 #ifdef TARGET_NR_putpmsg
6716 case TARGET_NR_putpmsg
:
6719 #ifdef TARGET_NR_vfork
6720 case TARGET_NR_vfork
:
6721 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
6725 #ifdef TARGET_NR_ugetrlimit
6726 case TARGET_NR_ugetrlimit
:
6729 ret
= get_errno(getrlimit(arg1
, &rlim
));
6730 if (!is_error(ret
)) {
6731 struct target_rlimit
*target_rlim
;
6732 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6734 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6735 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6736 unlock_user_struct(target_rlim
, arg2
, 1);
6741 #ifdef TARGET_NR_truncate64
6742 case TARGET_NR_truncate64
:
6743 if (!(p
= lock_user_string(arg1
)))
6745 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
6746 unlock_user(p
, arg1
, 0);
6749 #ifdef TARGET_NR_ftruncate64
6750 case TARGET_NR_ftruncate64
:
6751 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
6754 #ifdef TARGET_NR_stat64
6755 case TARGET_NR_stat64
:
6756 if (!(p
= lock_user_string(arg1
)))
6758 ret
= get_errno(stat(path(p
), &st
));
6759 unlock_user(p
, arg1
, 0);
6761 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6764 #ifdef TARGET_NR_lstat64
6765 case TARGET_NR_lstat64
:
6766 if (!(p
= lock_user_string(arg1
)))
6768 ret
= get_errno(lstat(path(p
), &st
));
6769 unlock_user(p
, arg1
, 0);
6771 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6774 #ifdef TARGET_NR_fstat64
6775 case TARGET_NR_fstat64
:
6776 ret
= get_errno(fstat(arg1
, &st
));
6778 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6781 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6782 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6783 #ifdef TARGET_NR_fstatat64
6784 case TARGET_NR_fstatat64
:
6786 #ifdef TARGET_NR_newfstatat
6787 case TARGET_NR_newfstatat
:
6789 if (!(p
= lock_user_string(arg2
)))
6791 #ifdef __NR_fstatat64
6792 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
6794 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
6797 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
6800 case TARGET_NR_lchown
:
6801 if (!(p
= lock_user_string(arg1
)))
6803 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6804 unlock_user(p
, arg1
, 0);
6806 #ifdef TARGET_NR_getuid
6807 case TARGET_NR_getuid
:
6808 ret
= get_errno(high2lowuid(getuid()));
6811 #ifdef TARGET_NR_getgid
6812 case TARGET_NR_getgid
:
6813 ret
= get_errno(high2lowgid(getgid()));
6816 #ifdef TARGET_NR_geteuid
6817 case TARGET_NR_geteuid
:
6818 ret
= get_errno(high2lowuid(geteuid()));
6821 #ifdef TARGET_NR_getegid
6822 case TARGET_NR_getegid
:
6823 ret
= get_errno(high2lowgid(getegid()));
6826 case TARGET_NR_setreuid
:
6827 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
6829 case TARGET_NR_setregid
:
6830 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
6832 case TARGET_NR_getgroups
:
6834 int gidsetsize
= arg1
;
6835 target_id
*target_grouplist
;
6839 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6840 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6841 if (gidsetsize
== 0)
6843 if (!is_error(ret
)) {
6844 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
6845 if (!target_grouplist
)
6847 for(i
= 0;i
< ret
; i
++)
6848 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
6849 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
6853 case TARGET_NR_setgroups
:
6855 int gidsetsize
= arg1
;
6856 target_id
*target_grouplist
;
6860 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6861 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
6862 if (!target_grouplist
) {
6863 ret
= -TARGET_EFAULT
;
6866 for(i
= 0;i
< gidsetsize
; i
++)
6867 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
6868 unlock_user(target_grouplist
, arg2
, 0);
6869 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6872 case TARGET_NR_fchown
:
6873 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
6875 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6876 case TARGET_NR_fchownat
:
6877 if (!(p
= lock_user_string(arg2
)))
6879 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
6880 unlock_user(p
, arg2
, 0);
6883 #ifdef TARGET_NR_setresuid
6884 case TARGET_NR_setresuid
:
6885 ret
= get_errno(setresuid(low2highuid(arg1
),
6887 low2highuid(arg3
)));
6890 #ifdef TARGET_NR_getresuid
6891 case TARGET_NR_getresuid
:
6893 uid_t ruid
, euid
, suid
;
6894 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6895 if (!is_error(ret
)) {
6896 if (put_user_u16(high2lowuid(ruid
), arg1
)
6897 || put_user_u16(high2lowuid(euid
), arg2
)
6898 || put_user_u16(high2lowuid(suid
), arg3
))
6904 #ifdef TARGET_NR_getresgid
6905 case TARGET_NR_setresgid
:
6906 ret
= get_errno(setresgid(low2highgid(arg1
),
6908 low2highgid(arg3
)));
6911 #ifdef TARGET_NR_getresgid
6912 case TARGET_NR_getresgid
:
6914 gid_t rgid
, egid
, sgid
;
6915 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6916 if (!is_error(ret
)) {
6917 if (put_user_u16(high2lowgid(rgid
), arg1
)
6918 || put_user_u16(high2lowgid(egid
), arg2
)
6919 || put_user_u16(high2lowgid(sgid
), arg3
))
6925 case TARGET_NR_chown
:
6926 if (!(p
= lock_user_string(arg1
)))
6928 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6929 unlock_user(p
, arg1
, 0);
6931 case TARGET_NR_setuid
:
6932 ret
= get_errno(setuid(low2highuid(arg1
)));
6934 case TARGET_NR_setgid
:
6935 ret
= get_errno(setgid(low2highgid(arg1
)));
6937 case TARGET_NR_setfsuid
:
6938 ret
= get_errno(setfsuid(arg1
));
6940 case TARGET_NR_setfsgid
:
6941 ret
= get_errno(setfsgid(arg1
));
6944 #ifdef TARGET_NR_lchown32
6945 case TARGET_NR_lchown32
:
6946 if (!(p
= lock_user_string(arg1
)))
6948 ret
= get_errno(lchown(p
, arg2
, arg3
));
6949 unlock_user(p
, arg1
, 0);
6952 #ifdef TARGET_NR_getuid32
6953 case TARGET_NR_getuid32
:
6954 ret
= get_errno(getuid());
6958 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6959 /* Alpha specific */
6960 case TARGET_NR_getxuid
:
6964 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
6966 ret
= get_errno(getuid());
6969 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6970 /* Alpha specific */
6971 case TARGET_NR_getxgid
:
6975 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
6977 ret
= get_errno(getgid());
6980 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
6981 /* Alpha specific */
6982 case TARGET_NR_osf_getsysinfo
:
6983 ret
= -TARGET_EOPNOTSUPP
;
6985 case TARGET_GSI_IEEE_FP_CONTROL
:
6987 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
6989 /* Copied from linux ieee_fpcr_to_swcr. */
6990 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
6991 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
6992 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
6993 | SWCR_TRAP_ENABLE_DZE
6994 | SWCR_TRAP_ENABLE_OVF
);
6995 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
6996 | SWCR_TRAP_ENABLE_INE
);
6997 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
6998 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
7000 if (put_user_u64 (swcr
, arg2
))
7006 /* case GSI_IEEE_STATE_AT_SIGNAL:
7007 -- Not implemented in linux kernel.
7009 -- Retrieves current unaligned access state; not much used.
7011 -- Retrieves implver information; surely not used.
7013 -- Grabs a copy of the HWRPB; surely not used.
7018 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7019 /* Alpha specific */
7020 case TARGET_NR_osf_setsysinfo
:
7021 ret
= -TARGET_EOPNOTSUPP
;
7023 case TARGET_SSI_IEEE_FP_CONTROL
:
7024 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
7026 uint64_t swcr
, fpcr
, orig_fpcr
;
7028 if (get_user_u64 (swcr
, arg2
))
7030 orig_fpcr
= cpu_alpha_load_fpcr (cpu_env
);
7031 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
7033 /* Copied from linux ieee_swcr_to_fpcr. */
7034 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
7035 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
7036 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
7037 | SWCR_TRAP_ENABLE_DZE
7038 | SWCR_TRAP_ENABLE_OVF
)) << 48;
7039 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
7040 | SWCR_TRAP_ENABLE_INE
)) << 57;
7041 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
7042 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
7044 cpu_alpha_store_fpcr (cpu_env
, fpcr
);
7047 if (arg1
== TARGET_SSI_IEEE_RAISE_EXCEPTION
) {
7048 /* Old exceptions are not signaled. */
7049 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
7051 /* If any exceptions set by this call, and are unmasked,
7058 /* case SSI_NVPAIRS:
7059 -- Used with SSIN_UACPROC to enable unaligned accesses.
7060 case SSI_IEEE_STATE_AT_SIGNAL:
7061 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7062 -- Not implemented in linux kernel
7067 #ifdef TARGET_NR_osf_sigprocmask
7068 /* Alpha specific. */
7069 case TARGET_NR_osf_sigprocmask
:
7073 sigset_t set
, oldset
;
7076 case TARGET_SIG_BLOCK
:
7079 case TARGET_SIG_UNBLOCK
:
7082 case TARGET_SIG_SETMASK
:
7086 ret
= -TARGET_EINVAL
;
7090 target_to_host_old_sigset(&set
, &mask
);
7091 sigprocmask(arg1
, &set
, &oldset
);
7092 host_to_target_old_sigset(&mask
, &oldset
);
7098 #ifdef TARGET_NR_getgid32
7099 case TARGET_NR_getgid32
:
7100 ret
= get_errno(getgid());
7103 #ifdef TARGET_NR_geteuid32
7104 case TARGET_NR_geteuid32
:
7105 ret
= get_errno(geteuid());
7108 #ifdef TARGET_NR_getegid32
7109 case TARGET_NR_getegid32
:
7110 ret
= get_errno(getegid());
7113 #ifdef TARGET_NR_setreuid32
7114 case TARGET_NR_setreuid32
:
7115 ret
= get_errno(setreuid(arg1
, arg2
));
7118 #ifdef TARGET_NR_setregid32
7119 case TARGET_NR_setregid32
:
7120 ret
= get_errno(setregid(arg1
, arg2
));
7123 #ifdef TARGET_NR_getgroups32
7124 case TARGET_NR_getgroups32
:
7126 int gidsetsize
= arg1
;
7127 uint32_t *target_grouplist
;
7131 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7132 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7133 if (gidsetsize
== 0)
7135 if (!is_error(ret
)) {
7136 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
7137 if (!target_grouplist
) {
7138 ret
= -TARGET_EFAULT
;
7141 for(i
= 0;i
< ret
; i
++)
7142 target_grouplist
[i
] = tswap32(grouplist
[i
]);
7143 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
7148 #ifdef TARGET_NR_setgroups32
7149 case TARGET_NR_setgroups32
:
7151 int gidsetsize
= arg1
;
7152 uint32_t *target_grouplist
;
7156 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7157 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
7158 if (!target_grouplist
) {
7159 ret
= -TARGET_EFAULT
;
7162 for(i
= 0;i
< gidsetsize
; i
++)
7163 grouplist
[i
] = tswap32(target_grouplist
[i
]);
7164 unlock_user(target_grouplist
, arg2
, 0);
7165 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7169 #ifdef TARGET_NR_fchown32
7170 case TARGET_NR_fchown32
:
7171 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
7174 #ifdef TARGET_NR_setresuid32
7175 case TARGET_NR_setresuid32
:
7176 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
7179 #ifdef TARGET_NR_getresuid32
7180 case TARGET_NR_getresuid32
:
7182 uid_t ruid
, euid
, suid
;
7183 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7184 if (!is_error(ret
)) {
7185 if (put_user_u32(ruid
, arg1
)
7186 || put_user_u32(euid
, arg2
)
7187 || put_user_u32(suid
, arg3
))
7193 #ifdef TARGET_NR_setresgid32
7194 case TARGET_NR_setresgid32
:
7195 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
7198 #ifdef TARGET_NR_getresgid32
7199 case TARGET_NR_getresgid32
:
7201 gid_t rgid
, egid
, sgid
;
7202 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7203 if (!is_error(ret
)) {
7204 if (put_user_u32(rgid
, arg1
)
7205 || put_user_u32(egid
, arg2
)
7206 || put_user_u32(sgid
, arg3
))
7212 #ifdef TARGET_NR_chown32
7213 case TARGET_NR_chown32
:
7214 if (!(p
= lock_user_string(arg1
)))
7216 ret
= get_errno(chown(p
, arg2
, arg3
));
7217 unlock_user(p
, arg1
, 0);
7220 #ifdef TARGET_NR_setuid32
7221 case TARGET_NR_setuid32
:
7222 ret
= get_errno(setuid(arg1
));
7225 #ifdef TARGET_NR_setgid32
7226 case TARGET_NR_setgid32
:
7227 ret
= get_errno(setgid(arg1
));
7230 #ifdef TARGET_NR_setfsuid32
7231 case TARGET_NR_setfsuid32
:
7232 ret
= get_errno(setfsuid(arg1
));
7235 #ifdef TARGET_NR_setfsgid32
7236 case TARGET_NR_setfsgid32
:
7237 ret
= get_errno(setfsgid(arg1
));
7241 case TARGET_NR_pivot_root
:
7243 #ifdef TARGET_NR_mincore
7244 case TARGET_NR_mincore
:
7247 ret
= -TARGET_EFAULT
;
7248 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
7250 if (!(p
= lock_user_string(arg3
)))
7252 ret
= get_errno(mincore(a
, arg2
, p
));
7253 unlock_user(p
, arg3
, ret
);
7255 unlock_user(a
, arg1
, 0);
7259 #ifdef TARGET_NR_arm_fadvise64_64
7260 case TARGET_NR_arm_fadvise64_64
:
7263 * arm_fadvise64_64 looks like fadvise64_64 but
7264 * with different argument order
7272 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7273 #ifdef TARGET_NR_fadvise64_64
7274 case TARGET_NR_fadvise64_64
:
7276 #ifdef TARGET_NR_fadvise64
7277 case TARGET_NR_fadvise64
:
7281 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
7282 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
7283 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
7284 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
7288 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
7291 #ifdef TARGET_NR_madvise
7292 case TARGET_NR_madvise
:
7293 /* A straight passthrough may not be safe because qemu sometimes
7294 turns private flie-backed mappings into anonymous mappings.
7295 This will break MADV_DONTNEED.
7296 This is a hint, so ignoring and returning success is ok. */
7300 #if TARGET_ABI_BITS == 32
7301 case TARGET_NR_fcntl64
:
7305 struct target_flock64
*target_fl
;
7307 struct target_eabi_flock64
*target_efl
;
7310 cmd
= target_to_host_fcntl_cmd(arg2
);
7311 if (cmd
== -TARGET_EINVAL
)
7315 case TARGET_F_GETLK64
:
7317 if (((CPUARMState
*)cpu_env
)->eabi
) {
7318 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
7320 fl
.l_type
= tswap16(target_efl
->l_type
);
7321 fl
.l_whence
= tswap16(target_efl
->l_whence
);
7322 fl
.l_start
= tswap64(target_efl
->l_start
);
7323 fl
.l_len
= tswap64(target_efl
->l_len
);
7324 fl
.l_pid
= tswap32(target_efl
->l_pid
);
7325 unlock_user_struct(target_efl
, arg3
, 0);
7329 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
7331 fl
.l_type
= tswap16(target_fl
->l_type
);
7332 fl
.l_whence
= tswap16(target_fl
->l_whence
);
7333 fl
.l_start
= tswap64(target_fl
->l_start
);
7334 fl
.l_len
= tswap64(target_fl
->l_len
);
7335 fl
.l_pid
= tswap32(target_fl
->l_pid
);
7336 unlock_user_struct(target_fl
, arg3
, 0);
7338 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
7341 if (((CPUARMState
*)cpu_env
)->eabi
) {
7342 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
7344 target_efl
->l_type
= tswap16(fl
.l_type
);
7345 target_efl
->l_whence
= tswap16(fl
.l_whence
);
7346 target_efl
->l_start
= tswap64(fl
.l_start
);
7347 target_efl
->l_len
= tswap64(fl
.l_len
);
7348 target_efl
->l_pid
= tswap32(fl
.l_pid
);
7349 unlock_user_struct(target_efl
, arg3
, 1);
7353 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
7355 target_fl
->l_type
= tswap16(fl
.l_type
);
7356 target_fl
->l_whence
= tswap16(fl
.l_whence
);
7357 target_fl
->l_start
= tswap64(fl
.l_start
);
7358 target_fl
->l_len
= tswap64(fl
.l_len
);
7359 target_fl
->l_pid
= tswap32(fl
.l_pid
);
7360 unlock_user_struct(target_fl
, arg3
, 1);
7365 case TARGET_F_SETLK64
:
7366 case TARGET_F_SETLKW64
:
7368 if (((CPUARMState
*)cpu_env
)->eabi
) {
7369 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
7371 fl
.l_type
= tswap16(target_efl
->l_type
);
7372 fl
.l_whence
= tswap16(target_efl
->l_whence
);
7373 fl
.l_start
= tswap64(target_efl
->l_start
);
7374 fl
.l_len
= tswap64(target_efl
->l_len
);
7375 fl
.l_pid
= tswap32(target_efl
->l_pid
);
7376 unlock_user_struct(target_efl
, arg3
, 0);
7380 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
7382 fl
.l_type
= tswap16(target_fl
->l_type
);
7383 fl
.l_whence
= tswap16(target_fl
->l_whence
);
7384 fl
.l_start
= tswap64(target_fl
->l_start
);
7385 fl
.l_len
= tswap64(target_fl
->l_len
);
7386 fl
.l_pid
= tswap32(target_fl
->l_pid
);
7387 unlock_user_struct(target_fl
, arg3
, 0);
7389 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
7392 ret
= do_fcntl(arg1
, arg2
, arg3
);
7398 #ifdef TARGET_NR_cacheflush
7399 case TARGET_NR_cacheflush
:
7400 /* self-modifying code is handled automatically, so nothing needed */
7404 #ifdef TARGET_NR_security
7405 case TARGET_NR_security
:
7408 #ifdef TARGET_NR_getpagesize
7409 case TARGET_NR_getpagesize
:
7410 ret
= TARGET_PAGE_SIZE
;
7413 case TARGET_NR_gettid
:
7414 ret
= get_errno(gettid());
7416 #ifdef TARGET_NR_readahead
7417 case TARGET_NR_readahead
:
7418 #if TARGET_ABI_BITS == 32
7420 if (((CPUARMState
*)cpu_env
)->eabi
)
7427 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
7429 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
7433 #ifdef TARGET_NR_setxattr
7434 case TARGET_NR_setxattr
:
7435 case TARGET_NR_lsetxattr
:
7436 case TARGET_NR_fsetxattr
:
7437 case TARGET_NR_getxattr
:
7438 case TARGET_NR_lgetxattr
:
7439 case TARGET_NR_fgetxattr
:
7440 case TARGET_NR_listxattr
:
7441 case TARGET_NR_llistxattr
:
7442 case TARGET_NR_flistxattr
:
7443 case TARGET_NR_removexattr
:
7444 case TARGET_NR_lremovexattr
:
7445 case TARGET_NR_fremovexattr
:
7446 ret
= -TARGET_EOPNOTSUPP
;
7449 #ifdef TARGET_NR_set_thread_area
7450 case TARGET_NR_set_thread_area
:
7451 #if defined(TARGET_MIPS)
7452 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
7455 #elif defined(TARGET_CRIS)
7457 ret
= -TARGET_EINVAL
;
7459 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
7463 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7464 ret
= do_set_thread_area(cpu_env
, arg1
);
7467 goto unimplemented_nowarn
;
7470 #ifdef TARGET_NR_get_thread_area
7471 case TARGET_NR_get_thread_area
:
7472 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7473 ret
= do_get_thread_area(cpu_env
, arg1
);
7475 goto unimplemented_nowarn
;
7478 #ifdef TARGET_NR_getdomainname
7479 case TARGET_NR_getdomainname
:
7480 goto unimplemented_nowarn
;
7483 #ifdef TARGET_NR_clock_gettime
7484 case TARGET_NR_clock_gettime
:
7487 ret
= get_errno(clock_gettime(arg1
, &ts
));
7488 if (!is_error(ret
)) {
7489 host_to_target_timespec(arg2
, &ts
);
7494 #ifdef TARGET_NR_clock_getres
7495 case TARGET_NR_clock_getres
:
7498 ret
= get_errno(clock_getres(arg1
, &ts
));
7499 if (!is_error(ret
)) {
7500 host_to_target_timespec(arg2
, &ts
);
7505 #ifdef TARGET_NR_clock_nanosleep
7506 case TARGET_NR_clock_nanosleep
:
7509 target_to_host_timespec(&ts
, arg3
);
7510 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
7512 host_to_target_timespec(arg4
, &ts
);
7517 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7518 case TARGET_NR_set_tid_address
:
7519 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
7523 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7524 case TARGET_NR_tkill
:
7525 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
7529 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7530 case TARGET_NR_tgkill
:
7531 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
7532 target_to_host_signal(arg3
)));
7536 #ifdef TARGET_NR_set_robust_list
7537 case TARGET_NR_set_robust_list
:
7538 goto unimplemented_nowarn
;
7541 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7542 case TARGET_NR_utimensat
:
7544 struct timespec
*tsp
, ts
[2];
7548 target_to_host_timespec(ts
, arg3
);
7549 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
7553 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
7555 if (!(p
= lock_user_string(arg2
))) {
7556 ret
= -TARGET_EFAULT
;
7559 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
7560 unlock_user(p
, arg2
, 0);
7565 #if defined(CONFIG_USE_NPTL)
7566 case TARGET_NR_futex
:
7567 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7570 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7571 case TARGET_NR_inotify_init
:
7572 ret
= get_errno(sys_inotify_init());
7575 #ifdef CONFIG_INOTIFY1
7576 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7577 case TARGET_NR_inotify_init1
:
7578 ret
= get_errno(sys_inotify_init1(arg1
));
7582 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7583 case TARGET_NR_inotify_add_watch
:
7584 p
= lock_user_string(arg2
);
7585 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
7586 unlock_user(p
, arg2
, 0);
7589 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7590 case TARGET_NR_inotify_rm_watch
:
7591 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
7595 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7596 case TARGET_NR_mq_open
:
7598 struct mq_attr posix_mq_attr
;
7600 p
= lock_user_string(arg1
- 1);
7602 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
7603 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
7604 unlock_user (p
, arg1
, 0);
7608 case TARGET_NR_mq_unlink
:
7609 p
= lock_user_string(arg1
- 1);
7610 ret
= get_errno(mq_unlink(p
));
7611 unlock_user (p
, arg1
, 0);
7614 case TARGET_NR_mq_timedsend
:
7618 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
7620 target_to_host_timespec(&ts
, arg5
);
7621 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
7622 host_to_target_timespec(arg5
, &ts
);
7625 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
7626 unlock_user (p
, arg2
, arg3
);
7630 case TARGET_NR_mq_timedreceive
:
7635 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
7637 target_to_host_timespec(&ts
, arg5
);
7638 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
7639 host_to_target_timespec(arg5
, &ts
);
7642 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
7643 unlock_user (p
, arg2
, arg3
);
7645 put_user_u32(prio
, arg4
);
7649 /* Not implemented for now... */
7650 /* case TARGET_NR_mq_notify: */
7653 case TARGET_NR_mq_getsetattr
:
7655 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
7658 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
7659 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
7662 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
7663 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
7670 #ifdef CONFIG_SPLICE
7671 #ifdef TARGET_NR_tee
7674 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
7678 #ifdef TARGET_NR_splice
7679 case TARGET_NR_splice
:
7681 loff_t loff_in
, loff_out
;
7682 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
7684 get_user_u64(loff_in
, arg2
);
7685 ploff_in
= &loff_in
;
7688 get_user_u64(loff_out
, arg2
);
7689 ploff_out
= &loff_out
;
7691 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
7695 #ifdef TARGET_NR_vmsplice
7696 case TARGET_NR_vmsplice
:
7701 vec
= alloca(count
* sizeof(struct iovec
));
7702 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
7704 ret
= get_errno(vmsplice(arg1
, vec
, count
, arg4
));
7705 unlock_iovec(vec
, arg2
, count
, 0);
7709 #endif /* CONFIG_SPLICE */
7710 #ifdef CONFIG_EVENTFD
7711 #if defined(TARGET_NR_eventfd)
7712 case TARGET_NR_eventfd
:
7713 ret
= get_errno(eventfd(arg1
, 0));
7716 #if defined(TARGET_NR_eventfd2)
7717 case TARGET_NR_eventfd2
:
7718 ret
= get_errno(eventfd(arg1
, arg2
));
7721 #endif /* CONFIG_EVENTFD */
7722 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7723 case TARGET_NR_fallocate
:
7724 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
7727 #if defined(CONFIG_SYNC_FILE_RANGE)
7728 #if defined(TARGET_NR_sync_file_range)
7729 case TARGET_NR_sync_file_range
:
7730 #if TARGET_ABI_BITS == 32
7731 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
7732 target_offset64(arg4
, arg5
), arg6
));
7734 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
7738 #if defined(TARGET_NR_sync_file_range2)
7739 case TARGET_NR_sync_file_range2
:
7740 /* This is like sync_file_range but the arguments are reordered */
7741 #if TARGET_ABI_BITS == 32
7742 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
7743 target_offset64(arg5
, arg6
), arg2
));
7745 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
7750 #if defined(CONFIG_EPOLL)
7751 #if defined(TARGET_NR_epoll_create)
7752 case TARGET_NR_epoll_create
:
7753 ret
= get_errno(epoll_create(arg1
));
7756 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7757 case TARGET_NR_epoll_create1
:
7758 ret
= get_errno(epoll_create1(arg1
));
7761 #if defined(TARGET_NR_epoll_ctl)
7762 case TARGET_NR_epoll_ctl
:
7764 struct epoll_event ep
;
7765 struct epoll_event
*epp
= 0;
7767 struct target_epoll_event
*target_ep
;
7768 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
7771 ep
.events
= tswap32(target_ep
->events
);
7772 /* The epoll_data_t union is just opaque data to the kernel,
7773 * so we transfer all 64 bits across and need not worry what
7774 * actual data type it is.
7776 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
7777 unlock_user_struct(target_ep
, arg4
, 0);
7780 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
7785 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7786 #define IMPLEMENT_EPOLL_PWAIT
7788 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7789 #if defined(TARGET_NR_epoll_wait)
7790 case TARGET_NR_epoll_wait
:
7792 #if defined(IMPLEMENT_EPOLL_PWAIT)
7793 case TARGET_NR_epoll_pwait
:
7796 struct target_epoll_event
*target_ep
;
7797 struct epoll_event
*ep
;
7799 int maxevents
= arg3
;
7802 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
7803 maxevents
* sizeof(struct target_epoll_event
), 1);
7808 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
7811 #if defined(IMPLEMENT_EPOLL_PWAIT)
7812 case TARGET_NR_epoll_pwait
:
7814 target_sigset_t
*target_set
;
7815 sigset_t _set
, *set
= &_set
;
7818 target_set
= lock_user(VERIFY_READ
, arg5
,
7819 sizeof(target_sigset_t
), 1);
7821 unlock_user(target_ep
, arg2
, 0);
7824 target_to_host_sigset(set
, target_set
);
7825 unlock_user(target_set
, arg5
, 0);
7830 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
7834 #if defined(TARGET_NR_epoll_wait)
7835 case TARGET_NR_epoll_wait
:
7836 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
7840 ret
= -TARGET_ENOSYS
;
7842 if (!is_error(ret
)) {
7844 for (i
= 0; i
< ret
; i
++) {
7845 target_ep
[i
].events
= tswap32(ep
[i
].events
);
7846 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
7849 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
7856 gemu_log("qemu: Unsupported syscall: %d\n", num
);
7857 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
7858 unimplemented_nowarn
:
7860 ret
= -TARGET_ENOSYS
;
7865 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
7868 print_syscall_ret(num
, ret
);
7871 ret
= -TARGET_EFAULT
;