4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
31 #include <sys/types.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
45 int __clone2(int (*fn
)(void *), void *child_stack_base
,
46 size_t stack_size
, int flags
, void *arg
, ...);
48 #include <sys/socket.h>
52 #include <sys/times.h>
55 #include <sys/statfs.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include <linux/icmp.h>
64 #include "qemu-common.h"
69 #include <sys/eventfd.h>
72 #include <sys/epoll.h>
75 #include "qemu-xattr.h"
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/utsname.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
92 #include <linux/mtio.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
99 #include <linux/dm-ioctl.h>
100 #include "linux_loop.h"
101 #include "cpu-uname.h"
105 #if defined(CONFIG_USE_NPTL)
106 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
107 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
109 /* XXX: Hardcode the above values. */
110 #define CLONE_NPTL_FLAGS2 0
115 //#include <linux/msdos_fs.h>
116 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
117 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
128 #define _syscall0(type,name) \
129 static type name (void) \
131 return syscall(__NR_##name); \
134 #define _syscall1(type,name,type1,arg1) \
135 static type name (type1 arg1) \
137 return syscall(__NR_##name, arg1); \
140 #define _syscall2(type,name,type1,arg1,type2,arg2) \
141 static type name (type1 arg1,type2 arg2) \
143 return syscall(__NR_##name, arg1, arg2); \
146 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
147 static type name (type1 arg1,type2 arg2,type3 arg3) \
149 return syscall(__NR_##name, arg1, arg2, arg3); \
152 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
153 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
155 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
158 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
160 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
162 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
166 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
167 type5,arg5,type6,arg6) \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
171 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
175 #define __NR_sys_uname __NR_uname
176 #define __NR_sys_faccessat __NR_faccessat
177 #define __NR_sys_fchmodat __NR_fchmodat
178 #define __NR_sys_fchownat __NR_fchownat
179 #define __NR_sys_fstatat64 __NR_fstatat64
180 #define __NR_sys_futimesat __NR_futimesat
181 #define __NR_sys_getcwd1 __NR_getcwd
182 #define __NR_sys_getdents __NR_getdents
183 #define __NR_sys_getdents64 __NR_getdents64
184 #define __NR_sys_getpriority __NR_getpriority
185 #define __NR_sys_linkat __NR_linkat
186 #define __NR_sys_mkdirat __NR_mkdirat
187 #define __NR_sys_mknodat __NR_mknodat
188 #define __NR_sys_newfstatat __NR_newfstatat
189 #define __NR_sys_openat __NR_openat
190 #define __NR_sys_readlinkat __NR_readlinkat
191 #define __NR_sys_renameat __NR_renameat
192 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
193 #define __NR_sys_symlinkat __NR_symlinkat
194 #define __NR_sys_syslog __NR_syslog
195 #define __NR_sys_tgkill __NR_tgkill
196 #define __NR_sys_tkill __NR_tkill
197 #define __NR_sys_unlinkat __NR_unlinkat
198 #define __NR_sys_utimensat __NR_utimensat
199 #define __NR_sys_futex __NR_futex
200 #define __NR_sys_inotify_init __NR_inotify_init
201 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
202 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
204 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
206 #define __NR__llseek __NR_lseek
210 _syscall0(int, gettid
)
212 /* This is a replacement for the host gettid() and must return a host
214 static int gettid(void) {
218 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
219 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
220 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
222 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
223 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
224 loff_t
*, res
, uint
, wh
);
226 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
227 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
228 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
229 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
231 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
232 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
234 #ifdef __NR_exit_group
235 _syscall1(int,exit_group
,int,error_code
)
237 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
238 _syscall1(int,set_tid_address
,int *,tidptr
)
240 #if defined(CONFIG_USE_NPTL)
241 #if defined(TARGET_NR_futex) && defined(__NR_futex)
242 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
243 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
247 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
248 unsigned long *, user_mask_ptr
);
249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
250 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
251 unsigned long *, user_mask_ptr
);
252 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
255 static bitmask_transtbl fcntl_flags_tbl
[] = {
256 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
257 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
258 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
259 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
260 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
261 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
262 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
263 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
264 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
265 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
266 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
267 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
268 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
269 #if defined(O_DIRECT)
270 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
272 #if defined(O_NOATIME)
273 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
275 #if defined(O_CLOEXEC)
276 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
279 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
281 /* Don't terminate the list prematurely on 64-bit host+guest. */
282 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
283 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
288 #define COPY_UTSNAME_FIELD(dest, src) \
290 /* __NEW_UTS_LEN doesn't include terminating null */ \
291 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
292 (dest)[__NEW_UTS_LEN] = '\0'; \
295 static int sys_uname(struct new_utsname
*buf
)
297 struct utsname uts_buf
;
299 if (uname(&uts_buf
) < 0)
303 * Just in case these have some differences, we
304 * translate utsname to new_utsname (which is the
305 * struct linux kernel uses).
308 memset(buf
, 0, sizeof(*buf
));
309 COPY_UTSNAME_FIELD(buf
->sysname
, uts_buf
.sysname
);
310 COPY_UTSNAME_FIELD(buf
->nodename
, uts_buf
.nodename
);
311 COPY_UTSNAME_FIELD(buf
->release
, uts_buf
.release
);
312 COPY_UTSNAME_FIELD(buf
->version
, uts_buf
.version
);
313 COPY_UTSNAME_FIELD(buf
->machine
, uts_buf
.machine
);
315 COPY_UTSNAME_FIELD(buf
->domainname
, uts_buf
.domainname
);
319 #undef COPY_UTSNAME_FIELD
322 static int sys_getcwd1(char *buf
, size_t size
)
324 if (getcwd(buf
, size
) == NULL
) {
325 /* getcwd() sets errno */
328 return strlen(buf
)+1;
333 * Host system seems to have atfile syscall stubs available. We
334 * now enable them one by one as specified by target syscall_nr.h.
337 #ifdef TARGET_NR_faccessat
338 static int sys_faccessat(int dirfd
, const char *pathname
, int mode
)
340 return (faccessat(dirfd
, pathname
, mode
, 0));
343 #ifdef TARGET_NR_fchmodat
344 static int sys_fchmodat(int dirfd
, const char *pathname
, mode_t mode
)
346 return (fchmodat(dirfd
, pathname
, mode
, 0));
349 #if defined(TARGET_NR_fchownat)
350 static int sys_fchownat(int dirfd
, const char *pathname
, uid_t owner
,
351 gid_t group
, int flags
)
353 return (fchownat(dirfd
, pathname
, owner
, group
, flags
));
356 #ifdef __NR_fstatat64
357 static int sys_fstatat64(int dirfd
, const char *pathname
, struct stat
*buf
,
360 return (fstatat(dirfd
, pathname
, buf
, flags
));
363 #ifdef __NR_newfstatat
364 static int sys_newfstatat(int dirfd
, const char *pathname
, struct stat
*buf
,
367 return (fstatat(dirfd
, pathname
, buf
, flags
));
370 #ifdef TARGET_NR_futimesat
371 static int sys_futimesat(int dirfd
, const char *pathname
,
372 const struct timeval times
[2])
374 return (futimesat(dirfd
, pathname
, times
));
377 #ifdef TARGET_NR_linkat
378 static int sys_linkat(int olddirfd
, const char *oldpath
,
379 int newdirfd
, const char *newpath
, int flags
)
381 return (linkat(olddirfd
, oldpath
, newdirfd
, newpath
, flags
));
384 #ifdef TARGET_NR_mkdirat
385 static int sys_mkdirat(int dirfd
, const char *pathname
, mode_t mode
)
387 return (mkdirat(dirfd
, pathname
, mode
));
390 #ifdef TARGET_NR_mknodat
391 static int sys_mknodat(int dirfd
, const char *pathname
, mode_t mode
,
394 return (mknodat(dirfd
, pathname
, mode
, dev
));
397 #ifdef TARGET_NR_openat
398 static int sys_openat(int dirfd
, const char *pathname
, int flags
, mode_t mode
)
401 * open(2) has extra parameter 'mode' when called with
404 if ((flags
& O_CREAT
) != 0) {
405 return (openat(dirfd
, pathname
, flags
, mode
));
407 return (openat(dirfd
, pathname
, flags
));
410 #ifdef TARGET_NR_readlinkat
411 static int sys_readlinkat(int dirfd
, const char *pathname
, char *buf
, size_t bufsiz
)
413 return (readlinkat(dirfd
, pathname
, buf
, bufsiz
));
416 #ifdef TARGET_NR_renameat
417 static int sys_renameat(int olddirfd
, const char *oldpath
,
418 int newdirfd
, const char *newpath
)
420 return (renameat(olddirfd
, oldpath
, newdirfd
, newpath
));
423 #ifdef TARGET_NR_symlinkat
424 static int sys_symlinkat(const char *oldpath
, int newdirfd
, const char *newpath
)
426 return (symlinkat(oldpath
, newdirfd
, newpath
));
429 #ifdef TARGET_NR_unlinkat
430 static int sys_unlinkat(int dirfd
, const char *pathname
, int flags
)
432 return (unlinkat(dirfd
, pathname
, flags
));
435 #else /* !CONFIG_ATFILE */
438 * Try direct syscalls instead
440 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
441 _syscall3(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
)
443 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
444 _syscall3(int,sys_fchmodat
,int,dirfd
,const char *,pathname
, mode_t
,mode
)
446 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
447 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
448 uid_t
,owner
,gid_t
,group
,int,flags
)
450 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
451 defined(__NR_fstatat64)
452 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
453 struct stat
*,buf
,int,flags
)
455 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
456 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
457 const struct timeval
*,times
)
459 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
460 defined(__NR_newfstatat)
461 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
462 struct stat
*,buf
,int,flags
)
464 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
465 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
466 int,newdirfd
,const char *,newpath
,int,flags
)
468 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
469 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
471 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
472 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
473 mode_t
,mode
,dev_t
,dev
)
475 #if defined(TARGET_NR_openat) && defined(__NR_openat)
476 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
478 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
479 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
480 char *,buf
,size_t,bufsize
)
482 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
483 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
484 int,newdirfd
,const char *,newpath
)
486 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
487 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
488 int,newdirfd
,const char *,newpath
)
490 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
491 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
494 #endif /* CONFIG_ATFILE */
496 #ifdef CONFIG_UTIMENSAT
497 static int sys_utimensat(int dirfd
, const char *pathname
,
498 const struct timespec times
[2], int flags
)
500 if (pathname
== NULL
)
501 return futimens(dirfd
, times
);
503 return utimensat(dirfd
, pathname
, times
, flags
);
506 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
507 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
508 const struct timespec
*,tsp
,int,flags
)
510 #endif /* CONFIG_UTIMENSAT */
512 #ifdef CONFIG_INOTIFY
513 #include <sys/inotify.h>
515 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
516 static int sys_inotify_init(void)
518 return (inotify_init());
521 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
522 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
524 return (inotify_add_watch(fd
, pathname
, mask
));
527 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
528 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
530 return (inotify_rm_watch(fd
, wd
));
533 #ifdef CONFIG_INOTIFY1
534 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
535 static int sys_inotify_init1(int flags
)
537 return (inotify_init1(flags
));
542 /* Userspace can usually survive runtime without inotify */
543 #undef TARGET_NR_inotify_init
544 #undef TARGET_NR_inotify_init1
545 #undef TARGET_NR_inotify_add_watch
546 #undef TARGET_NR_inotify_rm_watch
547 #endif /* CONFIG_INOTIFY */
549 #if defined(TARGET_NR_ppoll)
551 # define __NR_ppoll -1
553 #define __NR_sys_ppoll __NR_ppoll
554 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
555 struct timespec
*, timeout
, const __sigset_t
*, sigmask
,
559 #if defined(TARGET_NR_pselect6)
560 #ifndef __NR_pselect6
561 # define __NR_pselect6 -1
563 #define __NR_sys_pselect6 __NR_pselect6
564 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
565 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
568 #if defined(TARGET_NR_prlimit64)
569 #ifndef __NR_prlimit64
570 # define __NR_prlimit64 -1
572 #define __NR_sys_prlimit64 __NR_prlimit64
573 /* The glibc rlimit structure may not be that used by the underlying syscall */
574 struct host_rlimit64
{
578 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
579 const struct host_rlimit64
*, new_limit
,
580 struct host_rlimit64
*, old_limit
)
583 extern int personality(int);
584 extern int flock(int, int);
585 extern int setfsuid(int);
586 extern int setfsgid(int);
587 extern int setgroups(int, gid_t
*);
589 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
591 static inline int regpairs_aligned(void *cpu_env
) {
592 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
594 #elif defined(TARGET_MIPS)
595 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
596 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
597 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
598 * of registers which translates to the same as ARM/MIPS, because we start with
600 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
602 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
605 #define ERRNO_TABLE_SIZE 1200
607 /* target_to_host_errno_table[] is initialized from
608 * host_to_target_errno_table[] in syscall_init(). */
609 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
613 * This list is the union of errno values overridden in asm-<arch>/errno.h
614 * minus the errnos that are not actually generic to all archs.
616 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
617 [EIDRM
] = TARGET_EIDRM
,
618 [ECHRNG
] = TARGET_ECHRNG
,
619 [EL2NSYNC
] = TARGET_EL2NSYNC
,
620 [EL3HLT
] = TARGET_EL3HLT
,
621 [EL3RST
] = TARGET_EL3RST
,
622 [ELNRNG
] = TARGET_ELNRNG
,
623 [EUNATCH
] = TARGET_EUNATCH
,
624 [ENOCSI
] = TARGET_ENOCSI
,
625 [EL2HLT
] = TARGET_EL2HLT
,
626 [EDEADLK
] = TARGET_EDEADLK
,
627 [ENOLCK
] = TARGET_ENOLCK
,
628 [EBADE
] = TARGET_EBADE
,
629 [EBADR
] = TARGET_EBADR
,
630 [EXFULL
] = TARGET_EXFULL
,
631 [ENOANO
] = TARGET_ENOANO
,
632 [EBADRQC
] = TARGET_EBADRQC
,
633 [EBADSLT
] = TARGET_EBADSLT
,
634 [EBFONT
] = TARGET_EBFONT
,
635 [ENOSTR
] = TARGET_ENOSTR
,
636 [ENODATA
] = TARGET_ENODATA
,
637 [ETIME
] = TARGET_ETIME
,
638 [ENOSR
] = TARGET_ENOSR
,
639 [ENONET
] = TARGET_ENONET
,
640 [ENOPKG
] = TARGET_ENOPKG
,
641 [EREMOTE
] = TARGET_EREMOTE
,
642 [ENOLINK
] = TARGET_ENOLINK
,
643 [EADV
] = TARGET_EADV
,
644 [ESRMNT
] = TARGET_ESRMNT
,
645 [ECOMM
] = TARGET_ECOMM
,
646 [EPROTO
] = TARGET_EPROTO
,
647 [EDOTDOT
] = TARGET_EDOTDOT
,
648 [EMULTIHOP
] = TARGET_EMULTIHOP
,
649 [EBADMSG
] = TARGET_EBADMSG
,
650 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
651 [EOVERFLOW
] = TARGET_EOVERFLOW
,
652 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
653 [EBADFD
] = TARGET_EBADFD
,
654 [EREMCHG
] = TARGET_EREMCHG
,
655 [ELIBACC
] = TARGET_ELIBACC
,
656 [ELIBBAD
] = TARGET_ELIBBAD
,
657 [ELIBSCN
] = TARGET_ELIBSCN
,
658 [ELIBMAX
] = TARGET_ELIBMAX
,
659 [ELIBEXEC
] = TARGET_ELIBEXEC
,
660 [EILSEQ
] = TARGET_EILSEQ
,
661 [ENOSYS
] = TARGET_ENOSYS
,
662 [ELOOP
] = TARGET_ELOOP
,
663 [ERESTART
] = TARGET_ERESTART
,
664 [ESTRPIPE
] = TARGET_ESTRPIPE
,
665 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
666 [EUSERS
] = TARGET_EUSERS
,
667 [ENOTSOCK
] = TARGET_ENOTSOCK
,
668 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
669 [EMSGSIZE
] = TARGET_EMSGSIZE
,
670 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
671 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
672 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
673 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
674 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
675 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
676 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
677 [EADDRINUSE
] = TARGET_EADDRINUSE
,
678 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
679 [ENETDOWN
] = TARGET_ENETDOWN
,
680 [ENETUNREACH
] = TARGET_ENETUNREACH
,
681 [ENETRESET
] = TARGET_ENETRESET
,
682 [ECONNABORTED
] = TARGET_ECONNABORTED
,
683 [ECONNRESET
] = TARGET_ECONNRESET
,
684 [ENOBUFS
] = TARGET_ENOBUFS
,
685 [EISCONN
] = TARGET_EISCONN
,
686 [ENOTCONN
] = TARGET_ENOTCONN
,
687 [EUCLEAN
] = TARGET_EUCLEAN
,
688 [ENOTNAM
] = TARGET_ENOTNAM
,
689 [ENAVAIL
] = TARGET_ENAVAIL
,
690 [EISNAM
] = TARGET_EISNAM
,
691 [EREMOTEIO
] = TARGET_EREMOTEIO
,
692 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
693 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
694 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
695 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
696 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
697 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
698 [EALREADY
] = TARGET_EALREADY
,
699 [EINPROGRESS
] = TARGET_EINPROGRESS
,
700 [ESTALE
] = TARGET_ESTALE
,
701 [ECANCELED
] = TARGET_ECANCELED
,
702 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
703 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
705 [ENOKEY
] = TARGET_ENOKEY
,
708 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
711 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
714 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
717 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
719 #ifdef ENOTRECOVERABLE
720 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
724 static inline int host_to_target_errno(int err
)
726 if(host_to_target_errno_table
[err
])
727 return host_to_target_errno_table
[err
];
731 static inline int target_to_host_errno(int err
)
733 if (target_to_host_errno_table
[err
])
734 return target_to_host_errno_table
[err
];
738 static inline abi_long
get_errno(abi_long ret
)
741 return -host_to_target_errno(errno
);
746 static inline int is_error(abi_long ret
)
748 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
751 char *target_strerror(int err
)
753 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
756 return strerror(target_to_host_errno(err
));
759 static abi_ulong target_brk
;
760 static abi_ulong target_original_brk
;
761 static abi_ulong brk_page
;
763 void target_set_brk(abi_ulong new_brk
)
765 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
766 brk_page
= HOST_PAGE_ALIGN(target_brk
);
769 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
770 #define DEBUGF_BRK(message, args...)
772 /* do_brk() must return target values and target errnos. */
773 abi_long
do_brk(abi_ulong new_brk
)
775 abi_long mapped_addr
;
778 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
781 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
784 if (new_brk
< target_original_brk
) {
785 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
790 /* If the new brk is less than the highest page reserved to the
791 * target heap allocation, set it and we're almost done... */
792 if (new_brk
<= brk_page
) {
793 /* Heap contents are initialized to zero, as for anonymous
795 if (new_brk
> target_brk
) {
796 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
798 target_brk
= new_brk
;
799 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
803 /* We need to allocate more memory after the brk... Note that
804 * we don't use MAP_FIXED because that will map over the top of
805 * any existing mapping (like the one with the host libc or qemu
806 * itself); instead we treat "mapped but at wrong address" as
807 * a failure and unmap again.
809 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
810 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
811 PROT_READ
|PROT_WRITE
,
812 MAP_ANON
|MAP_PRIVATE
, 0, 0));
814 if (mapped_addr
== brk_page
) {
815 /* Heap contents are initialized to zero, as for anonymous
816 * mapped pages. Technically the new pages are already
817 * initialized to zero since they *are* anonymous mapped
818 * pages, however we have to take care with the contents that
819 * come from the remaining part of the previous page: it may
820 * contains garbage data due to a previous heap usage (grown
822 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
824 target_brk
= new_brk
;
825 brk_page
= HOST_PAGE_ALIGN(target_brk
);
826 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
829 } else if (mapped_addr
!= -1) {
830 /* Mapped but at wrong address, meaning there wasn't actually
831 * enough space for this brk.
833 target_munmap(mapped_addr
, new_alloc_size
);
835 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
838 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
841 #if defined(TARGET_ALPHA)
842 /* We (partially) emulate OSF/1 on Alpha, which requires we
843 return a proper errno, not an unchanged brk value. */
844 return -TARGET_ENOMEM
;
846 /* For everything else, return the previous break. */
850 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
851 abi_ulong target_fds_addr
,
855 abi_ulong b
, *target_fds
;
857 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
858 if (!(target_fds
= lock_user(VERIFY_READ
,
860 sizeof(abi_ulong
) * nw
,
862 return -TARGET_EFAULT
;
866 for (i
= 0; i
< nw
; i
++) {
867 /* grab the abi_ulong */
868 __get_user(b
, &target_fds
[i
]);
869 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
870 /* check the bit inside the abi_ulong */
877 unlock_user(target_fds
, target_fds_addr
, 0);
882 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
883 abi_ulong target_fds_addr
,
886 if (target_fds_addr
) {
887 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
888 return -TARGET_EFAULT
;
896 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
902 abi_ulong
*target_fds
;
904 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
905 if (!(target_fds
= lock_user(VERIFY_WRITE
,
907 sizeof(abi_ulong
) * nw
,
909 return -TARGET_EFAULT
;
912 for (i
= 0; i
< nw
; i
++) {
914 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
915 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
918 __put_user(v
, &target_fds
[i
]);
921 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
926 #if defined(__alpha__)
932 static inline abi_long
host_to_target_clock_t(long ticks
)
934 #if HOST_HZ == TARGET_HZ
937 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
941 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
942 const struct rusage
*rusage
)
944 struct target_rusage
*target_rusage
;
946 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
947 return -TARGET_EFAULT
;
948 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
949 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
950 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
951 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
952 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
953 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
954 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
955 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
956 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
957 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
958 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
959 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
960 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
961 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
962 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
963 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
964 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
965 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
966 unlock_user_struct(target_rusage
, target_addr
, 1);
971 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
973 abi_ulong target_rlim_swap
;
976 target_rlim_swap
= tswapal(target_rlim
);
977 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
978 return RLIM_INFINITY
;
980 result
= target_rlim_swap
;
981 if (target_rlim_swap
!= (rlim_t
)result
)
982 return RLIM_INFINITY
;
987 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
989 abi_ulong target_rlim_swap
;
992 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
993 target_rlim_swap
= TARGET_RLIM_INFINITY
;
995 target_rlim_swap
= rlim
;
996 result
= tswapal(target_rlim_swap
);
1001 static inline int target_to_host_resource(int code
)
1004 case TARGET_RLIMIT_AS
:
1006 case TARGET_RLIMIT_CORE
:
1008 case TARGET_RLIMIT_CPU
:
1010 case TARGET_RLIMIT_DATA
:
1012 case TARGET_RLIMIT_FSIZE
:
1013 return RLIMIT_FSIZE
;
1014 case TARGET_RLIMIT_LOCKS
:
1015 return RLIMIT_LOCKS
;
1016 case TARGET_RLIMIT_MEMLOCK
:
1017 return RLIMIT_MEMLOCK
;
1018 case TARGET_RLIMIT_MSGQUEUE
:
1019 return RLIMIT_MSGQUEUE
;
1020 case TARGET_RLIMIT_NICE
:
1022 case TARGET_RLIMIT_NOFILE
:
1023 return RLIMIT_NOFILE
;
1024 case TARGET_RLIMIT_NPROC
:
1025 return RLIMIT_NPROC
;
1026 case TARGET_RLIMIT_RSS
:
1028 case TARGET_RLIMIT_RTPRIO
:
1029 return RLIMIT_RTPRIO
;
1030 case TARGET_RLIMIT_SIGPENDING
:
1031 return RLIMIT_SIGPENDING
;
1032 case TARGET_RLIMIT_STACK
:
1033 return RLIMIT_STACK
;
1039 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1040 abi_ulong target_tv_addr
)
1042 struct target_timeval
*target_tv
;
1044 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1045 return -TARGET_EFAULT
;
1047 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1048 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1050 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1055 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1056 const struct timeval
*tv
)
1058 struct target_timeval
*target_tv
;
1060 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1061 return -TARGET_EFAULT
;
1063 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1064 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1066 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1071 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1074 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1075 abi_ulong target_mq_attr_addr
)
1077 struct target_mq_attr
*target_mq_attr
;
1079 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1080 target_mq_attr_addr
, 1))
1081 return -TARGET_EFAULT
;
1083 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1084 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1085 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1086 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1088 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1093 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1094 const struct mq_attr
*attr
)
1096 struct target_mq_attr
*target_mq_attr
;
1098 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1099 target_mq_attr_addr
, 0))
1100 return -TARGET_EFAULT
;
1102 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1103 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1104 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1105 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1107 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1113 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1114 /* do_select() must return target values and target errnos. */
1115 static abi_long
do_select(int n
,
1116 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1117 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1119 fd_set rfds
, wfds
, efds
;
1120 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1121 struct timeval tv
, *tv_ptr
;
1124 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1128 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1132 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1137 if (target_tv_addr
) {
1138 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1139 return -TARGET_EFAULT
;
1145 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1147 if (!is_error(ret
)) {
1148 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1149 return -TARGET_EFAULT
;
1150 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1151 return -TARGET_EFAULT
;
1152 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1153 return -TARGET_EFAULT
;
1155 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1156 return -TARGET_EFAULT
;
1163 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1166 return pipe2(host_pipe
, flags
);
1172 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1173 int flags
, int is_pipe2
)
1177 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1180 return get_errno(ret
);
1182 /* Several targets have special calling conventions for the original
1183 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1185 #if defined(TARGET_ALPHA)
1186 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1187 return host_pipe
[0];
1188 #elif defined(TARGET_MIPS)
1189 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1190 return host_pipe
[0];
1191 #elif defined(TARGET_SH4)
1192 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1193 return host_pipe
[0];
1197 if (put_user_s32(host_pipe
[0], pipedes
)
1198 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1199 return -TARGET_EFAULT
;
1200 return get_errno(ret
);
1203 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1204 abi_ulong target_addr
,
1207 struct target_ip_mreqn
*target_smreqn
;
1209 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1211 return -TARGET_EFAULT
;
1212 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1213 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1214 if (len
== sizeof(struct target_ip_mreqn
))
1215 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1216 unlock_user(target_smreqn
, target_addr
, 0);
1221 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1222 abi_ulong target_addr
,
1225 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1226 sa_family_t sa_family
;
1227 struct target_sockaddr
*target_saddr
;
1229 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1231 return -TARGET_EFAULT
;
1233 sa_family
= tswap16(target_saddr
->sa_family
);
1235 /* Oops. The caller might send a incomplete sun_path; sun_path
1236 * must be terminated by \0 (see the manual page), but
1237 * unfortunately it is quite common to specify sockaddr_un
1238 * length as "strlen(x->sun_path)" while it should be
1239 * "strlen(...) + 1". We'll fix that here if needed.
1240 * Linux kernel has a similar feature.
1243 if (sa_family
== AF_UNIX
) {
1244 if (len
< unix_maxlen
&& len
> 0) {
1245 char *cp
= (char*)target_saddr
;
1247 if ( cp
[len
-1] && !cp
[len
] )
1250 if (len
> unix_maxlen
)
1254 memcpy(addr
, target_saddr
, len
);
1255 addr
->sa_family
= sa_family
;
1256 unlock_user(target_saddr
, target_addr
, 0);
1261 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1262 struct sockaddr
*addr
,
1265 struct target_sockaddr
*target_saddr
;
1267 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1269 return -TARGET_EFAULT
;
1270 memcpy(target_saddr
, addr
, len
);
1271 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1272 unlock_user(target_saddr
, target_addr
, len
);
1277 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1278 struct target_msghdr
*target_msgh
)
1280 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1281 abi_long msg_controllen
;
1282 abi_ulong target_cmsg_addr
;
1283 struct target_cmsghdr
*target_cmsg
;
1284 socklen_t space
= 0;
1286 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1287 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1289 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1290 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1292 return -TARGET_EFAULT
;
1294 while (cmsg
&& target_cmsg
) {
1295 void *data
= CMSG_DATA(cmsg
);
1296 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1298 int len
= tswapal(target_cmsg
->cmsg_len
)
1299 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1301 space
+= CMSG_SPACE(len
);
1302 if (space
> msgh
->msg_controllen
) {
1303 space
-= CMSG_SPACE(len
);
1304 gemu_log("Host cmsg overflow\n");
1308 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1309 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1310 cmsg
->cmsg_len
= CMSG_LEN(len
);
1312 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1313 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1314 memcpy(data
, target_data
, len
);
1316 int *fd
= (int *)data
;
1317 int *target_fd
= (int *)target_data
;
1318 int i
, numfds
= len
/ sizeof(int);
1320 for (i
= 0; i
< numfds
; i
++)
1321 fd
[i
] = tswap32(target_fd
[i
]);
1324 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1325 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1327 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1329 msgh
->msg_controllen
= space
;
1333 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1334 struct msghdr
*msgh
)
1336 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1337 abi_long msg_controllen
;
1338 abi_ulong target_cmsg_addr
;
1339 struct target_cmsghdr
*target_cmsg
;
1340 socklen_t space
= 0;
1342 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1343 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1345 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1346 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1348 return -TARGET_EFAULT
;
1350 while (cmsg
&& target_cmsg
) {
1351 void *data
= CMSG_DATA(cmsg
);
1352 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1354 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1356 space
+= TARGET_CMSG_SPACE(len
);
1357 if (space
> msg_controllen
) {
1358 space
-= TARGET_CMSG_SPACE(len
);
1359 gemu_log("Target cmsg overflow\n");
1363 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1364 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1365 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(len
));
1367 if ((cmsg
->cmsg_level
== TARGET_SOL_SOCKET
) &&
1368 (cmsg
->cmsg_type
== SCM_RIGHTS
)) {
1369 int *fd
= (int *)data
;
1370 int *target_fd
= (int *)target_data
;
1371 int i
, numfds
= len
/ sizeof(int);
1373 for (i
= 0; i
< numfds
; i
++)
1374 target_fd
[i
] = tswap32(fd
[i
]);
1375 } else if ((cmsg
->cmsg_level
== TARGET_SOL_SOCKET
) &&
1376 (cmsg
->cmsg_type
== SO_TIMESTAMP
) &&
1377 (len
== sizeof(struct timeval
))) {
1378 /* copy struct timeval to target */
1379 struct timeval
*tv
= (struct timeval
*)data
;
1380 struct target_timeval
*target_tv
=
1381 (struct target_timeval
*)target_data
;
1383 target_tv
->tv_sec
= tswapal(tv
->tv_sec
);
1384 target_tv
->tv_usec
= tswapal(tv
->tv_usec
);
1386 gemu_log("Unsupported ancillary data: %d/%d\n",
1387 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1388 memcpy(target_data
, data
, len
);
1391 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1392 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1394 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1396 target_msgh
->msg_controllen
= tswapal(space
);
1400 /* do_setsockopt() Must return target values and target errnos. */
1401 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1402 abi_ulong optval_addr
, socklen_t optlen
)
1406 struct ip_mreqn
*ip_mreq
;
1407 struct ip_mreq_source
*ip_mreq_source
;
1411 /* TCP options all take an 'int' value. */
1412 if (optlen
< sizeof(uint32_t))
1413 return -TARGET_EINVAL
;
1415 if (get_user_u32(val
, optval_addr
))
1416 return -TARGET_EFAULT
;
1417 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1424 case IP_ROUTER_ALERT
:
1428 case IP_MTU_DISCOVER
:
1434 case IP_MULTICAST_TTL
:
1435 case IP_MULTICAST_LOOP
:
1437 if (optlen
>= sizeof(uint32_t)) {
1438 if (get_user_u32(val
, optval_addr
))
1439 return -TARGET_EFAULT
;
1440 } else if (optlen
>= 1) {
1441 if (get_user_u8(val
, optval_addr
))
1442 return -TARGET_EFAULT
;
1444 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1446 case IP_ADD_MEMBERSHIP
:
1447 case IP_DROP_MEMBERSHIP
:
1448 if (optlen
< sizeof (struct target_ip_mreq
) ||
1449 optlen
> sizeof (struct target_ip_mreqn
))
1450 return -TARGET_EINVAL
;
1452 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1453 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1454 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1457 case IP_BLOCK_SOURCE
:
1458 case IP_UNBLOCK_SOURCE
:
1459 case IP_ADD_SOURCE_MEMBERSHIP
:
1460 case IP_DROP_SOURCE_MEMBERSHIP
:
1461 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1462 return -TARGET_EINVAL
;
1464 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1465 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1466 unlock_user (ip_mreq_source
, optval_addr
, 0);
1476 /* struct icmp_filter takes an u32 value */
1477 if (optlen
< sizeof(uint32_t)) {
1478 return -TARGET_EINVAL
;
1481 if (get_user_u32(val
, optval_addr
)) {
1482 return -TARGET_EFAULT
;
1484 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1485 &val
, sizeof(val
)));
1492 case TARGET_SOL_SOCKET
:
1494 /* Options with 'int' argument. */
1495 case TARGET_SO_DEBUG
:
1498 case TARGET_SO_REUSEADDR
:
1499 optname
= SO_REUSEADDR
;
1501 case TARGET_SO_TYPE
:
1504 case TARGET_SO_ERROR
:
1507 case TARGET_SO_DONTROUTE
:
1508 optname
= SO_DONTROUTE
;
1510 case TARGET_SO_BROADCAST
:
1511 optname
= SO_BROADCAST
;
1513 case TARGET_SO_SNDBUF
:
1514 optname
= SO_SNDBUF
;
1516 case TARGET_SO_RCVBUF
:
1517 optname
= SO_RCVBUF
;
1519 case TARGET_SO_KEEPALIVE
:
1520 optname
= SO_KEEPALIVE
;
1522 case TARGET_SO_OOBINLINE
:
1523 optname
= SO_OOBINLINE
;
1525 case TARGET_SO_NO_CHECK
:
1526 optname
= SO_NO_CHECK
;
1528 case TARGET_SO_PRIORITY
:
1529 optname
= SO_PRIORITY
;
1532 case TARGET_SO_BSDCOMPAT
:
1533 optname
= SO_BSDCOMPAT
;
1536 case TARGET_SO_PASSCRED
:
1537 optname
= SO_PASSCRED
;
1539 case TARGET_SO_TIMESTAMP
:
1540 optname
= SO_TIMESTAMP
;
1542 case TARGET_SO_RCVLOWAT
:
1543 optname
= SO_RCVLOWAT
;
1545 case TARGET_SO_RCVTIMEO
:
1546 optname
= SO_RCVTIMEO
;
1548 case TARGET_SO_SNDTIMEO
:
1549 optname
= SO_SNDTIMEO
;
1555 if (optlen
< sizeof(uint32_t))
1556 return -TARGET_EINVAL
;
1558 if (get_user_u32(val
, optval_addr
))
1559 return -TARGET_EFAULT
;
1560 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1564 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1565 ret
= -TARGET_ENOPROTOOPT
;
1570 /* do_getsockopt() Must return target values and target errnos. */
1571 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1572 abi_ulong optval_addr
, abi_ulong optlen
)
1579 case TARGET_SOL_SOCKET
:
1582 /* These don't just return a single integer */
1583 case TARGET_SO_LINGER
:
1584 case TARGET_SO_RCVTIMEO
:
1585 case TARGET_SO_SNDTIMEO
:
1586 case TARGET_SO_PEERNAME
:
1588 case TARGET_SO_PEERCRED
: {
1591 struct target_ucred
*tcr
;
1593 if (get_user_u32(len
, optlen
)) {
1594 return -TARGET_EFAULT
;
1597 return -TARGET_EINVAL
;
1601 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1609 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1610 return -TARGET_EFAULT
;
1612 __put_user(cr
.pid
, &tcr
->pid
);
1613 __put_user(cr
.uid
, &tcr
->uid
);
1614 __put_user(cr
.gid
, &tcr
->gid
);
1615 unlock_user_struct(tcr
, optval_addr
, 1);
1616 if (put_user_u32(len
, optlen
)) {
1617 return -TARGET_EFAULT
;
1621 /* Options with 'int' argument. */
1622 case TARGET_SO_DEBUG
:
1625 case TARGET_SO_REUSEADDR
:
1626 optname
= SO_REUSEADDR
;
1628 case TARGET_SO_TYPE
:
1631 case TARGET_SO_ERROR
:
1634 case TARGET_SO_DONTROUTE
:
1635 optname
= SO_DONTROUTE
;
1637 case TARGET_SO_BROADCAST
:
1638 optname
= SO_BROADCAST
;
1640 case TARGET_SO_SNDBUF
:
1641 optname
= SO_SNDBUF
;
1643 case TARGET_SO_RCVBUF
:
1644 optname
= SO_RCVBUF
;
1646 case TARGET_SO_KEEPALIVE
:
1647 optname
= SO_KEEPALIVE
;
1649 case TARGET_SO_OOBINLINE
:
1650 optname
= SO_OOBINLINE
;
1652 case TARGET_SO_NO_CHECK
:
1653 optname
= SO_NO_CHECK
;
1655 case TARGET_SO_PRIORITY
:
1656 optname
= SO_PRIORITY
;
1659 case TARGET_SO_BSDCOMPAT
:
1660 optname
= SO_BSDCOMPAT
;
1663 case TARGET_SO_PASSCRED
:
1664 optname
= SO_PASSCRED
;
1666 case TARGET_SO_TIMESTAMP
:
1667 optname
= SO_TIMESTAMP
;
1669 case TARGET_SO_RCVLOWAT
:
1670 optname
= SO_RCVLOWAT
;
1677 /* TCP options all take an 'int' value. */
1679 if (get_user_u32(len
, optlen
))
1680 return -TARGET_EFAULT
;
1682 return -TARGET_EINVAL
;
1684 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1690 if (put_user_u32(val
, optval_addr
))
1691 return -TARGET_EFAULT
;
1693 if (put_user_u8(val
, optval_addr
))
1694 return -TARGET_EFAULT
;
1696 if (put_user_u32(len
, optlen
))
1697 return -TARGET_EFAULT
;
1704 case IP_ROUTER_ALERT
:
1708 case IP_MTU_DISCOVER
:
1714 case IP_MULTICAST_TTL
:
1715 case IP_MULTICAST_LOOP
:
1716 if (get_user_u32(len
, optlen
))
1717 return -TARGET_EFAULT
;
1719 return -TARGET_EINVAL
;
1721 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1724 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1726 if (put_user_u32(len
, optlen
)
1727 || put_user_u8(val
, optval_addr
))
1728 return -TARGET_EFAULT
;
1730 if (len
> sizeof(int))
1732 if (put_user_u32(len
, optlen
)
1733 || put_user_u32(val
, optval_addr
))
1734 return -TARGET_EFAULT
;
1738 ret
= -TARGET_ENOPROTOOPT
;
1744 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1746 ret
= -TARGET_EOPNOTSUPP
;
1752 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1753 int count
, int copy
)
1755 struct target_iovec
*target_vec
;
1757 abi_ulong total_len
, max_len
;
1764 if (count
> IOV_MAX
) {
1769 vec
= calloc(count
, sizeof(struct iovec
));
1775 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1776 count
* sizeof(struct target_iovec
), 1);
1777 if (target_vec
== NULL
) {
1782 /* ??? If host page size > target page size, this will result in a
1783 value larger than what we can actually support. */
1784 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
1787 for (i
= 0; i
< count
; i
++) {
1788 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1789 abi_long len
= tswapal(target_vec
[i
].iov_len
);
1794 } else if (len
== 0) {
1795 /* Zero length pointer is ignored. */
1796 vec
[i
].iov_base
= 0;
1798 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
1799 if (!vec
[i
].iov_base
) {
1803 if (len
> max_len
- total_len
) {
1804 len
= max_len
- total_len
;
1807 vec
[i
].iov_len
= len
;
1811 unlock_user(target_vec
, target_addr
, 0);
1817 unlock_user(target_vec
, target_addr
, 0);
1821 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1822 int count
, int copy
)
1824 struct target_iovec
*target_vec
;
1827 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1828 count
* sizeof(struct target_iovec
), 1);
1830 for (i
= 0; i
< count
; i
++) {
1831 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1832 abi_long len
= tswapal(target_vec
[i
].iov_base
);
1836 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1838 unlock_user(target_vec
, target_addr
, 0);
1844 /* do_socket() Must return target values and target errnos. */
1845 static abi_long
do_socket(int domain
, int type
, int protocol
)
1847 #if defined(TARGET_MIPS)
1849 case TARGET_SOCK_DGRAM
:
1852 case TARGET_SOCK_STREAM
:
1855 case TARGET_SOCK_RAW
:
1858 case TARGET_SOCK_RDM
:
1861 case TARGET_SOCK_SEQPACKET
:
1862 type
= SOCK_SEQPACKET
;
1864 case TARGET_SOCK_PACKET
:
1869 if (domain
== PF_NETLINK
)
1870 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1871 return get_errno(socket(domain
, type
, protocol
));
1874 /* do_bind() Must return target values and target errnos. */
1875 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1881 if ((int)addrlen
< 0) {
1882 return -TARGET_EINVAL
;
1885 addr
= alloca(addrlen
+1);
1887 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1891 return get_errno(bind(sockfd
, addr
, addrlen
));
1894 /* do_connect() Must return target values and target errnos. */
1895 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1901 if ((int)addrlen
< 0) {
1902 return -TARGET_EINVAL
;
1905 addr
= alloca(addrlen
);
1907 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1911 return get_errno(connect(sockfd
, addr
, addrlen
));
1914 /* do_sendrecvmsg() Must return target values and target errnos. */
1915 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1916 int flags
, int send
)
1919 struct target_msghdr
*msgp
;
1923 abi_ulong target_vec
;
1926 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1930 return -TARGET_EFAULT
;
1931 if (msgp
->msg_name
) {
1932 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1933 msg
.msg_name
= alloca(msg
.msg_namelen
);
1934 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapal(msgp
->msg_name
),
1940 msg
.msg_name
= NULL
;
1941 msg
.msg_namelen
= 0;
1943 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
1944 msg
.msg_control
= alloca(msg
.msg_controllen
);
1945 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1947 count
= tswapal(msgp
->msg_iovlen
);
1948 target_vec
= tswapal(msgp
->msg_iov
);
1949 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
1950 target_vec
, count
, send
);
1952 ret
= -host_to_target_errno(errno
);
1955 msg
.msg_iovlen
= count
;
1959 ret
= target_to_host_cmsg(&msg
, msgp
);
1961 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1963 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1964 if (!is_error(ret
)) {
1966 ret
= host_to_target_cmsg(msgp
, &msg
);
1967 if (!is_error(ret
)) {
1968 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
1969 if (msg
.msg_name
!= NULL
) {
1970 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
1971 msg
.msg_name
, msg
.msg_namelen
);
1983 unlock_iovec(vec
, target_vec
, count
, !send
);
1985 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1989 /* do_accept() Must return target values and target errnos. */
1990 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
1991 abi_ulong target_addrlen_addr
)
1997 if (target_addr
== 0)
1998 return get_errno(accept(fd
, NULL
, NULL
));
2000 /* linux returns EINVAL if addrlen pointer is invalid */
2001 if (get_user_u32(addrlen
, target_addrlen_addr
))
2002 return -TARGET_EINVAL
;
2004 if ((int)addrlen
< 0) {
2005 return -TARGET_EINVAL
;
2008 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2009 return -TARGET_EINVAL
;
2011 addr
= alloca(addrlen
);
2013 ret
= get_errno(accept(fd
, addr
, &addrlen
));
2014 if (!is_error(ret
)) {
2015 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2016 if (put_user_u32(addrlen
, target_addrlen_addr
))
2017 ret
= -TARGET_EFAULT
;
2022 /* do_getpeername() Must return target values and target errnos. */
2023 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2024 abi_ulong target_addrlen_addr
)
2030 if (get_user_u32(addrlen
, target_addrlen_addr
))
2031 return -TARGET_EFAULT
;
2033 if ((int)addrlen
< 0) {
2034 return -TARGET_EINVAL
;
2037 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2038 return -TARGET_EFAULT
;
2040 addr
= alloca(addrlen
);
2042 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2043 if (!is_error(ret
)) {
2044 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2045 if (put_user_u32(addrlen
, target_addrlen_addr
))
2046 ret
= -TARGET_EFAULT
;
2051 /* do_getsockname() Must return target values and target errnos. */
2052 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2053 abi_ulong target_addrlen_addr
)
2059 if (get_user_u32(addrlen
, target_addrlen_addr
))
2060 return -TARGET_EFAULT
;
2062 if ((int)addrlen
< 0) {
2063 return -TARGET_EINVAL
;
2066 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2067 return -TARGET_EFAULT
;
2069 addr
= alloca(addrlen
);
2071 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2072 if (!is_error(ret
)) {
2073 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2074 if (put_user_u32(addrlen
, target_addrlen_addr
))
2075 ret
= -TARGET_EFAULT
;
2080 /* do_socketpair() Must return target values and target errnos. */
2081 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2082 abi_ulong target_tab_addr
)
2087 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2088 if (!is_error(ret
)) {
2089 if (put_user_s32(tab
[0], target_tab_addr
)
2090 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2091 ret
= -TARGET_EFAULT
;
2096 /* do_sendto() Must return target values and target errnos. */
2097 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2098 abi_ulong target_addr
, socklen_t addrlen
)
2104 if ((int)addrlen
< 0) {
2105 return -TARGET_EINVAL
;
2108 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2110 return -TARGET_EFAULT
;
2112 addr
= alloca(addrlen
);
2113 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2115 unlock_user(host_msg
, msg
, 0);
2118 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2120 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2122 unlock_user(host_msg
, msg
, 0);
2126 /* do_recvfrom() Must return target values and target errnos. */
2127 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2128 abi_ulong target_addr
,
2129 abi_ulong target_addrlen
)
2136 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2138 return -TARGET_EFAULT
;
2140 if (get_user_u32(addrlen
, target_addrlen
)) {
2141 ret
= -TARGET_EFAULT
;
2144 if ((int)addrlen
< 0) {
2145 ret
= -TARGET_EINVAL
;
2148 addr
= alloca(addrlen
);
2149 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2151 addr
= NULL
; /* To keep compiler quiet. */
2152 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2154 if (!is_error(ret
)) {
2156 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2157 if (put_user_u32(addrlen
, target_addrlen
)) {
2158 ret
= -TARGET_EFAULT
;
2162 unlock_user(host_msg
, msg
, len
);
2165 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
    const int n = sizeof(abi_ulong);
        abi_ulong domain, type, protocol;
        if (get_user_ual(domain, vptr)
            || get_user_ual(type, vptr + n)
            || get_user_ual(protocol, vptr + 2 * n))
            return -TARGET_EFAULT;
        ret = do_socket(domain, type, protocol);
        abi_ulong target_addr;
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;
        ret = do_bind(sockfd, target_addr, addrlen);
    case SOCKOP_connect:
        abi_ulong target_addr;
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;
        ret = do_connect(sockfd, target_addr, addrlen);
        abi_ulong sockfd, backlog;
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(backlog, vptr + n))
            return -TARGET_EFAULT;
        ret = get_errno(listen(sockfd, backlog));
        abi_ulong target_addr, target_addrlen;
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;
        ret = do_accept(sockfd, target_addr, target_addrlen);
    case SOCKOP_getsockname:
        abi_ulong target_addr, target_addrlen;
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;
        ret = do_getsockname(sockfd, target_addr, target_addrlen);
    case SOCKOP_getpeername:
        abi_ulong target_addr, target_addrlen;
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;
        ret = do_getpeername(sockfd, target_addr, target_addrlen);
    case SOCKOP_socketpair:
        abi_ulong domain, type, protocol;
        if (get_user_ual(domain, vptr)
            || get_user_ual(type, vptr + n)
            || get_user_ual(protocol, vptr + 2 * n)
            || get_user_ual(tab, vptr + 3 * n))
            return -TARGET_EFAULT;
        ret = do_socketpair(domain, type, protocol, tab);
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n))
            return -TARGET_EFAULT;
        ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n))
            return -TARGET_EFAULT;
        ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n)
            || get_user_ual(addr, vptr + 4 * n)
            || get_user_ual(addrlen, vptr + 5 * n))
            return -TARGET_EFAULT;
        ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
    case SOCKOP_recvfrom:
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n)
            || get_user_ual(addr, vptr + 4 * n)
            || get_user_ual(addrlen, vptr + 5 * n))
            return -TARGET_EFAULT;
        ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
    case SOCKOP_shutdown:
        abi_ulong sockfd, how;
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(how, vptr + n))
            return -TARGET_EFAULT;
        ret = get_errno(shutdown(sockfd, how));
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
        abi_ulong target_msg;
        if (get_user_ual(fd, vptr)
            || get_user_ual(target_msg, vptr + n)
            || get_user_ual(flags, vptr + 2 * n))
            return -TARGET_EFAULT;
        ret = do_sendrecvmsg(fd, target_msg, flags,
                             (num == SOCKOP_sendmsg));
    case SOCKOP_setsockopt:
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(level, vptr + n)
            || get_user_ual(optname, vptr + 2 * n)
            || get_user_ual(optval, vptr + 3 * n)
            || get_user_ual(optlen, vptr + 4 * n))
            return -TARGET_EFAULT;
        ret = do_setsockopt(sockfd, level, optname, optval, optlen);
    case SOCKOP_getsockopt:
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(level, vptr + n)
            || get_user_ual(optname, vptr + 2 * n)
            || get_user_ual(optval, vptr + 3 * n)
            || get_user_ual(optlen, vptr + 4 * n))
            return -TARGET_EFAULT;
        ret = do_getsockopt(sockfd, level, optname, optval, optlen);
        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
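
/*
 * Illustrative sketch, not part of the original file: every SOCKOP_*
 * case above reads its arguments the same way -- vptr points at an
 * array of abi_ulong values in guest memory, one per argument of the
 * demultiplexed socket call.  A hypothetical helper doing that work
 * generically could look like the following (the helper name is made
 * up; get_user_ual() and abi_ulong are the real definitions already
 * used above).
 */
static inline abi_long sketch_get_socketcall_args(abi_ulong vptr,
                                                  abi_ulong *out, int count)
{
    const int n = sizeof(abi_ulong);
    int i;

    for (i = 0; i < count; i++) {
        /* read and byte-swap the i-th guest-side argument */
        if (get_user_ual(out[i], vptr + i * n)) {
            return -TARGET_EFAULT;
        }
    }
    return 0;
}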
#define N_SHM_REGIONS 32

static struct shm_region {
} shm_regions[N_SHM_REGIONS];

struct target_ipc_perm
    unsigned short int mode;
    unsigned short int __pad1;
    unsigned short int __seq;
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;

struct target_semid_ds
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;

static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;
    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswapal(target_ip->__key);
    host_ip->uid = tswapal(target_ip->uid);
    host_ip->gid = tswapal(target_ip->gid);
    host_ip->cuid = tswapal(target_ip->cuid);
    host_ip->cgid = tswapal(target_ip->cgid);
    host_ip->mode = tswap16(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;
    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapal(host_ip->__key);
    target_ip->uid = tswapal(host_ip->uid);
    target_ip->gid = tswapal(host_ip->gid);
    target_ip->cuid = tswapal(host_ip->cuid);
    target_ip->cgid = tswapal(host_ip->cgid);
    target_ip->mode = tswap16(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);

static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
    struct target_semid_ds *target_sd;
    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
    struct target_semid_ds *target_sd;
    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);

struct target_seminfo {

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);

    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;

union target_semun {
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
    unsigned short *array;
    struct semid_ds semid_ds;
    semun.buf = &semid_ds;
    ret = semctl(semid, 0, IPC_STAT, semun);
        return get_errno(ret);
    nsems = semid_ds.sem_nsems;
    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
        return -TARGET_EFAULT;
    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    unlock_user(array, target_addr, 0);

static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
    unsigned short *array;
    struct semid_ds semid_ds;
    semun.buf = &semid_ds;
    ret = semctl(semid, 0, IPC_STAT, semun);
        return get_errno(ret);
    nsems = semid_ds.sem_nsems;
    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
        return -TARGET_EFAULT;
    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    unlock_user(array, target_addr, 1);

static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
        arg.val = tswap32(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswap32(arg.val);
        err = target_to_host_semarray(semid, &array, target_su.array);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
struct target_sembuf {
    unsigned short sem_num;

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
    struct target_sembuf *target_sembuf;
    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
        return -TARGET_EFAULT;
    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    unlock_user(target_sembuf, target_addr, 0);

static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
    struct sembuf sops[nsops];
    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;
    return semop(semid, sops, nsops);
struct target_msqid_ds
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;

static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
    struct target_msqid_ds *target_md;
    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);

static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
    struct target_msqid_ds *target_md;
    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);

struct target_msginfo {
    unsigned short int msgseg;

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);

static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, NULL));
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;

struct target_msgbuf {

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    unlock_user_struct(target_mb, msgp, 0);

static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    target_mb->mtype = tswapal(host_mb->mtype);
    unlock_user_struct(target_mb, msgp, 1);
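
/*
 * Descriptive note added here, not in the original source: the host
 * msgbuf handled by do_msgsnd() and do_msgrcv() above uses the classic
 * SysV layout -- a long mtype header immediately followed by msgsz
 * payload bytes -- which is why both helpers allocate
 * msgsz + sizeof(long) and copy mtype and mtext separately, e.g.:
 *
 *     struct msgbuf {
 *         long mtype;     // message type, must be > 0
 *         char mtext[];   // msgsz payload bytes
 *     };
 */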
struct target_shmid_ds
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;
    abi_ulong shm_atime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong shm_dtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong shm_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;

static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
    struct target_shmid_ds *target_sd;
    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);

static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
    struct target_shmid_ds *target_sd;
    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);

struct target_shminfo {

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);

struct target_shm_info {
    abi_ulong swap_attempts;
    abi_ulong swap_successes;

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, NULL));

static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
    struct shmid_ds shm_info;
    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
    host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
        abi_ulong mmap_start;
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
        if (mmap_start == -1) {
            host_raddr = (void *)-1;
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    if (host_raddr == (void *)-1) {
        return get_errno((long)host_raddr);
    raddr=h2g((unsigned long)host_raddr);
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;

static inline abi_long do_shmdt(abi_ulong shmaddr)
    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
    return get_errno(shmdt(g2h(shmaddr)));
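
/*
 * Illustrative sketch, not part of the original file: do_shmat() and
 * do_shmdt() above track guest attachments in the fixed shm_regions[]
 * table, using start == 0 to mean "slot free".  A hypothetical lookup
 * helper following the same convention might be:
 */
static inline struct shm_region *sketch_find_shm_region(abi_ulong guest_addr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start != 0 && shm_regions[i].start == guest_addr) {
            return &shm_regions[i];
        }
    }
    return NULL; /* not an address handed out by do_shmat() */
}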
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
    version = call >> 16;
        ret = do_semop(first, ptr, second);
        ret = get_errno(semget(first, second, third));
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        ret = get_errno(msgget(first, second));
        ret = do_msgsnd(first, ptr, second, third);
        ret = do_msgctl(first, second, ptr);
            struct target_ipc_kludge {
            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;
            ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
            unlock_user_struct(tmp, ptr, 0);
            ret = do_msgrcv(first, ptr, second, fifth, third);
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            ret = -TARGET_EINVAL;
        ret = do_shmdt(ptr);
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        /* IPC_* and SHM_* command values are the same on all linux platforms */
        ret = do_shmctl(first, second, third);
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
#include "syscall_types.h"
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, abi_long cmd, abi_long arg);

    unsigned int target_cmd;
    unsigned int host_cmd;
    do_ioctl_fn *do_ioctl;
    const argtype arg_type[5];

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))
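
/*
 * Added note, not in the original: with the bound above,
 * sizeof(struct fiemap) + FIEMAP_MAX_EXTENTS * sizeof(struct fiemap_extent)
 * cannot exceed UINT_MAX, so the outbufsz computation in
 * do_ioctl_fs_ioc_fiemap() below cannot wrap even for a hostile
 * fm_extent_count.  A compile-time check in that spirit (assuming the
 * usual QEMU_BUILD_BUG_ON macro is available here) would be:
 *
 *     QEMU_BUILD_BUG_ON(FIEMAP_MAX_EXTENTS * sizeof(struct fiemap_extent)
 *                       > UINT_MAX - sizeof(struct fiemap));
 */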
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    int i, extent_size = thunk_type_size(extent_arg_type, 0);

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);
    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
            return -TARGET_ENOMEM;
        memcpy(fm, buf_temp, sizeof(struct fiemap));
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
            ret = -TARGET_EFAULT;
        /* Convert the struct fiemap */
        thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
        if (fm->fm_extent_count != 0) {
            p = argptr + target_size_in;
            /* ...and then all the struct fiemap_extents */
            for (i = 0; i < fm->fm_mapped_extents; i++) {
                thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
        unlock_user(argptr, arg, target_size_out);
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, abi_long cmd, abi_long arg)
    const argtype *arg_type = ie->arg_type;
    struct ifconf *host_ifconf;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    abi_long target_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_len = host_ifconf->ifc_len;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;

    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
    nb_ifreq = target_ifc_len / target_ifreq_size;
    host_ifc_len = nb_ifreq * sizeof(struct ifreq);

    outbufsz = sizeof(*host_ifconf) + host_ifc_len;
    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        host_ifconf = malloc(outbufsz);
            return -TARGET_ENOMEM;
        memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
    host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);

    host_ifconf->ifc_len = host_ifc_len;
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */
        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */
        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        /* copy ifreq[] to target user */
        argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
        for (i = 0; i < nb_ifreq; i++) {
            thunk_convert(argptr + i * target_ifreq_size,
                          host_ifc_buf + i * sizeof(struct ifreq),
                          ifreq_arg_type, THUNK_TARGET);
        unlock_user(argptr, target_ifc_buf, target_ifc_len);
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            abi_long cmd, abi_long arg)
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    const argtype *arg_type = ie->arg_type;
    void *big_buf = NULL;

    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
        ret = -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    switch (ie->host_cmd) {
    case DM_LIST_DEVICES:
    case DM_DEV_SUSPEND:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_LIST_VERSIONS:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            cur_data += spec->next;
        ret = -TARGET_EINVAL;
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_DEV_SUSPEND:
        case DM_TABLE_CLEAR:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
        case DM_LIST_DEVICES:
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */
                uint32_t next = nl->next;
                nl->next = nl_size + (strlen(nl->name) + 1);
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                nl = (void*)nl + next;
        case DM_TABLE_STATUS:
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
        case DM_LIST_VERSIONS:
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);
                uint32_t next = vers->next;
                vers->next = vers_size + (strlen(vers->name) + 1);
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                vers = (void*)vers + next;
            ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
            ret = -TARGET_EFAULT;
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
    const IOCTLEntry *ie;
    const argtype *arg_type;
    uint8_t buf_temp[MAX_STRUCT_SIZE];

    if (ie->target_cmd == 0) {
        gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
        return -TARGET_ENOSYS;
    if (ie->target_cmd == cmd)
    arg_type = ie->arg_type;
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    switch(arg_type[0]) {
        ret = get_errno(ioctl(fd, ie->host_cmd));
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
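
/*
 * Illustrative example, not from this file: the entries of
 * ioctl_entries[] are produced by the IOCTL()/IOCTL_SPECIAL() macros
 * above, typically expanded from a separate list header.  As an assumed
 * example of the shape such an entry takes, a read-only ioctl returning
 * a structure would be declared roughly as
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * and do_ioctl() above then uses the access bits (IOC_R/IOC_W/IOC_RW)
 * together with arg_type[] to decide in which direction(s) to
 * thunk_convert() the argument buffer.
 */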
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
static void target_to_host_termios (void *dst, const void *src)
    struct host_termios *host = dst;
    const struct target_termios *target = src;

        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
static void host_to_target_termios (void *dst, const void *src)
    struct target_termios *target = dst;
    const struct host_termios *host = src;

        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
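
/*
 * Illustrative sketch, not part of the original file: each
 * bitmask_transtbl row above is (target mask, target bits, host mask,
 * host bits).  The target_to_host_bitmask()/host_to_target_bitmask()
 * helpers used by the termios converters above walk such a table and,
 * for every row whose masked source bits match, OR in the destination
 * bits.  In pseudo-C (field names are assumptions here, kept inside a
 * comment so as not to depend on the real struct layout):
 *
 *     unsigned int out = 0;
 *     for (each row in tbl) {
 *         if ((src & row.src_mask) == row.src_bits)
 *             out |= row.dst_bits;
 *     }
 *     return out;
 */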
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;
    if (contents == 3) {
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    /* allocate the LDT */
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
             read_exec_only == 1 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
        ret = read_ldt(ptr, bytecount);
        ret = write_ldt(env, ptr, bytecount, 1);
        ret = write_ldt(env, ptr, bytecount, 0);
        ret = -TARGET_ENOSYS;
#if defined(TARGET_I386) && defined(TARGET_ABI32)
static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
    unlock_user_struct(target_ldt_info, ptr, 1);
    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |

    /* Install the new entry ... */
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
    lm = (entry_2 >> 21) & 1;
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
#endif /* TARGET_I386 && TARGET_ABI32 */
#ifndef TARGET_ABI32
static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        ret = -TARGET_EINVAL;
#endif /* defined(TARGET_I386) */
#define NEW_STACK_SIZE 0x40000

#if defined(CONFIG_USE_NPTL)

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;

static void *clone_func(void *arg)
    new_thread_info *info = arg;
    ts = (TaskState *)thread_env->opaque;
    info->tid = gettid();
    env->host_tid = info->tid;
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);

static int clone_func(void *arg)
    CPUArchState *env = arg;
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
    CPUArchState *new_env;
#if defined(CONFIG_USE_NPTL)
    unsigned int nptl_flags;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)env->opaque;
#if defined(CONFIG_USE_NPTL)
        new_thread_info info;
        pthread_attr_t attr;
        ts = g_malloc0(sizeof(TaskState));
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
        cpu_reset(ENV_GET_CPU(new_env));
        /* Init regs that differ from the parent. */
        cpu_clone_regs(new_env, newsp);
        new_env->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
#if defined(CONFIG_USE_NPTL)
        flags &= ~CLONE_NPTL_FLAGS2;
        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);
        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        /* Wait for the child to initialize. */
        pthread_cond_wait(&info.cond, &info.mutex);
        if (flags & CLONE_PARENT_SETTID)
            put_user_u32(ret, parent_tidptr);
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
        if (flags & CLONE_NPTL_FLAGS2)
        /* This is probably going to die very quickly, but do it anyway. */
        new_stack = g_malloc0 (NEW_STACK_SIZE);
        ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
        ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
        /* if there is no CLONE_VM, we consider it a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            /* Child Process. */
            cpu_clone_regs(env, newsp);
#if defined(CONFIG_USE_NPTL)
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)env->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
4470 static int target_to_host_fcntl_cmd(int cmd
)
4473 case TARGET_F_DUPFD
:
4474 case TARGET_F_GETFD
:
4475 case TARGET_F_SETFD
:
4476 case TARGET_F_GETFL
:
4477 case TARGET_F_SETFL
:
4479 case TARGET_F_GETLK
:
4481 case TARGET_F_SETLK
:
4483 case TARGET_F_SETLKW
:
4485 case TARGET_F_GETOWN
:
4487 case TARGET_F_SETOWN
:
4489 case TARGET_F_GETSIG
:
4491 case TARGET_F_SETSIG
:
4493 #if TARGET_ABI_BITS == 32
4494 case TARGET_F_GETLK64
:
4496 case TARGET_F_SETLK64
:
4498 case TARGET_F_SETLKW64
:
4501 case TARGET_F_SETLEASE
:
4503 case TARGET_F_GETLEASE
:
4505 #ifdef F_DUPFD_CLOEXEC
4506 case TARGET_F_DUPFD_CLOEXEC
:
4507 return F_DUPFD_CLOEXEC
;
4509 case TARGET_F_NOTIFY
:
4512 return -TARGET_EINVAL
;
4514 return -TARGET_EINVAL
;
4517 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4520 struct target_flock
*target_fl
;
4521 struct flock64 fl64
;
4522 struct target_flock64
*target_fl64
;
4524 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4526 if (host_cmd
== -TARGET_EINVAL
)
4530 case TARGET_F_GETLK
:
4531 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4532 return -TARGET_EFAULT
;
4533 fl
.l_type
= tswap16(target_fl
->l_type
);
4534 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4535 fl
.l_start
= tswapal(target_fl
->l_start
);
4536 fl
.l_len
= tswapal(target_fl
->l_len
);
4537 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4538 unlock_user_struct(target_fl
, arg
, 0);
4539 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4541 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4542 return -TARGET_EFAULT
;
4543 target_fl
->l_type
= tswap16(fl
.l_type
);
4544 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4545 target_fl
->l_start
= tswapal(fl
.l_start
);
4546 target_fl
->l_len
= tswapal(fl
.l_len
);
4547 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4548 unlock_user_struct(target_fl
, arg
, 1);
4552 case TARGET_F_SETLK
:
4553 case TARGET_F_SETLKW
:
4554 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4555 return -TARGET_EFAULT
;
4556 fl
.l_type
= tswap16(target_fl
->l_type
);
4557 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4558 fl
.l_start
= tswapal(target_fl
->l_start
);
4559 fl
.l_len
= tswapal(target_fl
->l_len
);
4560 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4561 unlock_user_struct(target_fl
, arg
, 0);
4562 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4565 case TARGET_F_GETLK64
:
4566 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4567 return -TARGET_EFAULT
;
4568 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4569 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4570 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4571 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4572 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4573 unlock_user_struct(target_fl64
, arg
, 0);
4574 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4576 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4577 return -TARGET_EFAULT
;
4578 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
4579 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4580 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4581 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4582 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4583 unlock_user_struct(target_fl64
, arg
, 1);
4586 case TARGET_F_SETLK64
:
4587 case TARGET_F_SETLKW64
:
4588 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4589 return -TARGET_EFAULT
;
4590 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4591 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4592 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4593 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4594 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4595 unlock_user_struct(target_fl64
, arg
, 0);
4596 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4599 case TARGET_F_GETFL
:
4600 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4602 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4606 case TARGET_F_SETFL
:
4607 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4610 case TARGET_F_SETOWN
:
4611 case TARGET_F_GETOWN
:
4612 case TARGET_F_SETSIG
:
4613 case TARGET_F_GETSIG
:
4614 case TARGET_F_SETLEASE
:
4615 case TARGET_F_GETLEASE
:
4616 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4620 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4628 static inline int high2lowuid(int uid
)
4636 static inline int high2lowgid(int gid
)
4644 static inline int low2highuid(int uid
)
4646 if ((int16_t)uid
== -1)
4652 static inline int low2highgid(int gid
)
4654 if ((int16_t)gid
== -1)
4659 static inline int tswapid(int id
)
4663 #else /* !USE_UID16 */
4664 static inline int high2lowuid(int uid
)
4668 static inline int high2lowgid(int gid
)
4672 static inline int low2highuid(int uid
)
4676 static inline int low2highgid(int gid
)
4680 static inline int tswapid(int id
)
4684 #endif /* USE_UID16 */
4686 void syscall_init(void)
4689 const argtype
*arg_type
;
4693 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4694 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4695 #include "syscall_types.h"
4697 #undef STRUCT_SPECIAL
4699 /* Build target_to_host_errno_table[] table from
4700 * host_to_target_errno_table[]. */
4701 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
4702 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4705 /* we patch the ioctl size if necessary. We rely on the fact that
4706 no ioctl has all the bits at '1' in the size field */
4708 while (ie
->target_cmd
!= 0) {
4709 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4710 TARGET_IOC_SIZEMASK
) {
4711 arg_type
= ie
->arg_type
;
4712 if (arg_type
[0] != TYPE_PTR
) {
4713 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4718 size
= thunk_type_size(arg_type
, 0);
4719 ie
->target_cmd
= (ie
->target_cmd
&
4720 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4721 (size
<< TARGET_IOC_SIZESHIFT
);
4724 /* automatic consistency check if same arch */
4725 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4726 (defined(__x86_64__) && defined(TARGET_X86_64))
4727 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4728 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4729 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
4736 #if TARGET_ABI_BITS == 32
4737 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
4739 #ifdef TARGET_WORDS_BIGENDIAN
4740 return ((uint64_t)word0
<< 32) | word1
;
4742 return ((uint64_t)word1
<< 32) | word0
;
4745 #else /* TARGET_ABI_BITS == 32 */
4746 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
4750 #endif /* TARGET_ABI_BITS != 32 */
4752 #ifdef TARGET_NR_truncate64
4753 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
4758 if (regpairs_aligned(cpu_env
)) {
4762 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
4766 #ifdef TARGET_NR_ftruncate64
4767 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
4772 if (regpairs_aligned(cpu_env
)) {
4776 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
4780 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4781 abi_ulong target_addr
)
4783 struct target_timespec
*target_ts
;
4785 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4786 return -TARGET_EFAULT
;
4787 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
4788 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
4789 unlock_user_struct(target_ts
, target_addr
, 0);
4793 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4794 struct timespec
*host_ts
)
4796 struct target_timespec
*target_ts
;
4798 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4799 return -TARGET_EFAULT
;
4800 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
4801 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
4802 unlock_user_struct(target_ts
, target_addr
, 1);
4806 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4807 static inline abi_long
host_to_target_stat64(void *cpu_env
,
4808 abi_ulong target_addr
,
4809 struct stat
*host_st
)
4812 if (((CPUARMState
*)cpu_env
)->eabi
) {
4813 struct target_eabi_stat64
*target_st
;
4815 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4816 return -TARGET_EFAULT
;
4817 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
4818 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4819 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4820 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4821 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4823 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4824 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4825 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4826 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4827 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4828 __put_user(host_st
->st_size
, &target_st
->st_size
);
4829 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4830 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4831 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4832 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4833 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4834 unlock_user_struct(target_st
, target_addr
, 1);
4838 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4839 struct target_stat
*target_st
;
4841 struct target_stat64
*target_st
;
4844 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4845 return -TARGET_EFAULT
;
4846 memset(target_st
, 0, sizeof(*target_st
));
4847 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4848 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4849 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4850 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4852 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4853 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4854 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4855 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4856 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4857 /* XXX: better use of kernel struct */
4858 __put_user(host_st
->st_size
, &target_st
->st_size
);
4859 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4860 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4861 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4862 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4863 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4864 unlock_user_struct(target_st
, target_addr
, 1);
4871 #if defined(CONFIG_USE_NPTL)
4872 /* ??? Using host futex calls even when target atomic operations
4873 are not really atomic probably breaks things. However implementing
4874 futexes locally would make futexes shared between multiple processes
4875 tricky. However they're probably useless because guest atomic
4876 operations won't work either. */
4877 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
4878 target_ulong uaddr2
, int val3
)
4880 struct timespec ts
, *pts
;
4883 /* ??? We assume FUTEX_* constants are the same on both host
4885 #ifdef FUTEX_CMD_MASK
4886 base_op
= op
& FUTEX_CMD_MASK
;
4894 target_to_host_timespec(pts
, timeout
);
4898 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
4901 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4903 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4905 case FUTEX_CMP_REQUEUE
:
4907 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4908 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4909 But the prototype takes a `struct timespec *'; insert casts
4910 to satisfy the compiler. We do not need to tswap TIMEOUT
4911 since it's not compared to guest memory. */
4912 pts
= (struct timespec
*)(uintptr_t) timeout
;
4913 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
4915 (base_op
== FUTEX_CMP_REQUEUE
4919 return -TARGET_ENOSYS
;
4924 /* Map host to target signal numbers for the wait family of syscalls.
4925 Assume all other status bits are the same. */
4926 int host_to_target_waitstatus(int status
)
4928 if (WIFSIGNALED(status
)) {
4929 return host_to_target_signal(WTERMSIG(status
)) | (status
& ~0x7f);
4931 if (WIFSTOPPED(status
)) {
4932 return (host_to_target_signal(WSTOPSIG(status
)) << 8)
4938 int get_osversion(void)
4940 static int osversion
;
4941 struct new_utsname buf
;
4946 if (qemu_uname_release
&& *qemu_uname_release
) {
4947 s
= qemu_uname_release
;
4949 if (sys_uname(&buf
))
4954 for (i
= 0; i
< 3; i
++) {
4956 while (*s
>= '0' && *s
<= '9') {
4961 tmp
= (tmp
<< 8) + n
;
4970 static int open_self_maps(void *cpu_env
, int fd
)
4972 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4973 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
4980 fp
= fopen("/proc/self/maps", "r");
4985 while ((read
= getline(&line
, &len
, fp
)) != -1) {
4986 int fields
, dev_maj
, dev_min
, inode
;
4987 uint64_t min
, max
, offset
;
4988 char flag_r
, flag_w
, flag_x
, flag_p
;
4989 char path
[512] = "";
4990 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
4991 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
4992 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
4994 if ((fields
< 10) || (fields
> 11)) {
4997 if (!strncmp(path
, "[stack]", 7)) {
5000 if (h2g_valid(min
) && h2g_valid(max
)) {
5001 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
5002 " %c%c%c%c %08" PRIx64
" %02x:%02x %d%s%s\n",
5003 h2g(min
), h2g(max
), flag_r
, flag_w
,
5004 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
5005 path
[0] ? " " : "", path
);
5012 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5013 dprintf(fd
, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5014 (unsigned long long)ts
->info
->stack_limit
,
5015 (unsigned long long)(ts
->info
->start_stack
+
5016 (TARGET_PAGE_SIZE
- 1)) & TARGET_PAGE_MASK
,
5017 (unsigned long long)0);
5023 static int open_self_stat(void *cpu_env
, int fd
)
5025 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5026 abi_ulong start_stack
= ts
->info
->start_stack
;
5029 for (i
= 0; i
< 44; i
++) {
5037 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5038 } else if (i
== 1) {
5040 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
5041 } else if (i
== 27) {
5044 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5046 /* for the rest, there is MasterCard */
5047 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
5051 if (write(fd
, buf
, len
) != len
) {
5059 static int open_self_auxv(void *cpu_env
, int fd
)
5061 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5062 abi_ulong auxv
= ts
->info
->saved_auxv
;
5063 abi_ulong len
= ts
->info
->auxv_len
;
5067 * Auxiliary vector is stored in target process stack.
5068 * read in whole auxv vector and copy it to file
5070 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
5074 r
= write(fd
, ptr
, len
);
5081 lseek(fd
, 0, SEEK_SET
);
5082 unlock_user(ptr
, auxv
, len
);
5088 static int do_open(void *cpu_env
, const char *pathname
, int flags
, mode_t mode
)
5091 const char *filename
;
5092 int (*fill
)(void *cpu_env
, int fd
);
5094 const struct fake_open
*fake_open
;
5095 static const struct fake_open fakes
[] = {
5096 { "/proc/self/maps", open_self_maps
},
5097 { "/proc/self/stat", open_self_stat
},
5098 { "/proc/self/auxv", open_self_auxv
},
5102 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5103 if (!strncmp(pathname
, fake_open
->filename
,
5104 strlen(fake_open
->filename
))) {
5109 if (fake_open
->filename
) {
5111 char filename
[PATH_MAX
];
5114 /* create temporary file to map stat to */
5115 tmpdir
= getenv("TMPDIR");
5118 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5119 fd
= mkstemp(filename
);
5125 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
5129 lseek(fd
, 0, SEEK_SET
);
5134 return get_errno(open(path(pathname
), flags
, mode
));
5137 /* do_syscall() should always have a single exit point at the end so
5138 that actions, such as logging of syscall results, can be performed.
5139 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5140 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5141 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5142 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5151 gemu_log("syscall %d", num
);
5154 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5157 case TARGET_NR_exit
:
5158 #ifdef CONFIG_USE_NPTL
5159 /* In old applications this may be used to implement _exit(2).
5160 However in threaded applictions it is used for thread termination,
5161 and _exit_group is used for application termination.
5162 Do thread termination if we have more then one thread. */
5163 /* FIXME: This probably breaks if a signal arrives. We should probably
5164 be disabling signals. */
5165 if (first_cpu
->next_cpu
) {
5167 CPUArchState
**lastp
;
5173 while (p
&& p
!= (CPUArchState
*)cpu_env
) {
5174 lastp
= &p
->next_cpu
;
5177 /* If we didn't find the CPU for this thread then something is
5181 /* Remove the CPU from the list. */
5182 *lastp
= p
->next_cpu
;
5184 ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5185 if (ts
->child_tidptr
) {
5186 put_user_u32(0, ts
->child_tidptr
);
5187 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5191 object_delete(OBJECT(ENV_GET_CPU(cpu_env
)));
5199 gdb_exit(cpu_env
, arg1
);
5201 ret
= 0; /* avoid warning */
5203 case TARGET_NR_read
:
5207 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5209 ret
= get_errno(read(arg1
, p
, arg3
));
5210 unlock_user(p
, arg2
, ret
);
5213 case TARGET_NR_write
:
5214 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5216 ret
= get_errno(write(arg1
, p
, arg3
));
5217 unlock_user(p
, arg2
, 0);
5219 case TARGET_NR_open
:
5220 if (!(p
= lock_user_string(arg1
)))
5222 ret
= get_errno(do_open(cpu_env
, p
,
5223 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5225 unlock_user(p
, arg1
, 0);
5227 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5228 case TARGET_NR_openat
:
5229 if (!(p
= lock_user_string(arg2
)))
5231 ret
= get_errno(sys_openat(arg1
,
5233 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5235 unlock_user(p
, arg2
, 0);
5238 case TARGET_NR_close
:
5239 ret
= get_errno(close(arg1
));
5244 case TARGET_NR_fork
:
5245 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5247 #ifdef TARGET_NR_waitpid
5248 case TARGET_NR_waitpid
:
5251 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5252 if (!is_error(ret
) && arg2
&& ret
5253 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5258 #ifdef TARGET_NR_waitid
5259 case TARGET_NR_waitid
:
5263 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5264 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5265 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5267 host_to_target_siginfo(p
, &info
);
5268 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
5273 #ifdef TARGET_NR_creat /* not on alpha */
5274 case TARGET_NR_creat
:
5275 if (!(p
= lock_user_string(arg1
)))
5277 ret
= get_errno(creat(p
, arg2
));
5278 unlock_user(p
, arg1
, 0);
5281 case TARGET_NR_link
:
5284 p
= lock_user_string(arg1
);
5285 p2
= lock_user_string(arg2
);
5287 ret
= -TARGET_EFAULT
;
5289 ret
= get_errno(link(p
, p2
));
5290 unlock_user(p2
, arg2
, 0);
5291 unlock_user(p
, arg1
, 0);
5294 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5295 case TARGET_NR_linkat
:
5300 p
= lock_user_string(arg2
);
5301 p2
= lock_user_string(arg4
);
5303 ret
= -TARGET_EFAULT
;
5305 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
5306 unlock_user(p
, arg2
, 0);
5307 unlock_user(p2
, arg4
, 0);
5311 case TARGET_NR_unlink
:
5312 if (!(p
= lock_user_string(arg1
)))
5314 ret
= get_errno(unlink(p
));
5315 unlock_user(p
, arg1
, 0);
5317 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5318 case TARGET_NR_unlinkat
:
5319 if (!(p
= lock_user_string(arg2
)))
5321 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
5322 unlock_user(p
, arg2
, 0);
5325 case TARGET_NR_execve
:
5327 char **argp
, **envp
;
5330 abi_ulong guest_argp
;
5331 abi_ulong guest_envp
;
5338 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
5339 if (get_user_ual(addr
, gp
))
5347 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
5348 if (get_user_ual(addr
, gp
))
5355 argp
= alloca((argc
+ 1) * sizeof(void *));
5356 envp
= alloca((envc
+ 1) * sizeof(void *));
5358 for (gp
= guest_argp
, q
= argp
; gp
;
5359 gp
+= sizeof(abi_ulong
), q
++) {
5360 if (get_user_ual(addr
, gp
))
5364 if (!(*q
= lock_user_string(addr
)))
5366 total_size
+= strlen(*q
) + 1;
5370 for (gp
= guest_envp
, q
= envp
; gp
;
5371 gp
+= sizeof(abi_ulong
), q
++) {
5372 if (get_user_ual(addr
, gp
))
5376 if (!(*q
= lock_user_string(addr
)))
5378 total_size
+= strlen(*q
) + 1;
5382 /* This case will not be caught by the host's execve() if its
5383 page size is bigger than the target's. */
5384 if (total_size
> MAX_ARG_PAGES
* TARGET_PAGE_SIZE
) {
5385 ret
= -TARGET_E2BIG
;
5388 if (!(p
= lock_user_string(arg1
)))
5390 ret
= get_errno(execve(p
, argp
, envp
));
5391 unlock_user(p
, arg1
, 0);
5396 ret
= -TARGET_EFAULT
;
5399 for (gp
= guest_argp
, q
= argp
; *q
;
5400 gp
+= sizeof(abi_ulong
), q
++) {
5401 if (get_user_ual(addr
, gp
)
5404 unlock_user(*q
, addr
, 0);
5406 for (gp
= guest_envp
, q
= envp
; *q
;
5407 gp
+= sizeof(abi_ulong
), q
++) {
5408 if (get_user_ual(addr
, gp
)
5411 unlock_user(*q
, addr
, 0);
5415 case TARGET_NR_chdir
:
5416 if (!(p
= lock_user_string(arg1
)))
5418 ret
= get_errno(chdir(p
));
5419 unlock_user(p
, arg1
, 0);
5421 #ifdef TARGET_NR_time
5422 case TARGET_NR_time
:
5425 ret
= get_errno(time(&host_time
));
5428 && put_user_sal(host_time
, arg1
))
5433 case TARGET_NR_mknod
:
5434 if (!(p
= lock_user_string(arg1
)))
5436 ret
= get_errno(mknod(p
, arg2
, arg3
));
5437 unlock_user(p
, arg1
, 0);
5439 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5440 case TARGET_NR_mknodat
:
5441 if (!(p
= lock_user_string(arg2
)))
5443 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
5444 unlock_user(p
, arg2
, 0);
5447 case TARGET_NR_chmod
:
5448 if (!(p
= lock_user_string(arg1
)))
5450 ret
= get_errno(chmod(p
, arg2
));
5451 unlock_user(p
, arg1
, 0);
5453 #ifdef TARGET_NR_break
5454 case TARGET_NR_break
:
5457 #ifdef TARGET_NR_oldstat
5458 case TARGET_NR_oldstat
:
5461 case TARGET_NR_lseek
:
5462 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
5464 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5465 /* Alpha specific */
5466 case TARGET_NR_getxpid
:
5467 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
5468 ret
= get_errno(getpid());
5471 #ifdef TARGET_NR_getpid
5472 case TARGET_NR_getpid
:
5473 ret
= get_errno(getpid());
5476 case TARGET_NR_mount
:
5478 /* need to look at the data field */
5480 p
= lock_user_string(arg1
);
5481 p2
= lock_user_string(arg2
);
5482 p3
= lock_user_string(arg3
);
5483 if (!p
|| !p2
|| !p3
)
5484 ret
= -TARGET_EFAULT
;
5486 /* FIXME - arg5 should be locked, but it isn't clear how to
5487 * do that since it's not guaranteed to be a NULL-terminated
5491 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
5493 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
5495 unlock_user(p
, arg1
, 0);
5496 unlock_user(p2
, arg2
, 0);
5497 unlock_user(p3
, arg3
, 0);
5500 #ifdef TARGET_NR_umount
5501 case TARGET_NR_umount
:
5502 if (!(p
= lock_user_string(arg1
)))
5504 ret
= get_errno(umount(p
));
5505 unlock_user(p
, arg1
, 0);
5508 #ifdef TARGET_NR_stime /* not on alpha */
5509 case TARGET_NR_stime
:
5512 if (get_user_sal(host_time
, arg1
))
5514 ret
= get_errno(stime(&host_time
));
5518 case TARGET_NR_ptrace
:
5520 #ifdef TARGET_NR_alarm /* not on alpha */
5521 case TARGET_NR_alarm
:
5525 #ifdef TARGET_NR_oldfstat
5526 case TARGET_NR_oldfstat
:
5529 #ifdef TARGET_NR_pause /* not on alpha */
5530 case TARGET_NR_pause
:
5531 ret
= get_errno(pause());
5534 #ifdef TARGET_NR_utime
5535 case TARGET_NR_utime
:
5537 struct utimbuf tbuf
, *host_tbuf
;
5538 struct target_utimbuf
*target_tbuf
;
5540 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
5542 tbuf
.actime
= tswapal(target_tbuf
->actime
);
5543 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
5544 unlock_user_struct(target_tbuf
, arg2
, 0);
5549 if (!(p
= lock_user_string(arg1
)))
5551 ret
= get_errno(utime(p
, host_tbuf
));
5552 unlock_user(p
, arg1
, 0);
5556 case TARGET_NR_utimes
:
5558 struct timeval
*tvp
, tv
[2];
5560 if (copy_from_user_timeval(&tv
[0], arg2
)
5561 || copy_from_user_timeval(&tv
[1],
5562 arg2
+ sizeof(struct target_timeval
)))
5568 if (!(p
= lock_user_string(arg1
)))
5570 ret
= get_errno(utimes(p
, tvp
));
5571 unlock_user(p
, arg1
, 0);
5574 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5575 case TARGET_NR_futimesat
:
5577 struct timeval
*tvp
, tv
[2];
5579 if (copy_from_user_timeval(&tv
[0], arg3
)
5580 || copy_from_user_timeval(&tv
[1],
5581 arg3
+ sizeof(struct target_timeval
)))
5587 if (!(p
= lock_user_string(arg2
)))
5589 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
5590 unlock_user(p
, arg2
, 0);
5594 #ifdef TARGET_NR_stty
5595 case TARGET_NR_stty
:
5598 #ifdef TARGET_NR_gtty
5599 case TARGET_NR_gtty
:
5602 case TARGET_NR_access
:
5603 if (!(p
= lock_user_string(arg1
)))
5605 ret
= get_errno(access(path(p
), arg2
));
5606 unlock_user(p
, arg1
, 0);
5608 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5609 case TARGET_NR_faccessat
:
5610 if (!(p
= lock_user_string(arg2
)))
5612 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
5613 unlock_user(p
, arg2
, 0);
5616 #ifdef TARGET_NR_nice /* not on alpha */
5617 case TARGET_NR_nice
:
5618 ret
= get_errno(nice(arg1
));
5621 #ifdef TARGET_NR_ftime
5622 case TARGET_NR_ftime
:
5625 case TARGET_NR_sync
:
5629 case TARGET_NR_kill
:
5630 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5632 case TARGET_NR_rename
:
5635 p
= lock_user_string(arg1
);
5636 p2
= lock_user_string(arg2
);
5638 ret
= -TARGET_EFAULT
;
5640 ret
= get_errno(rename(p
, p2
));
5641 unlock_user(p2
, arg2
, 0);
5642 unlock_user(p
, arg1
, 0);
5645 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5646 case TARGET_NR_renameat
:
5649 p
= lock_user_string(arg2
);
5650 p2
= lock_user_string(arg4
);
5652 ret
= -TARGET_EFAULT
;
5654 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
5655 unlock_user(p2
, arg4
, 0);
5656 unlock_user(p
, arg2
, 0);
5660 case TARGET_NR_mkdir
:
5661 if (!(p
= lock_user_string(arg1
)))
5663 ret
= get_errno(mkdir(p
, arg2
));
5664 unlock_user(p
, arg1
, 0);
5666 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5667 case TARGET_NR_mkdirat
:
5668 if (!(p
= lock_user_string(arg2
)))
5670 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
5671 unlock_user(p
, arg2
, 0);
5674 case TARGET_NR_rmdir
:
5675 if (!(p
= lock_user_string(arg1
)))
5677 ret
= get_errno(rmdir(p
));
5678 unlock_user(p
, arg1
, 0);
5681 ret
= get_errno(dup(arg1
));
5683 case TARGET_NR_pipe
:
5684 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5686 #ifdef TARGET_NR_pipe2
5687 case TARGET_NR_pipe2
:
5688 ret
= do_pipe(cpu_env
, arg1
,
5689 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
5692 case TARGET_NR_times
:
5694 struct target_tms
*tmsp
;
5696 ret
= get_errno(times(&tms
));
5698 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5701 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
5702 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
5703 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
5704 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
5707 ret
= host_to_target_clock_t(ret
);
5710 #ifdef TARGET_NR_prof
5711 case TARGET_NR_prof
:
5714 #ifdef TARGET_NR_signal
5715 case TARGET_NR_signal
:
5718 case TARGET_NR_acct
:
5720 ret
= get_errno(acct(NULL
));
5722 if (!(p
= lock_user_string(arg1
)))
5724 ret
= get_errno(acct(path(p
)));
5725 unlock_user(p
, arg1
, 0);
5728 #ifdef TARGET_NR_umount2 /* not on alpha */
5729 case TARGET_NR_umount2
:
5730 if (!(p
= lock_user_string(arg1
)))
5732 ret
= get_errno(umount2(p
, arg2
));
5733 unlock_user(p
, arg1
, 0);
5736 #ifdef TARGET_NR_lock
5737 case TARGET_NR_lock
:
5740 case TARGET_NR_ioctl
:
5741 ret
= do_ioctl(arg1
, arg2
, arg3
);
5743 case TARGET_NR_fcntl
:
5744 ret
= do_fcntl(arg1
, arg2
, arg3
);
5746 #ifdef TARGET_NR_mpx
5750 case TARGET_NR_setpgid
:
5751 ret
= get_errno(setpgid(arg1
, arg2
));
5753 #ifdef TARGET_NR_ulimit
5754 case TARGET_NR_ulimit
:
5757 #ifdef TARGET_NR_oldolduname
5758 case TARGET_NR_oldolduname
:
5761 case TARGET_NR_umask
:
5762 ret
= get_errno(umask(arg1
));
5764 case TARGET_NR_chroot
:
5765 if (!(p
= lock_user_string(arg1
)))
5767 ret
= get_errno(chroot(p
));
5768 unlock_user(p
, arg1
, 0);
5770 case TARGET_NR_ustat
:
5772 case TARGET_NR_dup2
:
5773 ret
= get_errno(dup2(arg1
, arg2
));
5775 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5776 case TARGET_NR_dup3
:
5777 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5780 #ifdef TARGET_NR_getppid /* not on alpha */
5781 case TARGET_NR_getppid
:
5782 ret
= get_errno(getppid());
5785 case TARGET_NR_getpgrp
:
5786 ret
= get_errno(getpgrp());
5788 case TARGET_NR_setsid
:
5789 ret
= get_errno(setsid());
5791 #ifdef TARGET_NR_sigaction
5792 case TARGET_NR_sigaction
:
5794 #if defined(TARGET_ALPHA)
5795 struct target_sigaction act
, oact
, *pact
= 0;
5796 struct target_old_sigaction
*old_act
;
5798 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5800 act
._sa_handler
= old_act
->_sa_handler
;
5801 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5802 act
.sa_flags
= old_act
->sa_flags
;
5803 act
.sa_restorer
= 0;
5804 unlock_user_struct(old_act
, arg2
, 0);
5807 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5808 if (!is_error(ret
) && arg3
) {
5809 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5811 old_act
->_sa_handler
= oact
._sa_handler
;
5812 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5813 old_act
->sa_flags
= oact
.sa_flags
;
5814 unlock_user_struct(old_act
, arg3
, 1);
5816 #elif defined(TARGET_MIPS)
5817 struct target_sigaction act
, oact
, *pact
, *old_act
;
5820 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5822 act
._sa_handler
= old_act
->_sa_handler
;
5823 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5824 act
.sa_flags
= old_act
->sa_flags
;
5825 unlock_user_struct(old_act
, arg2
, 0);
5831 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5833 if (!is_error(ret
) && arg3
) {
5834 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5836 old_act
->_sa_handler
= oact
._sa_handler
;
5837 old_act
->sa_flags
= oact
.sa_flags
;
5838 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5839 old_act
->sa_mask
.sig
[1] = 0;
5840 old_act
->sa_mask
.sig
[2] = 0;
5841 old_act
->sa_mask
.sig
[3] = 0;
5842 unlock_user_struct(old_act
, arg3
, 1);
5845 struct target_old_sigaction
*old_act
;
5846 struct target_sigaction act
, oact
, *pact
;
5848 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5850 act
._sa_handler
= old_act
->_sa_handler
;
5851 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5852 act
.sa_flags
= old_act
->sa_flags
;
5853 act
.sa_restorer
= old_act
->sa_restorer
;
5854 unlock_user_struct(old_act
, arg2
, 0);
5859 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5860 if (!is_error(ret
) && arg3
) {
5861 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5863 old_act
->_sa_handler
= oact
._sa_handler
;
5864 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5865 old_act
->sa_flags
= oact
.sa_flags
;
5866 old_act
->sa_restorer
= oact
.sa_restorer
;
5867 unlock_user_struct(old_act
, arg3
, 1);
5873 case TARGET_NR_rt_sigaction
:
5875 #if defined(TARGET_ALPHA)
5876 struct target_sigaction act
, oact
, *pact
= 0;
5877 struct target_rt_sigaction
*rt_act
;
5878 /* ??? arg4 == sizeof(sigset_t). */
5880 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5882 act
._sa_handler
= rt_act
->_sa_handler
;
5883 act
.sa_mask
= rt_act
->sa_mask
;
5884 act
.sa_flags
= rt_act
->sa_flags
;
5885 act
.sa_restorer
= arg5
;
5886 unlock_user_struct(rt_act
, arg2
, 0);
5889 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5890 if (!is_error(ret
) && arg3
) {
5891 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5893 rt_act
->_sa_handler
= oact
._sa_handler
;
5894 rt_act
->sa_mask
= oact
.sa_mask
;
5895 rt_act
->sa_flags
= oact
.sa_flags
;
5896 unlock_user_struct(rt_act
, arg3
, 1);
5899 struct target_sigaction
*act
;
5900 struct target_sigaction
*oact
;
5903 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5908 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5909 ret
= -TARGET_EFAULT
;
5910 goto rt_sigaction_fail
;
5914 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5917 unlock_user_struct(act
, arg2
, 0);
5919 unlock_user_struct(oact
, arg3
, 1);
5923 #ifdef TARGET_NR_sgetmask /* not on alpha */
5924 case TARGET_NR_sgetmask
:
5927 abi_ulong target_set
;
5928 sigprocmask(0, NULL
, &cur_set
);
5929 host_to_target_old_sigset(&target_set
, &cur_set
);
5934 #ifdef TARGET_NR_ssetmask /* not on alpha */
5935 case TARGET_NR_ssetmask
:
5937 sigset_t set
, oset
, cur_set
;
5938 abi_ulong target_set
= arg1
;
5939 sigprocmask(0, NULL
, &cur_set
);
5940 target_to_host_old_sigset(&set
, &target_set
);
5941 sigorset(&set
, &set
, &cur_set
);
5942 sigprocmask(SIG_SETMASK
, &set
, &oset
);
5943 host_to_target_old_sigset(&target_set
, &oset
);
5948 #ifdef TARGET_NR_sigprocmask
5949 case TARGET_NR_sigprocmask
:
5951 #if defined(TARGET_ALPHA)
5952 sigset_t set
, oldset
;
5957 case TARGET_SIG_BLOCK
:
5960 case TARGET_SIG_UNBLOCK
:
5963 case TARGET_SIG_SETMASK
:
5967 ret
= -TARGET_EINVAL
;
5971 target_to_host_old_sigset(&set
, &mask
);
5973 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
5974 if (!is_error(ret
)) {
5975 host_to_target_old_sigset(&mask
, &oldset
);
5977 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
5980 sigset_t set
, oldset
, *set_ptr
;
5985 case TARGET_SIG_BLOCK
:
5988 case TARGET_SIG_UNBLOCK
:
5991 case TARGET_SIG_SETMASK
:
5995 ret
= -TARGET_EINVAL
;
5998 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6000 target_to_host_old_sigset(&set
, p
);
6001 unlock_user(p
, arg2
, 0);
6007 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
6008 if (!is_error(ret
) && arg3
) {
6009 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6011 host_to_target_old_sigset(p
, &oldset
);
6012 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6018 case TARGET_NR_rt_sigprocmask
:
6021 sigset_t set
, oldset
, *set_ptr
;
6025 case TARGET_SIG_BLOCK
:
6028 case TARGET_SIG_UNBLOCK
:
6031 case TARGET_SIG_SETMASK
:
6035 ret
= -TARGET_EINVAL
;
6038 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6040 target_to_host_sigset(&set
, p
);
6041 unlock_user(p
, arg2
, 0);
6047 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
6048 if (!is_error(ret
) && arg3
) {
6049 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6051 host_to_target_sigset(p
, &oldset
);
6052 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6056 #ifdef TARGET_NR_sigpending
6057 case TARGET_NR_sigpending
:
6060 ret
= get_errno(sigpending(&set
));
6061 if (!is_error(ret
)) {
6062 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6064 host_to_target_old_sigset(p
, &set
);
6065 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6070 case TARGET_NR_rt_sigpending
:
6073 ret
= get_errno(sigpending(&set
));
6074 if (!is_error(ret
)) {
6075 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6077 host_to_target_sigset(p
, &set
);
6078 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6082 #ifdef TARGET_NR_sigsuspend
6083 case TARGET_NR_sigsuspend
:
6086 #if defined(TARGET_ALPHA)
6087 abi_ulong mask
= arg1
;
6088 target_to_host_old_sigset(&set
, &mask
);
6090 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6092 target_to_host_old_sigset(&set
, p
);
6093 unlock_user(p
, arg1
, 0);
6095 ret
= get_errno(sigsuspend(&set
));
6099 case TARGET_NR_rt_sigsuspend
:
6102 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6104 target_to_host_sigset(&set
, p
);
6105 unlock_user(p
, arg1
, 0);
6106 ret
= get_errno(sigsuspend(&set
));
6109 case TARGET_NR_rt_sigtimedwait
:
6112 struct timespec uts
, *puts
;
6115 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6117 target_to_host_sigset(&set
, p
);
6118 unlock_user(p
, arg1
, 0);
6121 target_to_host_timespec(puts
, arg3
);
6125 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6126 if (!is_error(ret
) && arg2
) {
6127 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
6129 host_to_target_siginfo(p
, &uinfo
);
6130 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6134 case TARGET_NR_rt_sigqueueinfo
:
6137 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
6139 target_to_host_siginfo(&uinfo
, p
);
6140 unlock_user(p
, arg1
, 0);
6141 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
6144 #ifdef TARGET_NR_sigreturn
6145 case TARGET_NR_sigreturn
:
6146 /* NOTE: ret is eax, so not transcoding must be done */
6147 ret
= do_sigreturn(cpu_env
);
6150 case TARGET_NR_rt_sigreturn
:
6151 /* NOTE: ret is eax, so not transcoding must be done */
6152 ret
= do_rt_sigreturn(cpu_env
);
6154 case TARGET_NR_sethostname
:
6155 if (!(p
= lock_user_string(arg1
)))
6157 ret
= get_errno(sethostname(p
, arg2
));
6158 unlock_user(p
, arg1
, 0);
6160 case TARGET_NR_setrlimit
:
6162 int resource
= target_to_host_resource(arg1
);
6163 struct target_rlimit
*target_rlim
;
6165 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
6167 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
6168 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
6169 unlock_user_struct(target_rlim
, arg2
, 0);
6170 ret
= get_errno(setrlimit(resource
, &rlim
));
6173 case TARGET_NR_getrlimit
:
6175 int resource
= target_to_host_resource(arg1
);
6176 struct target_rlimit
*target_rlim
;
6179 ret
= get_errno(getrlimit(resource
, &rlim
));
6180 if (!is_error(ret
)) {
6181 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6183 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6184 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6185 unlock_user_struct(target_rlim
, arg2
, 1);
6189 case TARGET_NR_getrusage
:
6191 struct rusage rusage
;
6192 ret
= get_errno(getrusage(arg1
, &rusage
));
6193 if (!is_error(ret
)) {
6194 host_to_target_rusage(arg2
, &rusage
);
6198 case TARGET_NR_gettimeofday
:
6201 ret
= get_errno(gettimeofday(&tv
, NULL
));
6202 if (!is_error(ret
)) {
6203 if (copy_to_user_timeval(arg1
, &tv
))
6208 case TARGET_NR_settimeofday
:
6211 if (copy_from_user_timeval(&tv
, arg1
))
6213 ret
= get_errno(settimeofday(&tv
, NULL
));
6216 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
6217 case TARGET_NR_select
:
6219 struct target_sel_arg_struct
*sel
;
6220 abi_ulong inp
, outp
, exp
, tvp
;
6223 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
6225 nsel
= tswapal(sel
->n
);
6226 inp
= tswapal(sel
->inp
);
6227 outp
= tswapal(sel
->outp
);
6228 exp
= tswapal(sel
->exp
);
6229 tvp
= tswapal(sel
->tvp
);
6230 unlock_user_struct(sel
, arg1
, 0);
6231 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
6235 #ifdef TARGET_NR_pselect6
6236 case TARGET_NR_pselect6
:
6238 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
6239 fd_set rfds
, wfds
, efds
;
6240 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
6241 struct timespec ts
, *ts_ptr
;
6244 * The 6th arg is actually two args smashed together,
6245 * so we cannot use the C library.
6253 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
6254 target_sigset_t
*target_sigset
;
6262 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
6266 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
6270 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
6276 * This takes a timespec, and not a timeval, so we cannot
6277 * use the do_select() helper ...
6280 if (target_to_host_timespec(&ts
, ts_addr
)) {
6288 /* Extract the two packed args for the sigset */
6291 sig
.size
= _NSIG
/ 8;
6293 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
6297 arg_sigset
= tswapal(arg7
[0]);
6298 arg_sigsize
= tswapal(arg7
[1]);
6299 unlock_user(arg7
, arg6
, 0);
6303 if (arg_sigsize
!= sizeof(*target_sigset
)) {
6304 /* Like the kernel, we enforce correct size sigsets */
6305 ret
= -TARGET_EINVAL
;
6308 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
6309 sizeof(*target_sigset
), 1);
6310 if (!target_sigset
) {
6313 target_to_host_sigset(&set
, target_sigset
);
6314 unlock_user(target_sigset
, arg_sigset
, 0);
6322 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
6325 if (!is_error(ret
)) {
6326 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
6328 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
6330 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
6333 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
6339 case TARGET_NR_symlink
:
6342 p
= lock_user_string(arg1
);
6343 p2
= lock_user_string(arg2
);
6345 ret
= -TARGET_EFAULT
;
6347 ret
= get_errno(symlink(p
, p2
));
6348 unlock_user(p2
, arg2
, 0);
6349 unlock_user(p
, arg1
, 0);
6352 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6353 case TARGET_NR_symlinkat
:
6356 p
= lock_user_string(arg1
);
6357 p2
= lock_user_string(arg3
);
6359 ret
= -TARGET_EFAULT
;
6361 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
6362 unlock_user(p2
, arg3
, 0);
6363 unlock_user(p
, arg1
, 0);
6367 #ifdef TARGET_NR_oldlstat
6368 case TARGET_NR_oldlstat
:
6371 case TARGET_NR_readlink
:
6374 p
= lock_user_string(arg1
);
6375 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
6377 ret
= -TARGET_EFAULT
;
6379 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
6380 char real
[PATH_MAX
];
6381 temp
= realpath(exec_path
,real
);
6382 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
6383 snprintf((char *)p2
, arg3
, "%s", real
);
6386 ret
= get_errno(readlink(path(p
), p2
, arg3
));
6388 unlock_user(p2
, arg2
, ret
);
6389 unlock_user(p
, arg1
, 0);
6392 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6393 case TARGET_NR_readlinkat
:
6396 p
= lock_user_string(arg2
);
6397 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
6399 ret
= -TARGET_EFAULT
;
6401 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
6402 unlock_user(p2
, arg3
, ret
);
6403 unlock_user(p
, arg2
, 0);
6407 #ifdef TARGET_NR_uselib
6408 case TARGET_NR_uselib
:
6411 #ifdef TARGET_NR_swapon
6412 case TARGET_NR_swapon
:
6413 if (!(p
= lock_user_string(arg1
)))
6415 ret
= get_errno(swapon(p
, arg2
));
6416 unlock_user(p
, arg1
, 0);
6419 case TARGET_NR_reboot
:
6420 if (!(p
= lock_user_string(arg4
)))
6422 ret
= reboot(arg1
, arg2
, arg3
, p
);
6423 unlock_user(p
, arg4
, 0);
6425 #ifdef TARGET_NR_readdir
6426 case TARGET_NR_readdir
:
6429 #ifdef TARGET_NR_mmap
6430 case TARGET_NR_mmap
:
6431 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6432 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6433 || defined(TARGET_S390X)
6436 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
6437 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
6445 unlock_user(v
, arg1
, 0);
6446 ret
= get_errno(target_mmap(v1
, v2
, v3
,
6447 target_to_host_bitmask(v4
, mmap_flags_tbl
),
6451 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6452 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6458 #ifdef TARGET_NR_mmap2
6459 case TARGET_NR_mmap2
:
6461 #define MMAP_SHIFT 12
6463 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6464 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6466 arg6
<< MMAP_SHIFT
));
6469 case TARGET_NR_munmap
:
6470 ret
= get_errno(target_munmap(arg1
, arg2
));
6472 case TARGET_NR_mprotect
:
6474 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
6475 /* Special hack to detect libc making the stack executable. */
6476 if ((arg3
& PROT_GROWSDOWN
)
6477 && arg1
>= ts
->info
->stack_limit
6478 && arg1
<= ts
->info
->start_stack
) {
6479 arg3
&= ~PROT_GROWSDOWN
;
6480 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
6481 arg1
= ts
->info
->stack_limit
;
6484 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
6486 #ifdef TARGET_NR_mremap
6487 case TARGET_NR_mremap
:
6488 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
6491 /* ??? msync/mlock/munlock are broken for softmmu. */
6492 #ifdef TARGET_NR_msync
6493 case TARGET_NR_msync
:
6494 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
6497 #ifdef TARGET_NR_mlock
6498 case TARGET_NR_mlock
:
6499 ret
= get_errno(mlock(g2h(arg1
), arg2
));
6502 #ifdef TARGET_NR_munlock
6503 case TARGET_NR_munlock
:
6504 ret
= get_errno(munlock(g2h(arg1
), arg2
));
6507 #ifdef TARGET_NR_mlockall
6508 case TARGET_NR_mlockall
:
6509 ret
= get_errno(mlockall(arg1
));
6512 #ifdef TARGET_NR_munlockall
6513 case TARGET_NR_munlockall
:
6514 ret
= get_errno(munlockall());
6517 case TARGET_NR_truncate
:
6518 if (!(p
= lock_user_string(arg1
)))
6520 ret
= get_errno(truncate(p
, arg2
));
6521 unlock_user(p
, arg1
, 0);
6523 case TARGET_NR_ftruncate
:
6524 ret
= get_errno(ftruncate(arg1
, arg2
));
6526 case TARGET_NR_fchmod
:
6527 ret
= get_errno(fchmod(arg1
, arg2
));
6529 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6530 case TARGET_NR_fchmodat
:
6531 if (!(p
= lock_user_string(arg2
)))
6533 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
6534 unlock_user(p
, arg2
, 0);
6537 case TARGET_NR_getpriority
:
6538 /* Note that negative values are valid for getpriority, so we must
6539 differentiate based on errno settings. */
6541 ret
= getpriority(arg1
, arg2
);
6542 if (ret
== -1 && errno
!= 0) {
6543 ret
= -host_to_target_errno(errno
);
6547 /* Return value is the unbiased priority. Signal no error. */
6548 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
6550 /* Return value is a biased priority to avoid negative numbers. */
6554 case TARGET_NR_setpriority
:
6555 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
6557 #ifdef TARGET_NR_profil
6558 case TARGET_NR_profil
:
6561 case TARGET_NR_statfs
:
6562 if (!(p
= lock_user_string(arg1
)))
6564 ret
= get_errno(statfs(path(p
), &stfs
));
6565 unlock_user(p
, arg1
, 0);
6567 if (!is_error(ret
)) {
6568 struct target_statfs
*target_stfs
;
6570 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6572 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6573 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6574 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6575 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6576 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6577 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6578 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6579 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6580 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6581 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6582 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6583 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6584 unlock_user_struct(target_stfs
, arg2
, 1);
6587 case TARGET_NR_fstatfs
:
6588 ret
= get_errno(fstatfs(arg1
, &stfs
));
6589 goto convert_statfs
;
6590 #ifdef TARGET_NR_statfs64
6591 case TARGET_NR_statfs64
:
6592 if (!(p
= lock_user_string(arg1
)))
6594 ret
= get_errno(statfs(path(p
), &stfs
));
6595 unlock_user(p
, arg1
, 0);
6597 if (!is_error(ret
)) {
6598 struct target_statfs64
*target_stfs
;
6600 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6602 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6603 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6604 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6605 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6606 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6607 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6608 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6609 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6610 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6611 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6612 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6613 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6614 unlock_user_struct(target_stfs
, arg3
, 1);
6617 case TARGET_NR_fstatfs64
:
6618 ret
= get_errno(fstatfs(arg1
, &stfs
));
6619 goto convert_statfs64
;
6621 #ifdef TARGET_NR_ioperm
6622 case TARGET_NR_ioperm
:
6625 #ifdef TARGET_NR_socketcall
6626 case TARGET_NR_socketcall
:
6627 ret
= do_socketcall(arg1
, arg2
);
6630 #ifdef TARGET_NR_accept
6631 case TARGET_NR_accept
:
6632 ret
= do_accept(arg1
, arg2
, arg3
);
6635 #ifdef TARGET_NR_bind
6636 case TARGET_NR_bind
:
6637 ret
= do_bind(arg1
, arg2
, arg3
);
6640 #ifdef TARGET_NR_connect
6641 case TARGET_NR_connect
:
6642 ret
= do_connect(arg1
, arg2
, arg3
);
6645 #ifdef TARGET_NR_getpeername
6646 case TARGET_NR_getpeername
:
6647 ret
= do_getpeername(arg1
, arg2
, arg3
);
6650 #ifdef TARGET_NR_getsockname
6651 case TARGET_NR_getsockname
:
6652 ret
= do_getsockname(arg1
, arg2
, arg3
);
6655 #ifdef TARGET_NR_getsockopt
6656 case TARGET_NR_getsockopt
:
6657 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
6660 #ifdef TARGET_NR_listen
6661 case TARGET_NR_listen
:
6662 ret
= get_errno(listen(arg1
, arg2
));
6665 #ifdef TARGET_NR_recv
6666 case TARGET_NR_recv
:
6667 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
6670 #ifdef TARGET_NR_recvfrom
6671 case TARGET_NR_recvfrom
:
6672 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6675 #ifdef TARGET_NR_recvmsg
6676 case TARGET_NR_recvmsg
:
6677 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
6680 #ifdef TARGET_NR_send
6681 case TARGET_NR_send
:
6682 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
6685 #ifdef TARGET_NR_sendmsg
6686 case TARGET_NR_sendmsg
:
6687 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
6690 #ifdef TARGET_NR_sendto
6691 case TARGET_NR_sendto
:
6692 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6695 #ifdef TARGET_NR_shutdown
6696 case TARGET_NR_shutdown
:
6697 ret
= get_errno(shutdown(arg1
, arg2
));
6700 #ifdef TARGET_NR_socket
6701 case TARGET_NR_socket
:
6702 ret
= do_socket(arg1
, arg2
, arg3
);
6705 #ifdef TARGET_NR_socketpair
6706 case TARGET_NR_socketpair
:
6707 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
6710 #ifdef TARGET_NR_setsockopt
6711 case TARGET_NR_setsockopt
:
6712 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
6716 case TARGET_NR_syslog
:
6717 if (!(p
= lock_user_string(arg2
)))
6719 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
6720 unlock_user(p
, arg2
, 0);
6723 case TARGET_NR_setitimer
:
6725 struct itimerval value
, ovalue
, *pvalue
;
6729 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
6730 || copy_from_user_timeval(&pvalue
->it_value
,
6731 arg2
+ sizeof(struct target_timeval
)))
6736 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
6737 if (!is_error(ret
) && arg3
) {
6738 if (copy_to_user_timeval(arg3
,
6739 &ovalue
.it_interval
)
6740 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
6746 case TARGET_NR_getitimer
:
6748 struct itimerval value
;
6750 ret
= get_errno(getitimer(arg1
, &value
));
6751 if (!is_error(ret
) && arg2
) {
6752 if (copy_to_user_timeval(arg2
,
6754 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
6760 case TARGET_NR_stat
:
6761 if (!(p
= lock_user_string(arg1
)))
6763 ret
= get_errno(stat(path(p
), &st
));
6764 unlock_user(p
, arg1
, 0);
6766 case TARGET_NR_lstat
:
6767 if (!(p
= lock_user_string(arg1
)))
6769 ret
= get_errno(lstat(path(p
), &st
));
6770 unlock_user(p
, arg1
, 0);
6772 case TARGET_NR_fstat
:
6774 ret
= get_errno(fstat(arg1
, &st
));
6776 if (!is_error(ret
)) {
6777 struct target_stat
*target_st
;
6779 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
6781 memset(target_st
, 0, sizeof(*target_st
));
6782 __put_user(st
.st_dev
, &target_st
->st_dev
);
6783 __put_user(st
.st_ino
, &target_st
->st_ino
);
6784 __put_user(st
.st_mode
, &target_st
->st_mode
);
6785 __put_user(st
.st_uid
, &target_st
->st_uid
);
6786 __put_user(st
.st_gid
, &target_st
->st_gid
);
6787 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
6788 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
6789 __put_user(st
.st_size
, &target_st
->st_size
);
6790 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
6791 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
6792 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
6793 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
6794 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
6795 unlock_user_struct(target_st
, arg2
, 1);
6799 #ifdef TARGET_NR_olduname
6800 case TARGET_NR_olduname
:
6803 #ifdef TARGET_NR_iopl
6804 case TARGET_NR_iopl
:
6807 case TARGET_NR_vhangup
:
6808 ret
= get_errno(vhangup());
6810 #ifdef TARGET_NR_idle
6811 case TARGET_NR_idle
:
6814 #ifdef TARGET_NR_syscall
6815 case TARGET_NR_syscall
:
6816 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
6817 arg6
, arg7
, arg8
, 0);
6820 case TARGET_NR_wait4
:
6823 abi_long status_ptr
= arg2
;
6824 struct rusage rusage
, *rusage_ptr
;
6825 abi_ulong target_rusage
= arg4
;
6827 rusage_ptr
= &rusage
;
6830 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6831 if (!is_error(ret
)) {
6832 if (status_ptr
&& ret
) {
6833 status
= host_to_target_waitstatus(status
);
6834 if (put_user_s32(status
, status_ptr
))
6838 host_to_target_rusage(target_rusage
, &rusage
);
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
    case TARGET_NR_sysinfo:
        struct target_sysinfo *target_value;
        struct sysinfo value;
        ret = get_errno(sysinfo(&value));
        if (!is_error(ret) && arg1)
            if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
            __put_user(value.uptime, &target_value->uptime);
            __put_user(value.loads[0], &target_value->loads[0]);
            __put_user(value.loads[1], &target_value->loads[1]);
            __put_user(value.loads[2], &target_value->loads[2]);
            __put_user(value.totalram, &target_value->totalram);
            __put_user(value.freeram, &target_value->freeram);
            __put_user(value.sharedram, &target_value->sharedram);
            __put_user(value.bufferram, &target_value->bufferram);
            __put_user(value.totalswap, &target_value->totalswap);
            __put_user(value.freeswap, &target_value->freeswap);
            __put_user(value.procs, &target_value->procs);
            __put_user(value.totalhigh, &target_value->totalhigh);
            __put_user(value.freehigh, &target_value->freehigh);
            __put_user(value.mem_unit, &target_value->mem_unit);
            unlock_user_struct(target_value, arg1, 1);
#ifdef TARGET_NR_ipc
        ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        ret = get_errno(semget(arg1, arg2, arg3));
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        ret = get_errno(do_semop(arg1, arg2, arg3));
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        ret = do_msgctl(arg1, arg2, arg3);
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        ret = get_errno(msgget(arg1, arg2));
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        ret = do_msgsnd(arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        ret = get_errno(shmget(arg1, arg2, arg3));
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        ret = do_shmctl(arg1, arg2, arg3);
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        ret = do_shmat(arg1, arg2, arg3);
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        ret = do_shmdt(arg1);
    case TARGET_NR_fsync:
        ret = get_errno(fsync(arg1));
    case TARGET_NR_clone:
#if defined(TARGET_SH4) || defined(TARGET_ALPHA)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#elif defined(TARGET_CRIS)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
#elif defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_S390X)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        gdb_exit(cpu_env, arg1);
        ret = get_errno(exit_group(arg1));
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        struct new_utsname * buf;
        if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
        ret = get_errno(sys_uname(buf));
        if (!is_error(ret)) {
            /* Overwrite the native machine name with whatever is being
               emulated. */
            strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
            /* Allow the user to override the reported release. */
            if (qemu_uname_release && *qemu_uname_release)
                strcpy (buf->release, qemu_uname_release);
        unlock_user_struct(buf, arg1, 1);
    case TARGET_NR_modify_ldt:
        ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86old:
    case TARGET_NR_vm86:
        ret = do_vm86(cpu_env, arg1, arg2);
    case TARGET_NR_adjtimex:
#ifdef TARGET_NR_create_module
    case TARGET_NR_create_module:
    case TARGET_NR_init_module:
    case TARGET_NR_delete_module:
#ifdef TARGET_NR_get_kernel_syms
    case TARGET_NR_get_kernel_syms:
    case TARGET_NR_quotactl:
    case TARGET_NR_getpgid:
        ret = get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        ret = get_errno(fchdir(arg1));
#ifdef TARGET_NR_bdflush /* not on x86_64 */
    case TARGET_NR_bdflush:
#ifdef TARGET_NR_sysfs
    case TARGET_NR_sysfs:
    case TARGET_NR_personality:
        ret = get_errno(personality(arg1));
#ifdef TARGET_NR_afs_syscall
    case TARGET_NR_afs_syscall:
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
#if !defined(__NR_llseek)
        res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
        ret = get_errno(res);
        ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
        if ((ret == 0) && put_user_s64(res, arg4)) {
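    /* For getdents with a 32-bit guest on a 64-bit host, the host's
     * struct linux_dirent (64-bit d_ino/d_off) cannot be handed to the
     * guest as-is: each record below is repacked into a target_dirent
     * with a recomputed d_reclen and byte-swapped fields. */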
    case TARGET_NR_getdents:
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        struct target_dirent *target_dirp;
        struct linux_dirent *dirp;
        abi_long count = arg3;
        dirp = malloc(count);
        ret = -TARGET_ENOMEM;
        ret = get_errno(sys_getdents(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent *de;
            struct target_dirent *tde;
            int reclen, treclen;
            int count1, tnamelen;
            if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                reclen = de->d_reclen;
                tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                assert(tnamelen >= 0);
                treclen = tnamelen + offsetof(struct target_dirent, d_name);
                assert(count1 + treclen <= count);
                tde->d_reclen = tswap16(treclen);
                tde->d_ino = tswapal(de->d_ino);
                tde->d_off = tswapal(de->d_off);
                memcpy(tde->d_name, de->d_name, tnamelen);
                de = (struct linux_dirent *)((char *)de + reclen);
                tde = (struct target_dirent *)((char *)tde + treclen);
            unlock_user(target_dirp, arg2, ret);
        struct linux_dirent *dirp;
        abi_long count = arg3;
        if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
        ret = get_errno(sys_getdents(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent *de;
                reclen = de->d_reclen;
                de->d_reclen = tswap16(reclen);
                tswapls(&de->d_ino);
                tswapls(&de->d_off);
                de = (struct linux_dirent *)((char *)de + reclen);
        unlock_user(dirp, arg2, ret);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        struct linux_dirent64 *dirp;
        abi_long count = arg3;
        if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
        ret = get_errno(sys_getdents64(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent64 *de;
                reclen = de->d_reclen;
                de->d_reclen = tswap16(reclen);
                tswap64s((uint64_t *)&de->d_ino);
                tswap64s((uint64_t *)&de->d_off);
                de = (struct linux_dirent64 *)((char *)de + reclen);
        unlock_user(dirp, arg2, ret);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
    case TARGET_NR_select:
    case TARGET_NR__newselect:
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
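    /* poll/ppoll: the guest's target_pollfd array is converted into a
     * host pollfd array (fd/events byte-swapped), the wait is performed,
     * and the revents fields are swapped back.  For ppoll the timespec
     * timeout and the signal mask are converted as well. */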
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        struct target_pollfd *target_pfd;
        unsigned int nfds = arg2;
        target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
        pfd = alloca(sizeof(struct pollfd) * nfds);
        for(i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
# ifdef TARGET_NR_ppoll
        if (num == TARGET_NR_ppoll) {
            struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;
            if (target_to_host_timespec(timeout_ts, arg3)) {
                unlock_user(target_pfd, arg1, 0);
            target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                unlock_user(target_pfd, arg1, 0);
            target_to_host_sigset(set, target_set);
            ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
            if (!is_error(ret) && arg3) {
                host_to_target_timespec(arg3, timeout_ts);
            unlock_user(target_set, arg4, 0);
        ret = get_errno(poll(pfd, nfds, timeout));
        if (!is_error(ret)) {
            for(i = 0; i < nfds; i++) {
                target_pfd[i].revents = tswap16(pfd[i].revents);
        unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        ret = get_errno(flock(arg1, arg2));
    case TARGET_NR_readv:
        struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
        ret = get_errno(readv(arg1, vec, arg3));
        unlock_iovec(vec, arg2, arg3, 1);
        ret = -host_to_target_errno(errno);
    case TARGET_NR_writev:
        struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
        ret = get_errno(writev(arg1, vec, arg3));
        unlock_iovec(vec, arg2, arg3, 0);
        ret = -host_to_target_errno(errno);
    case TARGET_NR_getsid:
        ret = get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        ret = get_errno(fdatasync(arg1));
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        ret = -TARGET_ENOTDIR;
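    /* Worked example of the rounding done below: for a 32-bit guest on a
     * 64-bit host, a request of arg2 == 4 bytes passes the abi_ulong
     * alignment check and is rounded up to mask_size == 8, i.e. one host
     * unsigned long. */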
    case TARGET_NR_sched_getaffinity:
        unsigned int mask_size;
        unsigned long *mask;
        /*
         * sched_getaffinity needs multiples of ulong, so need to take
         * care of mismatches between target ulong and host ulong sizes.
         */
        if (arg2 & (sizeof(abi_ulong) - 1)) {
            ret = -TARGET_EINVAL;
        mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
        mask = alloca(mask_size);
        ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
        if (!is_error(ret)) {
            if (copy_to_user(arg3, mask, ret)) {
    case TARGET_NR_sched_setaffinity:
        unsigned int mask_size;
        unsigned long *mask;
        /*
         * sched_setaffinity needs multiples of ulong, so need to take
         * care of mismatches between target ulong and host ulong sizes.
         */
        if (arg2 & (sizeof(abi_ulong) - 1)) {
            ret = -TARGET_EINVAL;
        mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
        mask = alloca(mask_size);
        if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
        memcpy(mask, p, arg2);
        unlock_user_struct(p, arg2, 0);
        ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
    case TARGET_NR_sched_setparam:
        struct sched_param *target_schp;
        struct sched_param schp;
        if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
        schp.sched_priority = tswap32(target_schp->sched_priority);
        unlock_user_struct(target_schp, arg2, 0);
        ret = get_errno(sched_setparam(arg1, &schp));
    case TARGET_NR_sched_getparam:
        struct sched_param *target_schp;
        struct sched_param schp;
        ret = get_errno(sched_getparam(arg1, &schp));
        if (!is_error(ret)) {
            if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
            target_schp->sched_priority = tswap32(schp.sched_priority);
            unlock_user_struct(target_schp, arg2, 1);
    case TARGET_NR_sched_setscheduler:
        struct sched_param *target_schp;
        struct sched_param schp;
        if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
        schp.sched_priority = tswap32(target_schp->sched_priority);
        unlock_user_struct(target_schp, arg3, 0);
        ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
    case TARGET_NR_sched_getscheduler:
        ret = get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        ret = get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        ret = get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        ret = get_errno(sched_get_priority_min(arg1));
    case TARGET_NR_sched_rr_get_interval:
        ret = get_errno(sched_rr_get_interval(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
    case TARGET_NR_nanosleep:
        struct timespec req, rem;
        target_to_host_timespec(&req, arg1);
        ret = get_errno(nanosleep(&req, &rem));
        if (is_error(ret) && arg2) {
            host_to_target_timespec(arg2, &rem);
#ifdef TARGET_NR_query_module
    case TARGET_NR_query_module:
#ifdef TARGET_NR_nfsservctl
    case TARGET_NR_nfsservctl:
    case TARGET_NR_prctl:
        case PR_GET_PDEATHSIG:
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2)) {
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            ret = get_errno(prctl(arg1, (unsigned long)name,
            unlock_user(name, arg2, 16);
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            ret = get_errno(prctl(arg1, (unsigned long)name,
            unlock_user(name, arg2, 0);
            /* Most prctl options have no pointer arguments */
            ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
#if defined(TARGET_I386) && !defined(TARGET_ABI32)
        ret = do_arch_prctl(cpu_env, arg1, arg2);
#ifdef TARGET_NR_pread
    case TARGET_NR_pread:
        if (regpairs_aligned(cpu_env))
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(pread(arg1, p, arg3, arg4));
        unlock_user(p, arg2, ret);
    case TARGET_NR_pwrite:
        if (regpairs_aligned(cpu_env))
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(pwrite(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env)) {
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env)) {
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    case TARGET_NR_sigaltstack:
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
    defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
    defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
        ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
    case TARGET_NR_sendfile:
#ifdef TARGET_NR_getpmsg
    case TARGET_NR_getpmsg:
#ifdef TARGET_NR_putpmsg
    case TARGET_NR_putpmsg:
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        ret = host_to_target_stat64(cpu_env, arg2, &st);
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        ret = host_to_target_stat64(cpu_env, arg2, &st);
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        ret = host_to_target_stat64(cpu_env, arg2, &st);
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
    (defined(__NR_fstatat64) || defined(__NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
        if (!(p = lock_user_string(arg2)))
#ifdef __NR_fstatat64
        ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
        ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
        ret = host_to_target_stat64(cpu_env, arg3, &st);
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        ret = get_errno(high2lowuid(getuid()));
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        ret = get_errno(high2lowgid(getgid()));
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        ret = get_errno(high2lowuid(geteuid()));
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        ret = get_errno(high2lowgid(getegid()));
    case TARGET_NR_setreuid:
        ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        int gidsetsize = arg1;
        target_id *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        ret = get_errno(getgroups(gidsetsize, grouplist));
        if (gidsetsize == 0)
        if (!is_error(ret)) {
            target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
            if (!target_grouplist)
            for(i = 0; i < ret; i++)
                target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
            unlock_user(target_grouplist, arg2, gidsetsize * 2);
    case TARGET_NR_setgroups:
        int gidsetsize = arg1;
        target_id *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
        if (!target_grouplist) {
            ret = -TARGET_EFAULT;
        for(i = 0; i < gidsetsize; i++)
            grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
        unlock_user(target_grouplist, arg2, 0);
        ret = get_errno(setgroups(gidsetsize, grouplist));
    case TARGET_NR_fchown:
        ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        ret = get_errno(setresuid(low2highuid(arg1),
                                  low2highuid(arg3)));
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        uid_t ruid, euid, suid;
        ret = get_errno(getresuid(&ruid, &euid, &suid));
        if (!is_error(ret)) {
            if (put_user_u16(high2lowuid(ruid), arg1)
                || put_user_u16(high2lowuid(euid), arg2)
                || put_user_u16(high2lowuid(suid), arg3))
#ifdef TARGET_NR_getresgid
    case TARGET_NR_setresgid:
        ret = get_errno(setresgid(low2highgid(arg1),
                                  low2highgid(arg3)));
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        gid_t rgid, egid, sgid;
        ret = get_errno(getresgid(&rgid, &egid, &sgid));
        if (!is_error(ret)) {
            if (put_user_u16(high2lowgid(rgid), arg1)
                || put_user_u16(high2lowgid(egid), arg2)
                || put_user_u16(high2lowgid(sgid), arg3))
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
    case TARGET_NR_setuid:
        ret = get_errno(setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        ret = get_errno(setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        ret = get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        ret = get_errno(getuid());
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        ret = get_errno(getuid());
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        ret = get_errno(getgid());
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
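        /* The OSF getsysinfo/setsysinfo cases below translate between the
         * Alpha hardware FPCR layout and the kernel's software completion
         * control word (swcr), mirroring the kernel's ieee_fpcr_to_swcr /
         * ieee_swcr_to_fpcr conversions referenced in the comments. */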
    case TARGET_GSI_IEEE_FP_CONTROL:
        uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
        /* Copied from linux ieee_fpcr_to_swcr. */
        swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
        swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
        swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
                                 | SWCR_TRAP_ENABLE_DZE
                                 | SWCR_TRAP_ENABLE_OVF);
        swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
                                 | SWCR_TRAP_ENABLE_INE);
        swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
        swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
        if (put_user_u64 (swcr, arg2))
        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             -- Retrieves current unaligned access state; not much used.
             -- Retrieves implver information; surely not used.
             -- Grabs a copy of the HWRPB; surely not used.
        */
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
    case TARGET_SSI_IEEE_FP_CONTROL:
        uint64_t swcr, fpcr, orig_fpcr;
        if (get_user_u64 (swcr, arg2)) {
        orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
        fpcr = orig_fpcr & FPCR_DYN_MASK;
        /* Copied from linux ieee_swcr_to_fpcr. */
        fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
        fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
        fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                          | SWCR_TRAP_ENABLE_DZE
                          | SWCR_TRAP_ENABLE_OVF)) << 48;
        fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                          | SWCR_TRAP_ENABLE_INE)) << 57;
        fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
        fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
        cpu_alpha_store_fpcr(cpu_env, fpcr);
    case TARGET_SSI_IEEE_RAISE_EXCEPTION:
        uint64_t exc, fpcr, orig_fpcr;
        if (get_user_u64(exc, arg2)) {
        orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
        /* We only add to the exception status here. */
        fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
        cpu_alpha_store_fpcr(cpu_env, fpcr);
        /* Old exceptions are not signaled. */
        fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
        /* If any exceptions set by this call are unmasked,
           send a signal. */
        if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
            si_code = TARGET_FPE_FLTRES;
        if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
            si_code = TARGET_FPE_FLTUND;
        if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
            si_code = TARGET_FPE_FLTOVF;
        if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
            si_code = TARGET_FPE_FLTDIV;
        if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
            si_code = TARGET_FPE_FLTINV;
        target_siginfo_t info;
        info.si_signo = SIGFPE;
        info.si_code = si_code;
        info._sifields._sigfault._addr
            = ((CPUArchState *)cpu_env)->pc;
        queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
        */
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific. */
    case TARGET_NR_osf_sigprocmask:
        sigset_t set, oldset;
        case TARGET_SIG_BLOCK:
        case TARGET_SIG_UNBLOCK:
        case TARGET_SIG_SETMASK:
            ret = -TARGET_EINVAL;
        target_to_host_old_sigset(&set, &mask);
        sigprocmask(how, &set, &oldset);
        host_to_target_old_sigset(&mask, &oldset);
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        ret = get_errno(getgid());
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        int gidsetsize = arg1;
        uint32_t *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        ret = get_errno(getgroups(gidsetsize, grouplist));
        if (gidsetsize == 0)
        if (!is_error(ret)) {
            target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
            for(i = 0; i < ret; i++)
                target_grouplist[i] = tswap32(grouplist[i]);
            unlock_user(target_grouplist, arg2, gidsetsize * 4);
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        int gidsetsize = arg1;
        uint32_t *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
        if (!target_grouplist) {
            ret = -TARGET_EFAULT;
        for(i = 0; i < gidsetsize; i++)
            grouplist[i] = tswap32(target_grouplist[i]);
        unlock_user(target_grouplist, arg2, 0);
        ret = get_errno(setgroups(gidsetsize, grouplist));
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        ret = get_errno(fchown(arg1, arg2, arg3));
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        ret = get_errno(setresuid(arg1, arg2, arg3));
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        uid_t ruid, euid, suid;
        ret = get_errno(getresuid(&ruid, &euid, &suid));
        if (!is_error(ret)) {
            if (put_user_u32(ruid, arg1)
                || put_user_u32(euid, arg2)
                || put_user_u32(suid, arg3))
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        ret = get_errno(setresgid(arg1, arg2, arg3));
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        gid_t rgid, egid, sgid;
        ret = get_errno(getresgid(&rgid, &egid, &sgid));
        if (!is_error(ret)) {
            if (put_user_u32(rgid, arg1)
                || put_user_u32(egid, arg2)
                || put_user_u32(sgid, arg3))
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
    case TARGET_NR_pivot_root:
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        ret = -TARGET_EFAULT;
        if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
        if (!(p = lock_user_string(arg3)))
        ret = get_errno(mincore(a, arg2, p));
        unlock_user(p, arg3, ret);
        unlock_user(a, arg1, 0);
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /*
         * arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order
         */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        ret = -posix_fadvise(arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok. */
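    /* fcntl64 (32-bit ABIs only): F_GETLK64/F_SETLK64/F_SETLKW64 convert
     * the guest flock64 structure to the host layout and back.  On ARM,
     * the EABI variant (target_eabi_flock64) is selected because its
     * 64-bit l_start/l_len members are aligned differently from the
     * old-ABI layout. */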
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
        struct target_flock64 *target_fl;
        struct target_eabi_flock64 *target_efl;
        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
        case TARGET_F_GETLK64:
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                target_efl->l_type = tswap16(fl.l_type);
                target_efl->l_whence = tswap16(fl.l_whence);
                target_efl->l_start = tswap64(fl.l_start);
                target_efl->l_len = tswap64(fl.l_len);
                target_efl->l_pid = tswap32(fl.l_pid);
                unlock_user_struct(target_efl, arg3, 1);
                if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                target_fl->l_type = tswap16(fl.l_type);
                target_fl->l_whence = tswap16(fl.l_whence);
                target_fl->l_start = tswap64(fl.l_start);
                target_fl->l_len = tswap64(fl.l_len);
                target_fl->l_pid = tswap32(fl.l_pid);
                unlock_user_struct(target_fl, arg3, 1);
        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            ret = get_errno(fcntl(arg1, cmd, &fl));
        ret = do_fcntl(arg1, arg2, arg3);
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
#ifdef TARGET_NR_security
    case TARGET_NR_security:
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env)) {
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
        ret = get_errno(readahead(arg1, arg2, arg3));
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
        b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            ret = -TARGET_EFAULT;
        p = lock_user_string(arg1);
        if (num == TARGET_NR_listxattr) {
            ret = get_errno(listxattr(p, b, arg3));
            ret = get_errno(llistxattr(p, b, arg3));
            ret = -TARGET_EFAULT;
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
    case TARGET_NR_flistxattr:
        b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            ret = -TARGET_EFAULT;
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        void *p, *n, *v = 0;
        v = lock_user(VERIFY_READ, arg3, arg4, 1);
            ret = -TARGET_EFAULT;
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (num == TARGET_NR_setxattr) {
            ret = get_errno(setxattr(p, n, v, arg4, arg5));
            ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
            ret = -TARGET_EFAULT;
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    case TARGET_NR_fsetxattr:
        v = lock_user(VERIFY_READ, arg3, arg4, 1);
            ret = -TARGET_EFAULT;
        n = lock_user_string(arg2);
        ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            ret = -TARGET_EFAULT;
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        void *p, *n, *v = 0;
        v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            ret = -TARGET_EFAULT;
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (num == TARGET_NR_getxattr) {
            ret = get_errno(getxattr(p, n, v, arg4));
            ret = get_errno(lgetxattr(p, n, v, arg4));
            ret = -TARGET_EFAULT;
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
    case TARGET_NR_fgetxattr:
        v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            ret = -TARGET_EFAULT;
        n = lock_user_string(arg2);
        ret = get_errno(fgetxattr(arg1, n, v, arg4));
            ret = -TARGET_EFAULT;
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (num == TARGET_NR_removexattr) {
            ret = get_errno(removexattr(p, n));
            ret = get_errno(lremovexattr(p, n));
            ret = -TARGET_EFAULT;
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
    case TARGET_NR_fremovexattr:
        n = lock_user_string(arg2);
        ret = get_errno(fremovexattr(arg1, n));
            ret = -TARGET_EFAULT;
        unlock_user(n, arg2, 0);
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
#elif defined(TARGET_CRIS)
            ret = -TARGET_EINVAL;
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        goto unimplemented_nowarn;
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        goto unimplemented_nowarn;
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        host_to_target_timespec(arg4, &ts);
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        struct timespec *tsp, ts[2];
        target_to_host_timespec(ts, arg3);
        target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
        ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
        if (!(p = lock_user_string(arg2))) {
            ret = -TARGET_EFAULT;
        ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
        unlock_user(p, arg2, 0);
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        struct mq_attr posix_mq_attr;
        p = lock_user_string(arg1 - 1);
        copy_from_user_mq_attr (&posix_mq_attr, arg4);
        ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
        unlock_user (p, arg1, 0);
    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
    case TARGET_NR_mq_timedsend:
        p = lock_user (VERIFY_READ, arg2, arg3, 1);
        target_to_host_timespec(&ts, arg5);
        ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
        host_to_target_timespec(arg5, &ts);
        ret = get_errno(mq_send(arg1, p, arg3, arg4));
        unlock_user (p, arg2, arg3);
    case TARGET_NR_mq_timedreceive:
        p = lock_user (VERIFY_READ, arg2, arg3, 1);
        target_to_host_timespec(&ts, arg5);
        ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
        host_to_target_timespec(arg5, &ts);
        ret = get_errno(mq_receive(arg1, p, arg3, &prio));
        unlock_user (p, arg2, arg3);
        put_user_u32(prio, arg4);
        /* Not implemented for now... */
        /* case TARGET_NR_mq_notify: */
    case TARGET_NR_mq_getsetattr:
        struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
        ret = mq_getattr(arg1, &posix_mq_attr_out);
        copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
        copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
        ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
        ret = get_errno(tee(arg1, arg2, arg3, arg4));
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        loff_t loff_in, loff_out;
        loff_t *ploff_in = NULL, *ploff_out = NULL;
            get_user_u64(loff_in, arg2);
            ploff_in = &loff_in;
            get_user_u64(loff_out, arg4);
            ploff_out = &loff_out;
        ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
        ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
        unlock_iovec(vec, arg2, arg3, 0);
        ret = -host_to_target_errno(errno);
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
#endif /* CONFIG_EVENTFD */
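    /* On 32-bit ABIs, 64-bit file offsets arrive split across two syscall
     * arguments; target_offset64() below reassembles each pair into a
     * single 64-bit value before calling the host syscall. */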
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        struct target_epoll_event *target_ep;
        if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
        ep.events = tswap32(target_ep->events);
        /* The epoll_data_t union is just opaque data to the kernel,
         * so we transfer all 64 bits across and need not worry what
         * actual data type it is.
         */
        ep.data.u64 = tswap64(target_ep->data.u64);
        unlock_user_struct(target_ep, arg4, 0);
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
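    /* epoll_wait/epoll_pwait: the kernel fills a host epoll_event array,
     * and each returned event is converted back to the guest's byte order
     * (events as 32-bit, data.u64 as 64-bit) before being copied out. */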
#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int maxevents = arg3;
        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        ep = alloca(maxevents * sizeof(struct epoll_event));
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;
            target_set = lock_user(VERIFY_READ, arg5,
                                   sizeof(target_sigset_t), 1);
                unlock_user(target_ep, arg2, 0);
            target_to_host_sigset(set, target_set);
            unlock_user(target_set, arg5, 0);
            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            ret = -TARGET_ENOSYS;
        if (!is_error(ret)) {
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
        rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
        rnew.rlim_max = tswap64(target_rnew->rlim_max);
        unlock_user_struct(target_rnew, arg3, 0);
        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        ret = get_errno(gethostname(name, arg2));
        unlock_user(name, arg1, arg2);
        ret = -TARGET_EFAULT;
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
        ret = -TARGET_ENOSYS;
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
    print_syscall_ret(num, ret);
    ret = -TARGET_EFAULT;