/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>

int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);

#include <sys/socket.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include "qemu-common.h"
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu-xattr.h"
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/dm-ioctl.h>
#include "linux_loop.h"
#include "cpu-uname.h"
#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
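/*
 * For reference: a declaration written with these helpers, e.g.
 *     _syscall2(int, sys_getpriority, int, which, int, who);
 * expands to a thin static wrapper of the form
 *     static int sys_getpriority(int which, int who)
 *     { return syscall(__NR_sys_getpriority, which, who); }
 * Because the macro pastes "__NR_" onto the wrapper name, the aliases
 * below map each __NR_sys_xxx back to the real __NR_xxx syscall number.
 */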
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
  { 0, 0, 0, 0 }
};
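/*
 * Each row above maps one target open(2) flag to its host equivalent:
 * { target_mask, target_bits, host_mask, host_bits }.  A target flag word
 * is translated by testing (flags & target_mask) == target_bits and, on a
 * match, OR-ing in host_bits; the generic bitmask translation helpers are
 * expected to walk the table until the all-zero terminator row.
 */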
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct the Linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
  return (0);

#undef COPY_UTSNAME_FIELD
}
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
  return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
  return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
  return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
  return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
  return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
  return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
  return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
  return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
  return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
  return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
  return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT  */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif

extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
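/*
 * A 64-bit syscall argument is passed as two consecutive 32-bit registers.
 * When regpairs_aligned() returns 1 the pair has to start in an even
 * register, so callers handling the affected syscalls are expected to skip
 * one argument slot before reading the low/high halves (e.g. for a 64-bit
 * file offset); when it returns 0 the halves are read back to back.
 */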
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]           = TARGET_EIDRM,
    [ECHRNG]          = TARGET_ECHRNG,
    [EL2NSYNC]        = TARGET_EL2NSYNC,
    [EL3HLT]          = TARGET_EL3HLT,
    [EL3RST]          = TARGET_EL3RST,
    [ELNRNG]          = TARGET_ELNRNG,
    [EUNATCH]         = TARGET_EUNATCH,
    [ENOCSI]          = TARGET_ENOCSI,
    [EL2HLT]          = TARGET_EL2HLT,
    [EDEADLK]         = TARGET_EDEADLK,
    [ENOLCK]          = TARGET_ENOLCK,
    [EBADE]           = TARGET_EBADE,
    [EBADR]           = TARGET_EBADR,
    [EXFULL]          = TARGET_EXFULL,
    [ENOANO]          = TARGET_ENOANO,
    [EBADRQC]         = TARGET_EBADRQC,
    [EBADSLT]         = TARGET_EBADSLT,
    [EBFONT]          = TARGET_EBFONT,
    [ENOSTR]          = TARGET_ENOSTR,
    [ENODATA]         = TARGET_ENODATA,
    [ETIME]           = TARGET_ETIME,
    [ENOSR]           = TARGET_ENOSR,
    [ENONET]          = TARGET_ENONET,
    [ENOPKG]          = TARGET_ENOPKG,
    [EREMOTE]         = TARGET_EREMOTE,
    [ENOLINK]         = TARGET_ENOLINK,
    [EADV]            = TARGET_EADV,
    [ESRMNT]          = TARGET_ESRMNT,
    [ECOMM]           = TARGET_ECOMM,
    [EPROTO]          = TARGET_EPROTO,
    [EDOTDOT]         = TARGET_EDOTDOT,
    [EMULTIHOP]       = TARGET_EMULTIHOP,
    [EBADMSG]         = TARGET_EBADMSG,
    [ENAMETOOLONG]    = TARGET_ENAMETOOLONG,
    [EOVERFLOW]       = TARGET_EOVERFLOW,
    [ENOTUNIQ]        = TARGET_ENOTUNIQ,
    [EBADFD]          = TARGET_EBADFD,
    [EREMCHG]         = TARGET_EREMCHG,
    [ELIBACC]         = TARGET_ELIBACC,
    [ELIBBAD]         = TARGET_ELIBBAD,
    [ELIBSCN]         = TARGET_ELIBSCN,
    [ELIBMAX]         = TARGET_ELIBMAX,
    [ELIBEXEC]        = TARGET_ELIBEXEC,
    [EILSEQ]          = TARGET_EILSEQ,
    [ENOSYS]          = TARGET_ENOSYS,
    [ELOOP]           = TARGET_ELOOP,
    [ERESTART]        = TARGET_ERESTART,
    [ESTRPIPE]        = TARGET_ESTRPIPE,
    [ENOTEMPTY]       = TARGET_ENOTEMPTY,
    [EUSERS]          = TARGET_EUSERS,
    [ENOTSOCK]        = TARGET_ENOTSOCK,
    [EDESTADDRREQ]    = TARGET_EDESTADDRREQ,
    [EMSGSIZE]        = TARGET_EMSGSIZE,
    [EPROTOTYPE]      = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]     = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]      = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]    = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]    = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]      = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]   = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]        = TARGET_ENETDOWN,
    [ENETUNREACH]     = TARGET_ENETUNREACH,
    [ENETRESET]       = TARGET_ENETRESET,
    [ECONNABORTED]    = TARGET_ECONNABORTED,
    [ECONNRESET]      = TARGET_ECONNRESET,
    [ENOBUFS]         = TARGET_ENOBUFS,
    [EISCONN]         = TARGET_EISCONN,
    [ENOTCONN]        = TARGET_ENOTCONN,
    [EUCLEAN]         = TARGET_EUCLEAN,
    [ENOTNAM]         = TARGET_ENOTNAM,
    [ENAVAIL]         = TARGET_ENAVAIL,
    [EISNAM]          = TARGET_EISNAM,
    [EREMOTEIO]       = TARGET_EREMOTEIO,
    [ESHUTDOWN]       = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]    = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]       = TARGET_ETIMEDOUT,
    [ECONNREFUSED]    = TARGET_ECONNREFUSED,
    [EHOSTDOWN]       = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]    = TARGET_EHOSTUNREACH,
    [EALREADY]        = TARGET_EALREADY,
    [EINPROGRESS]     = TARGET_EINPROGRESS,
    [ESTALE]          = TARGET_ESTALE,
    [ECANCELED]       = TARGET_ECANCELED,
    [ENOMEDIUM]       = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]     = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]          = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]     = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]     = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]    = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]      = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
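/*
 * fd_set marshalling: the guest bitmap is stored as nw consecutive
 * abi_ulong words, each packing TARGET_ABI_BITS descriptor bits, so a
 * 64-descriptor set on a 32-bit target occupies two words and descriptor
 * 35 lives in bit 3 of the second word.  The loops above walk the set
 * word by word and bit by bit to rebuild the host fd_set (and back).
 */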
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
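/*
 * Clock-tick scaling example (assuming HOST_HZ is 100 and TARGET_HZ is 250):
 * 150 host ticks represent 1.5 seconds of CPU time, so the value reported
 * to the guest is 150 * 250 / 100 = 375 target ticks; the widening cast to
 * int64_t keeps the intermediate product from overflowing a 32-bit long.
 */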
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* ??? Should this also swap msgh->name?  */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
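/*
 * Only SCM_RIGHTS (file-descriptor passing) gets real translation in these
 * two converters: each 32-bit descriptor in the payload is byte-swapped
 * individually.  Every other ancillary message is logged as unsupported
 * and copied through verbatim, which is presumably good enough only when
 * host and target agree on the payload layout.
 */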
/* ??? Should this also swap msgh->name?  */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
            break;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/*
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapal(target_vec[i].iov_base);
        vec[i].iov_len = tswapal(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if an element has an invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}
unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1736 int count
, int copy
)
1738 struct target_iovec
*target_vec
;
1742 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1744 return -TARGET_EFAULT
;
1745 for(i
= 0;i
< count
; i
++) {
1746 if (target_vec
[i
].iov_base
) {
1747 base
= tswapal(target_vec
[i
].iov_base
);
1748 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1751 unlock_user (target_vec
, target_addr
, 0);
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
    return get_errno(socket(domain, type, protocol));
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg() Must return target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            unlock_user_struct(msgp, target_msg, send ? 0 : 1);
            return ret;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapal(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret))
                ret = len;
        }
    }

    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        addr = alloca(addrlen);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}

/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
2066 #ifdef TARGET_NR_socketcall
2067 /* do_socketcall() Must return target values and target errnos. */
2068 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2071 const int n
= sizeof(abi_ulong
);
2076 abi_ulong domain
, type
, protocol
;
2078 if (get_user_ual(domain
, vptr
)
2079 || get_user_ual(type
, vptr
+ n
)
2080 || get_user_ual(protocol
, vptr
+ 2 * n
))
2081 return -TARGET_EFAULT
;
2083 ret
= do_socket(domain
, type
, protocol
);
2089 abi_ulong target_addr
;
2092 if (get_user_ual(sockfd
, vptr
)
2093 || get_user_ual(target_addr
, vptr
+ n
)
2094 || get_user_ual(addrlen
, vptr
+ 2 * n
))
2095 return -TARGET_EFAULT
;
2097 ret
= do_bind(sockfd
, target_addr
, addrlen
);
    case SOCKOP_connect:
        {
            abi_ulong target_addr;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_connect(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_listen:
        {
            abi_ulong sockfd, backlog;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(backlog, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(listen(sockfd, backlog));
        }
        break;
    case SOCKOP_accept:
        {
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_accept(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getsockname:
        {
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getsockname(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getpeername:
        {
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getpeername(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_socketpair:
        {
            abi_ulong domain, type, protocol;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n)
                || get_user_ual(tab, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_socketpair(domain, type, protocol, tab);
        }
        break;
    case SOCKOP_send:
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n))
            return -TARGET_EFAULT;

        ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        break;
    case SOCKOP_recv:
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n))
            return -TARGET_EFAULT;

        ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        break;
    case SOCKOP_sendto:
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n)
            || get_user_ual(addr, vptr + 4 * n)
            || get_user_ual(addrlen, vptr + 5 * n))
            return -TARGET_EFAULT;

        ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
        break;
    case SOCKOP_recvfrom:
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n)
            || get_user_ual(addr, vptr + 4 * n)
            || get_user_ual(addrlen, vptr + 5 * n))
            return -TARGET_EFAULT;

        ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
        break;
    case SOCKOP_shutdown:
        {
            abi_ulong sockfd, how;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(how, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(shutdown(sockfd, how));
        }
        break;
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
        {
            abi_ulong target_msg;

            if (get_user_ual(fd, vptr)
                || get_user_ual(target_msg, vptr + n)
                || get_user_ual(flags, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_sendrecvmsg(fd, target_msg, flags,
                                 (num == SOCKOP_sendmsg));
        }
        break;
    case SOCKOP_setsockopt:
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(level, vptr + n)
            || get_user_ual(optname, vptr + 2 * n)
            || get_user_ual(optval, vptr + 3 * n)
            || get_user_ual(optlen, vptr + 4 * n))
            return -TARGET_EFAULT;

        ret = do_setsockopt(sockfd, level, optname, optval, optlen);
        break;
    case SOCKOP_getsockopt:
        if (get_user_ual(sockfd, vptr)
            || get_user_ual(level, vptr + n)
            || get_user_ual(optname, vptr + 2 * n)
            || get_user_ual(optval, vptr + 3 * n)
            || get_user_ual(optlen, vptr + 4 * n))
            return -TARGET_EFAULT;

        ret = do_getsockopt(sockfd, level, optname, optval, optlen);
        break;
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
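/* Illustrative sketch (not part of the original file): how a guest C library
 * typically packs socket(2)-family arguments for the socketcall multiplexer
 * that the cases above unpack with get_user_ual().  Each argument occupies
 * one abi_ulong slot starting at vptr; the stride 'n' used above appears to
 * be sizeof(abi_ulong).  The helper name and the slot buffer below are
 * hypothetical.
 */
#if 0
static void example_pack_socket_args(abi_ulong *slots,
                                     abi_ulong domain, abi_ulong type,
                                     abi_ulong protocol)
{
    slots[0] = domain;    /* read back with get_user_ual(x, vptr)         */
    slots[1] = type;      /* read back with get_user_ual(x, vptr + n)     */
    slots[2] = protocol;  /* read back with get_user_ual(x, vptr + 2 * n) */
}
#endif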
#define N_SHM_REGIONS	32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
} shm_regions[N_SHM_REGIONS];

struct target_ipc_perm
{
    abi_long __key;
    abi_ulong uid;
    abi_ulong gid;
    abi_ulong cuid;
    abi_ulong cgid;
    unsigned short int mode;
    unsigned short int __pad1;
    unsigned short int __seq;
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
};

struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswapal(target_ip->__key);
    host_ip->uid = tswapal(target_ip->uid);
    host_ip->gid = tswapal(target_ip->gid);
    host_ip->cuid = tswapal(target_ip->cuid);
    host_ip->cgid = tswapal(target_ip->cgid);
    host_ip->mode = tswap16(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapal(host_ip->__key);
    target_ip->uid = tswapal(host_ip->uid);
    target_ip->gid = tswapal(host_ip->gid);
    target_ip->cuid = tswapal(host_ip->cuid);
    target_ip->cgid = tswapal(host_ip->cgid);
    target_ip->mode = tswap16(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};

static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        arg.val = tswap32(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswap32(arg.val);
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}

static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return semop(semid, sops, nsops);
}
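/* Illustrative sketch (not part of the original file): do_semop() above
 * copies the guest sembuf array with target_to_host_sembuf() and then issues
 * the host semop().  A minimal host-side equivalent of what a guest "V"
 * operation triggers would look like the following; the helper name is
 * hypothetical.
 */
#if 0
static int example_semop_post(int semid)
{
    struct sembuf sb = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };
    return semop(semid, &sb, 1);   /* release semaphore 0 once */
}
#endif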
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}

static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);
    free(host_mb);

end:
    unlock_user_struct(target_mb, msgp, 1);
    return ret;
}
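/* Illustrative sketch (not part of the original file): the mtext copy in
 * do_msgrcv() writes to msgp + sizeof(abi_ulong), i.e. it relies on the
 * guest layout being an abi_long mtype immediately followed by the text,
 * matching struct target_msgbuf above.  For comparison, a plain host
 * program would build the message like this; the helper name and buffer
 * size are hypothetical.
 */
#if 0
static int example_msgsnd(int msqid, const char *text, size_t len)
{
    struct { long mtype; char mtext[128]; } msg;  /* host msgbuf shape */
    msg.mtype = 1;
    memcpy(msg.mtext, text, len);
    return msgsnd(msqid, &msg, len, 0);
}
#endif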
struct target_shmid_ds
{
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;
    abi_ulong shm_atime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong shm_dtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong shm_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong shm_cpid;
    abi_ulong shm_lpid;
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;
};

static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}

struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        return get_errno((long)host_raddr);
    }
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    return raddr;
}

static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
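/* Illustrative sketch (not part of the original file): shm_regions[] is the
 * bookkeeping that lets do_shmdt() recover the segment size so the guest
 * page flags can be cleared.  A lookup helper would have the obvious shape
 * below; the function name is hypothetical.
 */
#if 0
static abi_ulong example_shm_region_size(abi_ulong start)
{
    int i;
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == start) {
            return shm_regions[i].size;
        }
    }
    return 0;   /* not an address previously returned by do_shmat() */
}
#endif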
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;
    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;
    case IPCOP_semctl:
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;
    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;
    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;
    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;
    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;
    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;
    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, abi_long cmd, abi_long arg);

struct IOCTLEntry {
    unsigned int target_cmd;
    unsigned int host_cmd;
    const char *name;
    int access;
    do_ioctl_fn *do_ioctl;
    const argtype arg_type[5];
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096
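/* Illustrative sketch (not part of the original file): rows of the ioctl
 * table declared further below are generated by the IOCTL()/IOCTL_SPECIAL()
 * macros, which pair a TARGET_* command with its host value, a name string,
 * an access mode, an optional special handler and the argument thunk
 * description.  Spelled out by hand, one hypothetical row would look like
 * this (the command values and name are placeholders):
 */
#if 0
static const IOCTLEntry example_entry = {
    .target_cmd = 0,          /* TARGET_xxx value */
    .host_cmd   = 0,          /* matching host ioctl number */
    .name       = "EXAMPLE",
    .access     = IOC_RW,
    .do_ioctl   = NULL,       /* NULL: generic thunk-based conversion */
    .arg_type   = { TYPE_NULL },
};
#endif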
3175 #ifdef CONFIG_FIEMAP
3176 /* So fiemap access checks don't overflow on 32 bit systems.
3177 * This is very slightly smaller than the limit imposed by
3178 * the underlying kernel.
3180 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3181 / sizeof(struct fiemap_extent))
3183 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3184 int fd
, abi_long cmd
, abi_long arg
)
3186 /* The parameter for this ioctl is a struct fiemap followed
3187 * by an array of struct fiemap_extent whose size is set
3188 * in fiemap->fm_extent_count. The array is filled in by the
3191 int target_size_in
, target_size_out
;
3193 const argtype
*arg_type
= ie
->arg_type
;
3194 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3197 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3201 assert(arg_type
[0] == TYPE_PTR
);
3202 assert(ie
->access
== IOC_RW
);
3204 target_size_in
= thunk_type_size(arg_type
, 0);
3205 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3207 return -TARGET_EFAULT
;
3209 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3210 unlock_user(argptr
, arg
, 0);
3211 fm
= (struct fiemap
*)buf_temp
;
3212 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3213 return -TARGET_EINVAL
;
3216 outbufsz
= sizeof (*fm
) +
3217 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3219 if (outbufsz
> MAX_STRUCT_SIZE
) {
3220 /* We can't fit all the extents into the fixed size buffer.
3221 * Allocate one that is large enough and use it instead.
3223 fm
= malloc(outbufsz
);
3225 return -TARGET_ENOMEM
;
3227 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3230 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3231 if (!is_error(ret
)) {
3232 target_size_out
= target_size_in
;
3233 /* An extent_count of 0 means we were only counting the extents
3234 * so there are no structs to copy
3236 if (fm
->fm_extent_count
!= 0) {
3237 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3239 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3241 ret
= -TARGET_EFAULT
;
3243 /* Convert the struct fiemap */
3244 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3245 if (fm
->fm_extent_count
!= 0) {
3246 p
= argptr
+ target_size_in
;
3247 /* ...and then all the struct fiemap_extents */
3248 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3249 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3254 unlock_user(argptr
, arg
, target_size_out
);
3264 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3265 int fd
, abi_long cmd
, abi_long arg
)
3267 const argtype
*arg_type
= ie
->arg_type
;
3271 struct ifconf
*host_ifconf
;
3273 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3274 int target_ifreq_size
;
3279 abi_long target_ifc_buf
;
3283 assert(arg_type
[0] == TYPE_PTR
);
3284 assert(ie
->access
== IOC_RW
);
3287 target_size
= thunk_type_size(arg_type
, 0);
3289 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3291 return -TARGET_EFAULT
;
3292 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3293 unlock_user(argptr
, arg
, 0);
3295 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3296 target_ifc_len
= host_ifconf
->ifc_len
;
3297 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3299 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3300 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3301 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3303 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3304 if (outbufsz
> MAX_STRUCT_SIZE
) {
3305 /* We can't fit all the extents into the fixed size buffer.
3306 * Allocate one that is large enough and use it instead.
3308 host_ifconf
= malloc(outbufsz
);
3310 return -TARGET_ENOMEM
;
3312 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3315 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3317 host_ifconf
->ifc_len
= host_ifc_len
;
3318 host_ifconf
->ifc_buf
= host_ifc_buf
;
3320 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3321 if (!is_error(ret
)) {
3322 /* convert host ifc_len to target ifc_len */
3324 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3325 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3326 host_ifconf
->ifc_len
= target_ifc_len
;
3328 /* restore target ifc_buf */
3330 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3332 /* copy struct ifconf to target user */
3334 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3336 return -TARGET_EFAULT
;
3337 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3338 unlock_user(argptr
, arg
, target_size
);
3340 /* copy ifreq[] to target user */
3342 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3343 for (i
= 0; i
< nb_ifreq
; i
++) {
3344 thunk_convert(argptr
+ i
* target_ifreq_size
,
3345 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3346 ifreq_arg_type
, THUNK_TARGET
);
3348 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3358 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3359 abi_long cmd
, abi_long arg
)
3362 struct dm_ioctl
*host_dm
;
3363 abi_long guest_data
;
3364 uint32_t guest_data_size
;
3366 const argtype
*arg_type
= ie
->arg_type
;
3368 void *big_buf
= NULL
;
3372 target_size
= thunk_type_size(arg_type
, 0);
3373 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3375 ret
= -TARGET_EFAULT
;
3378 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3379 unlock_user(argptr
, arg
, 0);
3381 /* buf_temp is too small, so fetch things into a bigger buffer */
3382 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3383 memcpy(big_buf
, buf_temp
, target_size
);
3387 guest_data
= arg
+ host_dm
->data_start
;
3388 if ((guest_data
- arg
) < 0) {
3392 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3393 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3395 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3396 switch (ie
->host_cmd
) {
3398 case DM_LIST_DEVICES
:
3401 case DM_DEV_SUSPEND
:
3404 case DM_TABLE_STATUS
:
3405 case DM_TABLE_CLEAR
:
3407 case DM_LIST_VERSIONS
:
3411 case DM_DEV_SET_GEOMETRY
:
3412 /* data contains only strings */
3413 memcpy(host_data
, argptr
, guest_data_size
);
3416 memcpy(host_data
, argptr
, guest_data_size
);
3417 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3421 void *gspec
= argptr
;
3422 void *cur_data
= host_data
;
3423 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3424 int spec_size
= thunk_type_size(arg_type
, 0);
3427 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3428 struct dm_target_spec
*spec
= cur_data
;
3432 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3433 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3435 spec
->next
= sizeof(*spec
) + slen
;
3436 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3438 cur_data
+= spec
->next
;
3443 ret
= -TARGET_EINVAL
;
3446 unlock_user(argptr
, guest_data
, 0);
3448 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3449 if (!is_error(ret
)) {
3450 guest_data
= arg
+ host_dm
->data_start
;
3451 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3452 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3453 switch (ie
->host_cmd
) {
3458 case DM_DEV_SUSPEND
:
3461 case DM_TABLE_CLEAR
:
3463 case DM_DEV_SET_GEOMETRY
:
3464 /* no return data */
3466 case DM_LIST_DEVICES
:
3468 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3469 uint32_t remaining_data
= guest_data_size
;
3470 void *cur_data
= argptr
;
3471 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3472 int nl_size
= 12; /* can't use thunk_size due to alignment */
3475 uint32_t next
= nl
->next
;
3477 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3479 if (remaining_data
< nl
->next
) {
3480 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3483 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3484 strcpy(cur_data
+ nl_size
, nl
->name
);
3485 cur_data
+= nl
->next
;
3486 remaining_data
-= nl
->next
;
3490 nl
= (void*)nl
+ next
;
3495 case DM_TABLE_STATUS
:
3497 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3498 void *cur_data
= argptr
;
3499 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3500 int spec_size
= thunk_type_size(arg_type
, 0);
3503 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3504 uint32_t next
= spec
->next
;
3505 int slen
= strlen((char*)&spec
[1]) + 1;
3506 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3507 if (guest_data_size
< spec
->next
) {
3508 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3511 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3512 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3513 cur_data
= argptr
+ spec
->next
;
3514 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3520 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3521 int count
= *(uint32_t*)hdata
;
3522 uint64_t *hdev
= hdata
+ 8;
3523 uint64_t *gdev
= argptr
+ 8;
3526 *(uint32_t*)argptr
= tswap32(count
);
3527 for (i
= 0; i
< count
; i
++) {
3528 *gdev
= tswap64(*hdev
);
3534 case DM_LIST_VERSIONS
:
3536 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3537 uint32_t remaining_data
= guest_data_size
;
3538 void *cur_data
= argptr
;
3539 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3540 int vers_size
= thunk_type_size(arg_type
, 0);
3543 uint32_t next
= vers
->next
;
3545 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3547 if (remaining_data
< vers
->next
) {
3548 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3551 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3552 strcpy(cur_data
+ vers_size
, vers
->name
);
3553 cur_data
+= vers
->next
;
3554 remaining_data
-= vers
->next
;
3558 vers
= (void*)vers
+ next
;
3563 ret
= -TARGET_EINVAL
;
3566 unlock_user(argptr
, guest_data
, guest_data_size
);
3568 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3570 ret
= -TARGET_EFAULT
;
3573 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3574 unlock_user(argptr
, arg
, target_size
);
3583 static IOCTLEntry ioctl_entries
[] = {
3584 #define IOCTL(cmd, access, ...) \
3585 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3586 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3587 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3592 /* ??? Implement proper locking for ioctls. */
3593 /* do_ioctl() Must return target values and target errnos. */
3594 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3596 const IOCTLEntry
*ie
;
3597 const argtype
*arg_type
;
3599 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3605 if (ie
->target_cmd
== 0) {
3606 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3607 return -TARGET_ENOSYS
;
3609 if (ie
->target_cmd
== cmd
)
3613 arg_type
= ie
->arg_type
;
3615 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3618 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3621 switch(arg_type
[0]) {
3624 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3629 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3633 target_size
= thunk_type_size(arg_type
, 0);
3634 switch(ie
->access
) {
3636 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3637 if (!is_error(ret
)) {
3638 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3640 return -TARGET_EFAULT
;
3641 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3642 unlock_user(argptr
, arg
, target_size
);
3646 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3648 return -TARGET_EFAULT
;
3649 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3650 unlock_user(argptr
, arg
, 0);
3651 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3655 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3657 return -TARGET_EFAULT
;
3658 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3659 unlock_user(argptr
, arg
, 0);
3660 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3661 if (!is_error(ret
)) {
3662 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3664 return -TARGET_EFAULT
;
3665 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3666 unlock_user(argptr
, arg
, target_size
);
3672 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3673 (long)cmd
, arg_type
[0]);
3674 ret
= -TARGET_ENOSYS
;
3680 static const bitmask_transtbl iflag_tbl
[] = {
3681 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3682 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3683 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3684 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3685 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3686 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3687 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3688 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3689 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3690 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3691 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3692 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3693 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3694 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3698 static const bitmask_transtbl oflag_tbl
[] = {
3699 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3700 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3701 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3702 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3703 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3704 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3705 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3706 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3707 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3708 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3709 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3710 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3711 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3712 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3713 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3714 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3715 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3716 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3717 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3718 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3719 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3720 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3721 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3722 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3726 static const bitmask_transtbl cflag_tbl
[] = {
3727 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3728 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3729 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3730 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3731 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3732 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3733 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3734 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3735 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3736 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3737 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3738 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3739 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3740 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3741 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3742 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3743 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3744 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3745 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3746 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3747 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3748 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3749 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3750 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3751 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3752 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3753 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3754 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3755 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3756 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3757 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3761 static const bitmask_transtbl lflag_tbl
[] = {
3762 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3763 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3764 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3765 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3766 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3767 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3768 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3769 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3770 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3771 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3772 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3773 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3774 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3775 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3776 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3780 static void target_to_host_termios (void *dst
, const void *src
)
3782 struct host_termios
*host
= dst
;
3783 const struct target_termios
*target
= src
;
3786 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3788 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3790 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3792 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3793 host
->c_line
= target
->c_line
;
3795 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3796 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3797 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3798 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3799 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3800 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3801 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3802 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3803 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3804 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3805 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3806 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3807 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3808 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3809 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3810 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3811 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3812 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3815 static void host_to_target_termios (void *dst
, const void *src
)
3817 struct target_termios
*target
= dst
;
3818 const struct host_termios
*host
= src
;
3821 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3823 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3825 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3827 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3828 target
->c_line
= host
->c_line
;
3830 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3831 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3832 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3833 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3834 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3835 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3836 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3837 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3838 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3839 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3840 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3841 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3842 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3843 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3844 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3845 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3846 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3847 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};

static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { 0, 0, 0, 0 }
};
3868 #if defined(TARGET_I386)
3870 /* NOTE: there is really one LDT for all the threads */
3871 static uint8_t *ldt_table
;
3873 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3880 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3881 if (size
> bytecount
)
3883 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3885 return -TARGET_EFAULT
;
3886 /* ??? Should this by byteswapped? */
3887 memcpy(p
, ldt_table
, size
);
3888 unlock_user(p
, ptr
, size
);
3892 /* XXX: add locking support */
3893 static abi_long
write_ldt(CPUX86State
*env
,
3894 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3896 struct target_modify_ldt_ldt_s ldt_info
;
3897 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3898 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3899 int seg_not_present
, useable
, lm
;
3900 uint32_t *lp
, entry_1
, entry_2
;
3902 if (bytecount
!= sizeof(ldt_info
))
3903 return -TARGET_EINVAL
;
3904 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3905 return -TARGET_EFAULT
;
3906 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3907 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
3908 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3909 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3910 unlock_user_struct(target_ldt_info
, ptr
, 0);
3912 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3913 return -TARGET_EINVAL
;
3914 seg_32bit
= ldt_info
.flags
& 1;
3915 contents
= (ldt_info
.flags
>> 1) & 3;
3916 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3917 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3918 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3919 useable
= (ldt_info
.flags
>> 6) & 1;
3923 lm
= (ldt_info
.flags
>> 7) & 1;
3925 if (contents
== 3) {
3927 return -TARGET_EINVAL
;
3928 if (seg_not_present
== 0)
3929 return -TARGET_EINVAL
;
3931 /* allocate the LDT */
3933 env
->ldt
.base
= target_mmap(0,
3934 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3935 PROT_READ
|PROT_WRITE
,
3936 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3937 if (env
->ldt
.base
== -1)
3938 return -TARGET_ENOMEM
;
3939 memset(g2h(env
->ldt
.base
), 0,
3940 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3941 env
->ldt
.limit
= 0xffff;
3942 ldt_table
= g2h(env
->ldt
.base
);
3945 /* NOTE: same code as Linux kernel */
3946 /* Allow LDTs to be cleared by the user. */
3947 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3950 read_exec_only
== 1 &&
3952 limit_in_pages
== 0 &&
3953 seg_not_present
== 1 &&
3961 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3962 (ldt_info
.limit
& 0x0ffff);
3963 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3964 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3965 (ldt_info
.limit
& 0xf0000) |
3966 ((read_exec_only
^ 1) << 9) |
3968 ((seg_not_present
^ 1) << 15) |
3970 (limit_in_pages
<< 23) |
3974 entry_2
|= (useable
<< 20);
3976 /* Install the new entry ... */
3978 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3979 lp
[0] = tswap32(entry_1
);
3980 lp
[1] = tswap32(entry_2
);
3984 /* specific and weird i386 syscalls */
3985 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3986 unsigned long bytecount
)
3992 ret
= read_ldt(ptr
, bytecount
);
3995 ret
= write_ldt(env
, ptr
, bytecount
, 1);
3998 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4001 ret
= -TARGET_ENOSYS
;
4007 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4008 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4010 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4011 struct target_modify_ldt_ldt_s ldt_info
;
4012 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4013 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4014 int seg_not_present
, useable
, lm
;
4015 uint32_t *lp
, entry_1
, entry_2
;
4018 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4019 if (!target_ldt_info
)
4020 return -TARGET_EFAULT
;
4021 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4022 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4023 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4024 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4025 if (ldt_info
.entry_number
== -1) {
4026 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4027 if (gdt_table
[i
] == 0) {
4028 ldt_info
.entry_number
= i
;
4029 target_ldt_info
->entry_number
= tswap32(i
);
4034 unlock_user_struct(target_ldt_info
, ptr
, 1);
4036 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4037 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4038 return -TARGET_EINVAL
;
4039 seg_32bit
= ldt_info
.flags
& 1;
4040 contents
= (ldt_info
.flags
>> 1) & 3;
4041 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4042 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4043 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4044 useable
= (ldt_info
.flags
>> 6) & 1;
4048 lm
= (ldt_info
.flags
>> 7) & 1;
4051 if (contents
== 3) {
4052 if (seg_not_present
== 0)
4053 return -TARGET_EINVAL
;
4056 /* NOTE: same code as Linux kernel */
4057 /* Allow LDTs to be cleared by the user. */
4058 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4059 if ((contents
== 0 &&
4060 read_exec_only
== 1 &&
4062 limit_in_pages
== 0 &&
4063 seg_not_present
== 1 &&
4071 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4072 (ldt_info
.limit
& 0x0ffff);
4073 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4074 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4075 (ldt_info
.limit
& 0xf0000) |
4076 ((read_exec_only
^ 1) << 9) |
4078 ((seg_not_present
^ 1) << 15) |
4080 (limit_in_pages
<< 23) |
4085 /* Install the new entry ... */
4087 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4088 lp
[0] = tswap32(entry_1
);
4089 lp
[1] = tswap32(entry_2
);
4093 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4095 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4096 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4097 uint32_t base_addr
, limit
, flags
;
4098 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4099 int seg_not_present
, useable
, lm
;
4100 uint32_t *lp
, entry_1
, entry_2
;
4102 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4103 if (!target_ldt_info
)
4104 return -TARGET_EFAULT
;
4105 idx
= tswap32(target_ldt_info
->entry_number
);
4106 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4107 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4108 unlock_user_struct(target_ldt_info
, ptr
, 1);
4109 return -TARGET_EINVAL
;
4111 lp
= (uint32_t *)(gdt_table
+ idx
);
4112 entry_1
= tswap32(lp
[0]);
4113 entry_2
= tswap32(lp
[1]);
4115 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4116 contents
= (entry_2
>> 10) & 3;
4117 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4118 seg_32bit
= (entry_2
>> 22) & 1;
4119 limit_in_pages
= (entry_2
>> 23) & 1;
4120 useable
= (entry_2
>> 20) & 1;
4124 lm
= (entry_2
>> 21) & 1;
4126 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4127 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4128 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4129 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4130 base_addr
= (entry_1
>> 16) |
4131 (entry_2
& 0xff000000) |
4132 ((entry_2
& 0xff) << 16);
4133 target_ldt_info
->base_addr
= tswapal(base_addr
);
4134 target_ldt_info
->limit
= tswap32(limit
);
4135 target_ldt_info
->flags
= tswap32(flags
);
4136 unlock_user_struct(target_ldt_info
, ptr
, 1);
4139 #endif /* TARGET_I386 && TARGET_ABI32 */
4141 #ifndef TARGET_ABI32
4142 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4149 case TARGET_ARCH_SET_GS
:
4150 case TARGET_ARCH_SET_FS
:
4151 if (code
== TARGET_ARCH_SET_GS
)
4155 cpu_x86_load_seg(env
, idx
, 0);
4156 env
->segs
[idx
].base
= addr
;
4158 case TARGET_ARCH_GET_GS
:
4159 case TARGET_ARCH_GET_FS
:
4160 if (code
== TARGET_ARCH_GET_GS
)
4164 val
= env
->segs
[idx
].base
;
4165 if (put_user(val
, addr
, abi_ulong
))
4166 ret
= -TARGET_EFAULT
;
4169 ret
= -TARGET_EINVAL
;
4176 #endif /* defined(TARGET_I386) */
4178 #define NEW_STACK_SIZE 0x40000
4180 #if defined(CONFIG_USE_NPTL)
4182 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4185 pthread_mutex_t mutex
;
4186 pthread_cond_t cond
;
4189 abi_ulong child_tidptr
;
4190 abi_ulong parent_tidptr
;
4194 static void *clone_func(void *arg
)
4196 new_thread_info
*info
= arg
;
4202 ts
= (TaskState
*)thread_env
->opaque
;
4203 info
->tid
= gettid();
4204 env
->host_tid
= info
->tid
;
4206 if (info
->child_tidptr
)
4207 put_user_u32(info
->tid
, info
->child_tidptr
);
4208 if (info
->parent_tidptr
)
4209 put_user_u32(info
->tid
, info
->parent_tidptr
);
4210 /* Enable signals. */
4211 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4212 /* Signal to the parent that we're ready. */
4213 pthread_mutex_lock(&info
->mutex
);
4214 pthread_cond_broadcast(&info
->cond
);
4215 pthread_mutex_unlock(&info
->mutex
);
4216 /* Wait until the parent has finshed initializing the tls state. */
4217 pthread_mutex_lock(&clone_lock
);
4218 pthread_mutex_unlock(&clone_lock
);
4225 static int clone_func(void *arg
)
4227 CPUArchState
*env
= arg
;
4234 /* do_fork() Must return host values and target errnos (unlike most
4235 do_*() functions). */
4236 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4237 abi_ulong parent_tidptr
, target_ulong newtls
,
4238 abi_ulong child_tidptr
)
4242 CPUArchState
*new_env
;
4243 #if defined(CONFIG_USE_NPTL)
4244 unsigned int nptl_flags
;
4250 /* Emulate vfork() with fork() */
4251 if (flags
& CLONE_VFORK
)
4252 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4254 if (flags
& CLONE_VM
) {
4255 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
4256 #if defined(CONFIG_USE_NPTL)
4257 new_thread_info info
;
4258 pthread_attr_t attr
;
4260 ts
= g_malloc0(sizeof(TaskState
));
4261 init_task_state(ts
);
4262 /* we create a new CPU instance. */
4263 new_env
= cpu_copy(env
);
4264 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4265 cpu_reset(ENV_GET_CPU(new_env
));
4267 /* Init regs that differ from the parent. */
4268 cpu_clone_regs(new_env
, newsp
);
4269 new_env
->opaque
= ts
;
4270 ts
->bprm
= parent_ts
->bprm
;
4271 ts
->info
= parent_ts
->info
;
4272 #if defined(CONFIG_USE_NPTL)
4274 flags
&= ~CLONE_NPTL_FLAGS2
;
4276 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4277 ts
->child_tidptr
= child_tidptr
;
4280 if (nptl_flags
& CLONE_SETTLS
)
4281 cpu_set_tls (new_env
, newtls
);
4283 /* Grab a mutex so that thread setup appears atomic. */
4284 pthread_mutex_lock(&clone_lock
);
4286 memset(&info
, 0, sizeof(info
));
4287 pthread_mutex_init(&info
.mutex
, NULL
);
4288 pthread_mutex_lock(&info
.mutex
);
4289 pthread_cond_init(&info
.cond
, NULL
);
4291 if (nptl_flags
& CLONE_CHILD_SETTID
)
4292 info
.child_tidptr
= child_tidptr
;
4293 if (nptl_flags
& CLONE_PARENT_SETTID
)
4294 info
.parent_tidptr
= parent_tidptr
;
4296 ret
= pthread_attr_init(&attr
);
4297 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4298 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4299 /* It is not safe to deliver signals until the child has finished
4300 initializing, so temporarily block all signals. */
4301 sigfillset(&sigmask
);
4302 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4304 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4305 /* TODO: Free new CPU state if thread creation failed. */
4307 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4308 pthread_attr_destroy(&attr
);
4310 /* Wait for the child to initialize. */
4311 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4313 if (flags
& CLONE_PARENT_SETTID
)
4314 put_user_u32(ret
, parent_tidptr
);
4318 pthread_mutex_unlock(&info
.mutex
);
4319 pthread_cond_destroy(&info
.cond
);
4320 pthread_mutex_destroy(&info
.mutex
);
4321 pthread_mutex_unlock(&clone_lock
);
4323 if (flags
& CLONE_NPTL_FLAGS2
)
4325 /* This is probably going to die very quickly, but do it anyway. */
4326 new_stack
= g_malloc0 (NEW_STACK_SIZE
);
4328 ret
= __clone2(clone_func
, new_stack
, NEW_STACK_SIZE
, flags
, new_env
);
4330 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
4334 /* if no CLONE_VM, we consider it is a fork */
4335 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4340 /* Child Process. */
4341 cpu_clone_regs(env
, newsp
);
4343 #if defined(CONFIG_USE_NPTL)
4344 /* There is a race condition here. The parent process could
4345 theoretically read the TID in the child process before the child
4346 tid is set. This would require using either ptrace
4347 (not implemented) or having *_tidptr to point at a shared memory
4348 mapping. We can't repeat the spinlock hack used above because
4349 the child process gets its own copy of the lock. */
4350 if (flags
& CLONE_CHILD_SETTID
)
4351 put_user_u32(gettid(), child_tidptr
);
4352 if (flags
& CLONE_PARENT_SETTID
)
4353 put_user_u32(gettid(), parent_tidptr
);
4354 ts
= (TaskState
*)env
->opaque
;
4355 if (flags
& CLONE_SETTLS
)
4356 cpu_set_tls (env
, newtls
);
4357 if (flags
& CLONE_CHILD_CLEARTID
)
4358 ts
->child_tidptr
= child_tidptr
;
4367 /* warning : doesn't handle linux specific flags... */
4368 static int target_to_host_fcntl_cmd(int cmd
)
4371 case TARGET_F_DUPFD
:
4372 case TARGET_F_GETFD
:
4373 case TARGET_F_SETFD
:
4374 case TARGET_F_GETFL
:
4375 case TARGET_F_SETFL
:
4377 case TARGET_F_GETLK
:
4379 case TARGET_F_SETLK
:
4381 case TARGET_F_SETLKW
:
4383 case TARGET_F_GETOWN
:
4385 case TARGET_F_SETOWN
:
4387 case TARGET_F_GETSIG
:
4389 case TARGET_F_SETSIG
:
4391 #if TARGET_ABI_BITS == 32
4392 case TARGET_F_GETLK64
:
4394 case TARGET_F_SETLK64
:
4396 case TARGET_F_SETLKW64
:
4399 case TARGET_F_SETLEASE
:
4401 case TARGET_F_GETLEASE
:
4403 #ifdef F_DUPFD_CLOEXEC
4404 case TARGET_F_DUPFD_CLOEXEC
:
4405 return F_DUPFD_CLOEXEC
;
4407 case TARGET_F_NOTIFY
:
4410 return -TARGET_EINVAL
;
4412 return -TARGET_EINVAL
;
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type = tswap16(fl.l_type);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type = tswap16(fl64.l_type) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
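
/* Helpers for the 16-bit uid/gid syscall variants (USE_UID16): ids above
   65535 are clamped, and -1 is preserved so "no change" requests still work.
   Without USE_UID16 these are identity functions. */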
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}
#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}
#endif /* USE_UID16 */
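
/* syscall_init() registers the structure thunks used by the ioctl
   translation layer, patches the size field of ioctls whose size depends on
   the target ABI, and builds the target_to_host errno table. */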
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* Build target_to_host_errno_table[] table from
         * host_to_target_errno_table[]. */
        for (i = 0; i < ERRNO_TABLE_SIZE; i++)
            target_to_host_errno_table[host_to_target_errno_table[i]] = i;

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
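
/* On 32-bit ABIs a 64-bit file offset arrives in two registers; which
   register holds the high word depends on target endianness.  For example,
   word0=0x00000001 word1=0x00000000 is offset 0x100000000 on a big-endian
   target and 0x1 on a little-endian one. */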
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
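
/* truncate64/ftruncate64: some ABIs require 64-bit register pairs to be
   aligned, in which case regpairs_aligned() is true and the offset words
   are shifted up by one argument slot before being recombined. */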
#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
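
/* Copy a struct timespec between guest memory and the host representation,
   byte-swapping tv_sec/tv_nsec as needed. */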
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapal(target_ts->tv_sec);
    host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapal(host_ts->tv_sec);
    target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
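
/* host_to_target_stat64() writes a host struct stat out in the guest's
   stat64 layout; ARM EABI guests use a distinct target_eabi_stat64 layout
   and are handled separately. */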
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
        struct target_stat *target_st;
#else
        struct target_stat64 *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
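
/* do_futex() forwards guest futex operations to the host futex syscall.
   The futex word lives in guest memory, so its address is translated with
   g2h(); only FUTEX_WAIT needs the value byte-swapped because the kernel
   compares it against the (guest-endian) word in memory. */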
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                                   pts, NULL, 0));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3) : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
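
/* get_osversion() packs the (possibly user-overridden) kernel release
   string into one integer, one byte per component: e.g. "2.6.32" becomes
   0x020620.  The result is cached in a static variable. */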
int get_osversion(void)
{
    static int osversion;
    struct new_utsname buf;
    const char *s;
    int i, n, tmp;

    if (osversion)
        return osversion;
    if (qemu_uname_release && *qemu_uname_release) {
        s = qemu_uname_release;
    } else {
        if (sys_uname(&buf))
            return 0;
        s = buf.release;
    }
    tmp = 0;
    for (i = 0; i < 3; i++) {
        n = 0;
        while (*s >= '0' && *s <= '9') {
            n *= 10;
            n += *s - '0';
            s++;
        }
        tmp = (tmp << 8) + n;
        if (*s == '.')
            s++;
    }
    osversion = tmp;
    return osversion;
}
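
/* Fake /proc/self/maps for the guest: parse the host maps file and rewrite
   each mapping whose range is valid guest memory with h2g() addresses, so
   the guest sees its own address space rather than QEMU's. */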
static int open_self_maps(void *cpu_env, int fd)
{
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
#endif
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/self/maps", "r");
    if (fp == NULL) {
        return -EACCES;
    }

    while ((read = getline(&line, &len, fp)) != -1) {
        int fields, dev_maj, dev_min, inode;
        uint64_t min, max, offset;
        char flag_r, flag_w, flag_x, flag_p;
        char path[512] = "";
        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);

        if ((fields < 10) || (fields > 11)) {
            continue;
        }
        if (!strncmp(path, "[stack]", 7)) {
            continue;
        }
        if (h2g_valid(min) && h2g_valid(max)) {
            dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
                    h2g(min), h2g(max), flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? " " : "", path);
        }
    }

    free(line);
    fclose(fp);

#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
            (unsigned long long)ts->info->stack_limit,
            (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1)),
            (unsigned long long)0);
#endif

    return 0;
}
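
/* Fake /proc/self/stat: only the fields the guest normally cares about
   (pid, command name, and stack start) carry real values; every other
   field is written as 0. */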
static int open_self_stat(void *cpu_env, int fd)
{
    TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
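
/* Fake /proc/self/auxv: the guest's auxiliary vector was saved on its
   stack at load time, so copy that saved region verbatim into the
   temporary file. */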
static int open_self_auxv(void *cpu_env, int fd)
{
    TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
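
/* do_open() intercepts opens of the /proc/self files faked above: matching
   paths are served from an unlinked temporary file filled by the matching
   fill() callback; everything else falls through to the host open(). */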
static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "/proc/self/maps", open_self_maps },
        { "/proc/self/stat", open_self_stat },
        { "/proc/self/auxv", open_self_auxv },
        { NULL, NULL }
    };

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (!strncmp(pathname, fake_open->filename,
                     strlen(fake_open->filename))) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            close(fd);
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return get_errno(open(path(pathname), flags, mode));
}
/* do_syscall() should always have a single exit point at the end so
   that actions, such as logging of syscall results, can be performed.
   All errnos that do_syscall() returns must be -TARGET_<errcode>. */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    abi_long ret;
    struct stat st;
    struct statfs stfs;
    void *p;

#ifdef DEBUG
    gemu_log("syscall %d", num);
#endif
    if (unlikely(do_strace))
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);

    switch(num) {
    case TARGET_NR_exit:
#ifdef CONFIG_USE_NPTL
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */
        /* FIXME: This probably breaks if a signal arrives.  We should probably
           be disabling signals.  */
        if (first_cpu->next_cpu) {
            TaskState *ts;
            CPUArchState **lastp;
            CPUArchState *p;

            cpu_list_lock();
            lastp = &first_cpu;
            p = first_cpu;
            while (p && p != (CPUArchState *)cpu_env) {
                lastp = &p->next_cpu;
                p = p->next_cpu;
            }
            /* If we didn't find the CPU for this thread then something is
               horribly wrong.  */
            if (!p)
                abort();
            /* Remove the CPU from the list.  */
            *lastp = p->next_cpu;
            cpu_list_unlock();
            ts = ((CPUArchState *)cpu_env)->opaque;
            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
                          NULL, NULL, 0);
            }
            object_delete(OBJECT(ENV_GET_CPU(cpu_env)));
            g_free(ts);
            pthread_exit(NULL);
        }
#endif
        gdb_exit(cpu_env, arg1);
        _exit(arg1);
        ret = 0; /* avoid warning */
        break;
:
5104 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5106 ret
= get_errno(read(arg1
, p
, arg3
));
5107 unlock_user(p
, arg2
, ret
);
5110 case TARGET_NR_write
:
5111 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5113 ret
= get_errno(write(arg1
, p
, arg3
));
5114 unlock_user(p
, arg2
, 0);
5116 case TARGET_NR_open
:
5117 if (!(p
= lock_user_string(arg1
)))
5119 ret
= get_errno(do_open(cpu_env
, p
,
5120 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5122 unlock_user(p
, arg1
, 0);
5124 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5125 case TARGET_NR_openat
:
5126 if (!(p
= lock_user_string(arg2
)))
5128 ret
= get_errno(sys_openat(arg1
,
5130 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5132 unlock_user(p
, arg2
, 0);
5135 case TARGET_NR_close
:
5136 ret
= get_errno(close(arg1
));
5141 case TARGET_NR_fork
:
5142 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5144 #ifdef TARGET_NR_waitpid
5145 case TARGET_NR_waitpid
:
5148 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5149 if (!is_error(ret
) && arg2
&& ret
5150 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5155 #ifdef TARGET_NR_waitid
5156 case TARGET_NR_waitid
:
5160 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5161 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5162 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5164 host_to_target_siginfo(p
, &info
);
5165 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(creat(p, arg2));
        unlock_user(p, arg1, 0);
        break;
#endif
    case TARGET_NR_link:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(link(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        break;
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
    case TARGET_NR_linkat:
        {
            void *p2 = NULL;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
            unlock_user(p, arg2, 0);
            unlock_user(p2, arg4, 0);
        }
        break;
#endif
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
        break;
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_execve:
        {
            char **argp, **envp;
            int argc, envc;
            abi_ulong gp;
            abi_ulong guest_argp;
            abi_ulong guest_envp;
            abi_ulong addr;
            char **q;
            int total_size = 0;

            argc = 0;
            guest_argp = arg2;
            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    goto efault;
                if (!addr)
                    break;
                argc++;
            }
            envc = 0;
            guest_envp = arg3;
            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    goto efault;
                if (!addr)
                    break;
                envc++;
            }

            argp = alloca((argc + 1) * sizeof(void *));
            envp = alloca((envc + 1) * sizeof(void *));

            for (gp = guest_argp, q = argp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
                total_size += strlen(*q) + 1;
            }
            *q = NULL;

            for (gp = guest_envp, q = envp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
                total_size += strlen(*q) + 1;
            }
            *q = NULL;

            /* This case will not be caught by the host's execve() if its
               page size is bigger than the target's. */
            if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
                ret = -TARGET_E2BIG;
                goto execve_end;
            }
            if (!(p = lock_user_string(arg1)))
                goto execve_efault;
            ret = get_errno(execve(p, argp, envp));
            unlock_user(p, arg1, 0);

            goto execve_end;

        execve_efault:
            ret = -TARGET_EFAULT;

        execve_end:
            for (gp = guest_argp, q = argp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }
            for (gp = guest_envp, q = envp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }
        }
        break;
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
        break;
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        {
            time_t host_time;
            ret = get_errno(time(&host_time));
            if (!is_error(ret)
                && arg1
                && put_user_sal(host_time, arg1))
                goto efault;
        }
        break;
#endif
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
        break;
#ifdef TARGET_NR_break
    case TARGET_NR_break:
        goto unimplemented;
#endif
#ifdef TARGET_NR_oldstat
    case TARGET_NR_oldstat:
        goto unimplemented;
#endif
    case TARGET_NR_lseek:
        ret = get_errno(lseek(arg1, arg2, arg3));
        break;
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
        ret = get_errno(getpid());
        break;
#endif
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        ret = get_errno(getpid());
        break;
#endif
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            p3 = lock_user_string(arg3);
            if (!p || !p2 || !p3)
                ret = -TARGET_EFAULT;
            else {
                /* FIXME - arg5 should be locked, but it isn't clear how to
                 * do that since it's not guaranteed to be a NULL-terminated
                 * string.
                 */
                if (!arg5)
                    ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
                else
                    ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
            }
            unlock_user(p, arg1, 0);
            unlock_user(p2, arg2, 0);
            unlock_user(p3, arg3, 0);
            break;
        }
#ifdef TARGET_NR_umount
    case TARGET_NR_umount:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            time_t host_time;
            if (get_user_sal(host_time, arg1))
                goto efault;
            ret = get_errno(stime(&host_time));
        }
        break;
#endif
    case TARGET_NR_ptrace:
        goto unimplemented;
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        ret = alarm(arg1);
        break;
#endif
#ifdef TARGET_NR_oldfstat
    case TARGET_NR_oldfstat:
        goto unimplemented;
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        ret = get_errno(pause());
        break;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    goto efault;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                goto efault;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        break;
#endif
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    goto efault;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                goto efault;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        break;
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    goto efault;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2)))
                goto efault;
            ret = get_errno(sys_futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        break;
#endif
#ifdef TARGET_NR_stty
    case TARGET_NR_stty:
        goto unimplemented;
#endif
#ifdef TARGET_NR_gtty
    case TARGET_NR_gtty:
        goto unimplemented;
#endif
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        break;
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_faccessat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        ret = get_errno(nice(arg1));
        break;
#endif
#ifdef TARGET_NR_ftime
    case TARGET_NR_ftime:
        goto unimplemented;
#endif
    case TARGET_NR_sync:
        sync();
        ret = 0;
        break;
    case TARGET_NR_kill:
        ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
        break;
    case TARGET_NR_rename:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        break;
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(sys_renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        break;
#endif
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        break;
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        break;
    case TARGET_NR_pipe:
        ret = do_pipe(cpu_env, arg1, 0, 0);
        break;
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        ret = do_pipe(cpu_env, arg1, arg2, 1);
        break;
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    goto efault;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        break;
#ifdef TARGET_NR_prof
    case TARGET_NR_prof:
        goto unimplemented;
#endif
#ifdef TARGET_NR_signal
    case TARGET_NR_signal:
        goto unimplemented;
#endif
    case TARGET_NR_acct:
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1)))
                goto efault;
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        break;
#ifdef TARGET_NR_umount2 /* not on alpha */
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_lock
    case TARGET_NR_lock:
        goto unimplemented;
#endif
    case TARGET_NR_ioctl:
        ret = do_ioctl(arg1, arg2, arg3);
        break;
    case TARGET_NR_fcntl:
        ret = do_fcntl(arg1, arg2, arg3);
        break;
#ifdef TARGET_NR_mpx
    case TARGET_NR_mpx:
        goto unimplemented;
#endif
    case TARGET_NR_setpgid:
        ret = get_errno(setpgid(arg1, arg2));
        break;
#ifdef TARGET_NR_ulimit
    case TARGET_NR_ulimit:
        goto unimplemented;
#endif
#ifdef TARGET_NR_oldolduname
    case TARGET_NR_oldolduname:
        goto unimplemented;
#endif
    case TARGET_NR_umask:
        ret = get_errno(umask(arg1));
        break;
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_ustat:
        goto unimplemented;
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        break;
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
        ret = get_errno(dup3(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        ret = get_errno(getppid());
        break;
#endif
    case TARGET_NR_getpgrp:
        ret = get_errno(getpgrp());
        break;
    case TARGET_NR_setsid:
        ret = get_errno(setsid());
        break;
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
#if defined(TARGET_ALPHA)
            struct target_sigaction act, oact, *pact = 0;
            struct target_old_sigaction *old_act;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    goto efault;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = 0;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    goto efault;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                unlock_user_struct(old_act, arg3, 1);
            }
#elif defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    goto efault;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    goto efault;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    goto efault;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = old_act->sa_restorer;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    goto efault;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_restorer = oact.sa_restorer;
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        break;
#endif
    case TARGET_NR_rt_sigaction:
        {
#if defined(TARGET_ALPHA)
            struct target_sigaction act, oact, *pact = 0;
            struct target_rt_sigaction *rt_act;
            /* ??? arg4 == sizeof(sigset_t).  */
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
                    goto efault;
                act._sa_handler = rt_act->_sa_handler;
                act.sa_mask = rt_act->sa_mask;
                act.sa_flags = rt_act->sa_flags;
                act.sa_restorer = arg5;
                unlock_user_struct(rt_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
                    goto efault;
                rt_act->_sa_handler = oact._sa_handler;
                rt_act->sa_mask = oact.sa_mask;
                rt_act->sa_flags = oact.sa_flags;
                unlock_user_struct(rt_act, arg3, 1);
            }
#else
            struct target_sigaction *act;
            struct target_sigaction *oact;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
                    goto efault;
            } else
                act = NULL;
            if (arg3) {
                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                    ret = -TARGET_EFAULT;
                    goto rt_sigaction_fail;
                }
            } else
                oact = NULL;
            ret = get_errno(do_sigaction(arg1, act, oact));
        rt_sigaction_fail:
            if (act)
                unlock_user_struct(act, arg2, 0);
            if (oact)
                unlock_user_struct(oact, arg3, 1);
#endif
        }
        break;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            sigprocmask(0, NULL, &cur_set);
            host_to_target_old_sigset(&target_set, &cur_set);
            ret = target_set;
        }
        break;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset, cur_set;
            abi_ulong target_set = arg1;
            sigprocmask(0, NULL, &cur_set);
            target_to_host_old_sigset(&set, &target_set);
            sigorset(&set, &set, &cur_set);
            sigprocmask(SIG_SETMASK, &set, &oset);
            host_to_target_old_sigset(&target_set, &oset);
            ret = target_set;
        }
        break;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = get_errno(sigprocmask(how, &set, &oldset));

            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    ret = -TARGET_EINVAL;
                    goto fail;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    goto efault;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = get_errno(sigprocmask(how, set_ptr, &oldset));
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        break;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            if (arg2) {
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    ret = -TARGET_EINVAL;
                    goto fail;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    goto efault;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = get_errno(sigprocmask(how, set_ptr, &oldset));
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        break;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        break;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        break;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            sigset_t set;
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&set, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_old_sigset(&set, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(sigsuspend(&set));
        }
        break;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            sigset_t set;
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(sigsuspend(&set));
        }
        break;
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                target_to_host_timespec(puts, arg3);
            } else {
                puts = NULL;
            }
            ret = get_errno(sigtimedwait(&set, &uinfo, puts));
            if (!is_error(ret) && arg2) {
                if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
                    goto efault;
                host_to_target_siginfo(p, &uinfo);
                unlock_user(p, arg2, sizeof(target_siginfo_t));
            }
        }
        break;
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;
            if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        break;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding must be done */
        ret = do_sigreturn(cpu_env);
        break;
#endif
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding must be done */
        ret = do_rt_sigreturn(cpu_env);
        break;
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                goto efault;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            ret = get_errno(setrlimit(resource, &rlim));
        }
        break;
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    goto efault;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        break;
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                host_to_target_rusage(arg2, &rusage);
            }
        }
        break;
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            ret = get_errno(gettimeofday(&tv, NULL));
            if (!is_error(ret)) {
                if (copy_to_user_timeval(arg1, &tv))
                    goto efault;
            }
        }
        break;
    case TARGET_NR_settimeofday:
        {
            struct timeval tv;
            if (copy_from_user_timeval(&tv, arg1))
                goto efault;
            ret = get_errno(settimeofday(&tv, NULL));
        }
        break;
#if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
    case TARGET_NR_select:
        {
            struct target_sel_arg_struct *sel;
            abi_ulong inp, outp, exp, tvp;
            long nsel;

            if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
                goto efault;
            nsel = tswapal(sel->n);
            inp = tswapal(sel->inp);
            outp = tswapal(sel->outp);
            exp = tswapal(sel->exp);
            tvp = tswapal(sel->tvp);
            unlock_user_struct(sel, arg1, 0);
            ret = do_select(nsel, inp, outp, exp, tvp);
        }
        break;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        {
            abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
            fd_set rfds, wfds, efds;
            fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
            struct timespec ts, *ts_ptr;

            /*
             * The 6th arg is actually two args smashed together,
             * so we cannot use the C library.
             */
            sigset_t set;
            struct {
                sigset_t *set;
                size_t size;
            } sig, *sig_ptr;

            abi_ulong arg_sigset, arg_sigsize, *arg7;
            target_sigset_t *target_sigset;

            n = arg1;
            rfd_addr = arg2;
            wfd_addr = arg3;
            efd_addr = arg4;
            ts_addr = arg5;

            ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
            if (ret) {
                goto fail;
            }
            ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
            if (ret) {
                goto fail;
            }
            ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
            if (ret) {
                goto fail;
            }

            /*
             * This takes a timespec, and not a timeval, so we cannot
             * use the do_select() helper ...
             */
            if (ts_addr) {
                if (target_to_host_timespec(&ts, ts_addr)) {
                    goto efault;
                }
                ts_ptr = &ts;
            } else {
                ts_ptr = NULL;
            }

            /* Extract the two packed args for the sigset */
            if (arg6) {
                sig_ptr = &sig;
                sig.size = _NSIG / 8;

                arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
                if (!arg7) {
                    goto efault;
                }
                arg_sigset = tswapal(arg7[0]);
                arg_sigsize = tswapal(arg7[1]);
                unlock_user(arg7, arg6, 0);

                if (arg_sigset) {
                    sig.set = &set;
                    if (arg_sigsize != sizeof(*target_sigset)) {
                        /* Like the kernel, we enforce correct size sigsets */
                        ret = -TARGET_EINVAL;
                        goto fail;
                    }
                    target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                              sizeof(*target_sigset), 1);
                    if (!target_sigset) {
                        goto efault;
                    }
                    target_to_host_sigset(&set, target_sigset);
                    unlock_user(target_sigset, arg_sigset, 0);
                } else {
                    sig.set = NULL;
                }
            } else {
                sig_ptr = NULL;
            }

            ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                         ts_ptr, sig_ptr));

            if (!is_error(ret)) {
                if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
                    goto efault;
                if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
                    goto efault;
                if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
                    goto efault;

                if (ts_addr && host_to_target_timespec(ts_addr, &ts))
                    goto efault;
            }
        }
        break;
#endif
:
6239 p
= lock_user_string(arg1
);
6240 p2
= lock_user_string(arg2
);
6242 ret
= -TARGET_EFAULT
;
6244 ret
= get_errno(symlink(p
, p2
));
6245 unlock_user(p2
, arg2
, 0);
6246 unlock_user(p
, arg1
, 0);
6249 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6250 case TARGET_NR_symlinkat
:
6253 p
= lock_user_string(arg1
);
6254 p2
= lock_user_string(arg3
);
6256 ret
= -TARGET_EFAULT
;
6258 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
6259 unlock_user(p2
, arg3
, 0);
6260 unlock_user(p
, arg1
, 0);
6264 #ifdef TARGET_NR_oldlstat
6265 case TARGET_NR_oldlstat
:
6268 case TARGET_NR_readlink
:
6271 p
= lock_user_string(arg1
);
6272 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
6274 ret
= -TARGET_EFAULT
;
6276 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
6277 char real
[PATH_MAX
];
6278 temp
= realpath(exec_path
,real
);
6279 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
6280 snprintf((char *)p2
, arg3
, "%s", real
);
6283 ret
= get_errno(readlink(path(p
), p2
, arg3
));
6285 unlock_user(p2
, arg2
, ret
);
6286 unlock_user(p
, arg1
, 0);
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        break;
#endif
#ifdef TARGET_NR_uselib
    case TARGET_NR_uselib:
        goto unimplemented;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        break;
#endif
    case TARGET_NR_reboot:
        if (!(p = lock_user_string(arg4)))
            goto efault;
        ret = reboot(arg1, arg2, arg3, p);
        unlock_user(p, arg4, 0);
        break;
#ifdef TARGET_NR_readdir
    case TARGET_NR_readdir:
        goto unimplemented;
#endif
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                goto efault;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        break;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6 << MMAP_SHIFT));
        break;
#endif
    case TARGET_NR_munmap:
        ret = get_errno(target_munmap(arg1, arg2));
        break;
    case TARGET_NR_mprotect:
        {
            TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        ret = get_errno(target_mprotect(arg1, arg2, arg3));
        break;
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
        break;
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        ret = get_errno(msync(g2h(arg1), arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        ret = get_errno(mlock(g2h(arg1), arg2));
        break;
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        ret = get_errno(munlock(g2h(arg1), arg2));
        break;
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        ret = get_errno(mlockall(arg1));
        break;
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        ret = get_errno(munlockall());
        break;
#endif
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_ftruncate:
        ret = get_errno(ftruncate(arg1, arg2));
        break;
    case TARGET_NR_fchmod:
        ret = get_errno(fchmod(arg1, arg2));
        break;
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_fchmodat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_getpriority:
        /* libc does special remapping of the return value of
         * sys_getpriority() so it's just easiest to call
         * sys_getpriority() directly rather than through libc. */
        ret = get_errno(sys_getpriority(arg1, arg2));
        break;
    case TARGET_NR_setpriority:
        ret = get_errno(setpriority(arg1, arg2, arg3));
        break;
#ifdef TARGET_NR_profil
    case TARGET_NR_profil:
        goto unimplemented;
#endif
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                goto efault;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            unlock_user_struct(target_stfs, arg2, 1);
        }
        break;
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                goto efault;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            unlock_user_struct(target_stfs, arg3, 1);
        }
        break;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_ioperm
    case TARGET_NR_ioperm:
        goto unimplemented;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        ret = do_socketcall(arg1, arg2);
        break;
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        ret = do_accept(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        ret = do_bind(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        ret = do_connect(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        ret = do_getpeername(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        ret = do_getsockname(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        ret = get_errno(listen(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
        break;
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
        break;
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        ret = get_errno(shutdown(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        ret = do_socket(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        ret = do_socketpair(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
        break;
#endif
    case TARGET_NR_syslog:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
        unlock_user(p, arg2, 0);
        break;
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    goto efault;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    goto efault;
            }
        }
        break;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    goto efault;
            }
        }
        break;
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
        do_stat:
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    goto efault;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        break;
#ifdef TARGET_NR_olduname
    case TARGET_NR_olduname:
        goto unimplemented;
#endif
#ifdef TARGET_NR_iopl
    case TARGET_NR_iopl:
        goto unimplemented;
#endif
    case TARGET_NR_vhangup:
        ret = get_errno(vhangup());
        break;
#ifdef TARGET_NR_idle
    case TARGET_NR_idle:
        goto unimplemented;
#endif
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                         arg6, arg7, arg8, 0);
        break;
#endif
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        goto efault;
                }
                if (target_rusage)
                    host_to_target_rusage(target_rusage, &rusage);
            }
        }
        break;
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        break;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    goto efault;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        break;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        ret = get_errno(semget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        ret = get_errno(do_semop(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
        break;
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        ret = do_msgctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        ret = get_errno(msgget(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        ret = do_msgsnd(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        ret = get_errno(shmget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        ret = do_shmctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        ret = do_shmat(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        ret = do_shmdt(arg1);
        break;
#endif
    case TARGET_NR_fsync:
        ret = get_errno(fsync(arg1));
        break;
    case TARGET_NR_clone:
#if defined(TARGET_SH4) || defined(TARGET_ALPHA)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#elif defined(TARGET_CRIS)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
#elif defined(TARGET_S390X)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#endif
        break;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        gdb_exit(cpu_env, arg1);
        ret = get_errno(exit_group(arg1));
        break;
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
:
6850 /* no need to transcode because we use the linux syscall */
6852 struct new_utsname
* buf
;
6854 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
6856 ret
= get_errno(sys_uname(buf
));
6857 if (!is_error(ret
)) {
6858 /* Overrite the native machine name with whatever is being
6860 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
6861 /* Allow the user to override the reported release. */
6862 if (qemu_uname_release
&& *qemu_uname_release
)
6863 strcpy (buf
->release
, qemu_uname_release
);
6865 unlock_user_struct(buf
, arg1
, 1);
6869 case TARGET_NR_modify_ldt
:
6870 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
6872 #if !defined(TARGET_X86_64)
6873 case TARGET_NR_vm86old
:
6875 case TARGET_NR_vm86
:
6876 ret
= do_vm86(cpu_env
, arg1
, arg2
);
    case TARGET_NR_adjtimex:
#ifdef TARGET_NR_create_module
    case TARGET_NR_create_module:
    case TARGET_NR_init_module:
    case TARGET_NR_delete_module:
#ifdef TARGET_NR_get_kernel_syms
    case TARGET_NR_get_kernel_syms:
    case TARGET_NR_quotactl:
    case TARGET_NR_getpgid:
        ret = get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        ret = get_errno(fchdir(arg1));
#ifdef TARGET_NR_bdflush /* not on x86_64 */
    case TARGET_NR_bdflush:
#ifdef TARGET_NR_sysfs
    case TARGET_NR_sysfs:
    case TARGET_NR_personality:
        ret = get_errno(personality(arg1));
#ifdef TARGET_NR_afs_syscall
    case TARGET_NR_afs_syscall:
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
#if !defined(__NR_llseek)
        res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
        ret = get_errno(res);
        ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
        if ((ret == 0) && put_user_s64(res, arg4)) {
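    /* getdents: when a 32-bit guest runs on a 64-bit host the host
     * struct linux_dirent uses wider d_ino/d_off fields, so each record
     * returned by the host is repacked into a target_dirent with an
     * adjusted record length; otherwise only byte order is fixed up. */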
    case TARGET_NR_getdents:
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        struct target_dirent *target_dirp;
        struct linux_dirent *dirp;
        abi_long count = arg3;
        dirp = malloc(count);
        ret = -TARGET_ENOMEM;
        ret = get_errno(sys_getdents(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent *de;
            struct target_dirent *tde;
            int reclen, treclen;
            int count1, tnamelen;
            if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
            reclen = de->d_reclen;
            treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
            tde->d_reclen = tswap16(treclen);
            tde->d_ino = tswapal(de->d_ino);
            tde->d_off = tswapal(de->d_off);
            tnamelen = treclen - (2 * sizeof(abi_long) + 2);
            /* XXX: may not be correct */
            pstrcpy(tde->d_name, tnamelen, de->d_name);
            de = (struct linux_dirent *)((char *)de + reclen);
            tde = (struct target_dirent *)((char *)tde + treclen);
        unlock_user(target_dirp, arg2, ret);
        struct linux_dirent *dirp;
        abi_long count = arg3;
        if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
        ret = get_errno(sys_getdents(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent *de;
            reclen = de->d_reclen;
            de->d_reclen = tswap16(reclen);
            tswapls(&de->d_ino);
            tswapls(&de->d_off);
            de = (struct linux_dirent *)((char *)de + reclen);
        unlock_user(dirp, arg2, ret);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        struct linux_dirent64 *dirp;
        abi_long count = arg3;
        if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
        ret = get_errno(sys_getdents64(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent64 *de;
            reclen = de->d_reclen;
            de->d_reclen = tswap16(reclen);
            tswap64s((uint64_t *)&de->d_ino);
            tswap64s((uint64_t *)&de->d_off);
            de = (struct linux_dirent64 *)((char *)de + reclen);
        unlock_user(dirp, arg2, ret);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
    case TARGET_NR_select:
    case TARGET_NR__newselect:
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
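    /* poll/ppoll: the guest pollfd array is converted in place; for ppoll
     * the timeout timespec and signal mask are translated to host format,
     * and any remaining timeout is written back to the guest afterwards. */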
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        struct target_pollfd *target_pfd;
        unsigned int nfds = arg2;
        target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
# ifdef TARGET_NR_ppoll
        if (num == TARGET_NR_ppoll) {
            struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;
            if (target_to_host_timespec(timeout_ts, arg3)) {
                unlock_user(target_pfd, arg1, 0);
            target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                unlock_user(target_pfd, arg1, 0);
            target_to_host_sigset(set, target_set);
            ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
            if (!is_error(ret) && arg3) {
                host_to_target_timespec(arg3, timeout_ts);
            unlock_user(target_set, arg4, 0);
        ret = get_errno(poll(pfd, nfds, timeout));
        if (!is_error(ret)) {
            for (i = 0; i < nfds; i++) {
                target_pfd[i].revents = tswap16(pfd[i].revents);
        unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        ret = get_errno(flock(arg1, arg2));
    case TARGET_NR_readv:
        vec = alloca(count * sizeof(struct iovec));
        if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
        ret = get_errno(readv(arg1, vec, count));
        unlock_iovec(vec, arg2, count, 1);
    case TARGET_NR_writev:
        vec = alloca(count * sizeof(struct iovec));
        if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
        ret = get_errno(writev(arg1, vec, count));
        unlock_iovec(vec, arg2, count, 0);
    case TARGET_NR_getsid:
        ret = get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        ret = get_errno(fdatasync(arg1));
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        ret = -TARGET_ENOTDIR;
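    /* The kernel expects CPU affinity masks in multiples of
     * sizeof(unsigned long), so the guest-supplied length is checked for
     * abi_ulong alignment and rounded up to the host word size before the
     * sys_sched_getaffinity()/sys_sched_setaffinity() calls below. */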
    case TARGET_NR_sched_getaffinity:
        unsigned int mask_size;
        unsigned long *mask;
        /*
         * sched_getaffinity needs multiples of ulong, so need to take
         * care of mismatches between target ulong and host ulong sizes.
         */
        if (arg2 & (sizeof(abi_ulong) - 1)) {
            ret = -TARGET_EINVAL;
        mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
        mask = alloca(mask_size);
        ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
        if (!is_error(ret)) {
            if (copy_to_user(arg3, mask, ret)) {
    case TARGET_NR_sched_setaffinity:
        unsigned int mask_size;
        unsigned long *mask;
        /*
         * sched_setaffinity needs multiples of ulong, so need to take
         * care of mismatches between target ulong and host ulong sizes.
         */
        if (arg2 & (sizeof(abi_ulong) - 1)) {
            ret = -TARGET_EINVAL;
        mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
        mask = alloca(mask_size);
        if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
        memcpy(mask, p, arg2);
        unlock_user_struct(p, arg2, 0);
        ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
    case TARGET_NR_sched_setparam:
        struct sched_param *target_schp;
        struct sched_param schp;
        if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
        schp.sched_priority = tswap32(target_schp->sched_priority);
        unlock_user_struct(target_schp, arg2, 0);
        ret = get_errno(sched_setparam(arg1, &schp));
    case TARGET_NR_sched_getparam:
        struct sched_param *target_schp;
        struct sched_param schp;
        ret = get_errno(sched_getparam(arg1, &schp));
        if (!is_error(ret)) {
            if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
            target_schp->sched_priority = tswap32(schp.sched_priority);
            unlock_user_struct(target_schp, arg2, 1);
    case TARGET_NR_sched_setscheduler:
        struct sched_param *target_schp;
        struct sched_param schp;
        if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
        schp.sched_priority = tswap32(target_schp->sched_priority);
        unlock_user_struct(target_schp, arg3, 0);
        ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
    case TARGET_NR_sched_getscheduler:
        ret = get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        ret = get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        ret = get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        ret = get_errno(sched_get_priority_min(arg1));
    case TARGET_NR_sched_rr_get_interval:
        ret = get_errno(sched_rr_get_interval(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
    case TARGET_NR_nanosleep:
        struct timespec req, rem;
        target_to_host_timespec(&req, arg1);
        ret = get_errno(nanosleep(&req, &rem));
        if (is_error(ret) && arg2) {
            host_to_target_timespec(arg2, &rem);
#ifdef TARGET_NR_query_module
    case TARGET_NR_query_module:
#ifdef TARGET_NR_nfsservctl
    case TARGET_NR_nfsservctl:
    case TARGET_NR_prctl:
        case PR_GET_PDEATHSIG:
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2)) {
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            ret = get_errno(prctl(arg1, (unsigned long)name,
            unlock_user(name, arg2, 16);
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            ret = get_errno(prctl(arg1, (unsigned long)name,
            unlock_user(name, arg2, 0);
            /* Most prctl options have no pointer arguments */
            ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
#if defined(TARGET_I386) && !defined(TARGET_ABI32)
        ret = do_arch_prctl(cpu_env, arg1, arg2);
#ifdef TARGET_NR_pread
    case TARGET_NR_pread:
        if (regpairs_aligned(cpu_env))
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(pread(arg1, p, arg3, arg4));
        unlock_user(p, arg2, ret);
    case TARGET_NR_pwrite:
        if (regpairs_aligned(cpu_env))
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(pwrite(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
    case TARGET_NR_pwrite64:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    case TARGET_NR_sigaltstack:
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
    defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
    defined(TARGET_M68K) || defined(TARGET_S390X)
        ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
    case TARGET_NR_sendfile:
#ifdef TARGET_NR_getpmsg
    case TARGET_NR_getpmsg:
#ifdef TARGET_NR_putpmsg
    case TARGET_NR_putpmsg:
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        ret = host_to_target_stat64(cpu_env, arg2, &st);
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        ret = host_to_target_stat64(cpu_env, arg2, &st);
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        ret = host_to_target_stat64(cpu_env, arg2, &st);
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
    (defined(__NR_fstatat64) || defined(__NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
        if (!(p = lock_user_string(arg2)))
#ifdef __NR_fstatat64
        ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
        ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
        ret = host_to_target_stat64(cpu_env, arg3, &st);
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        ret = get_errno(high2lowuid(getuid()));
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        ret = get_errno(high2lowgid(getgid()));
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        ret = get_errno(high2lowuid(geteuid()));
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        ret = get_errno(high2lowgid(getegid()));
    case TARGET_NR_setreuid:
        ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        int gidsetsize = arg1;
        target_id *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        ret = get_errno(getgroups(gidsetsize, grouplist));
        if (gidsetsize == 0)
        if (!is_error(ret)) {
            target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
            if (!target_grouplist)
            for (i = 0; i < ret; i++)
                target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
            unlock_user(target_grouplist, arg2, gidsetsize * 2);
    case TARGET_NR_setgroups:
        int gidsetsize = arg1;
        target_id *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
        if (!target_grouplist) {
            ret = -TARGET_EFAULT;
        for (i = 0; i < gidsetsize; i++)
            grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
        unlock_user(target_grouplist, arg2, 0);
        ret = get_errno(setgroups(gidsetsize, grouplist));
    case TARGET_NR_fchown:
        ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        ret = get_errno(setresuid(low2highuid(arg1),
                                  low2highuid(arg3)));
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        uid_t ruid, euid, suid;
        ret = get_errno(getresuid(&ruid, &euid, &suid));
        if (!is_error(ret)) {
            if (put_user_u16(high2lowuid(ruid), arg1)
                || put_user_u16(high2lowuid(euid), arg2)
                || put_user_u16(high2lowuid(suid), arg3))
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        ret = get_errno(setresgid(low2highgid(arg1),
                                  low2highgid(arg3)));
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        gid_t rgid, egid, sgid;
        ret = get_errno(getresgid(&rgid, &egid, &sgid));
        if (!is_error(ret)) {
            if (put_user_u16(high2lowgid(rgid), arg1)
                || put_user_u16(high2lowgid(egid), arg2)
                || put_user_u16(high2lowgid(sgid), arg3))
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
    case TARGET_NR_setuid:
        ret = get_errno(setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        ret = get_errno(setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        ret = get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        ret = get_errno(getuid());
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        ret = get_errno(getuid());
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        ret = get_errno(getgid());
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        case TARGET_GSI_IEEE_FP_CONTROL:
            uint64_t swcr, fpcr = cpu_alpha_load_fpcr(cpu_env);
            /* Copied from linux ieee_fpcr_to_swcr. */
            swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
            swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
            swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
                                     | SWCR_TRAP_ENABLE_DZE
                                     | SWCR_TRAP_ENABLE_OVF);
            swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
                                     | SWCR_TRAP_ENABLE_INE);
            swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
            swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
            if (put_user_u64(swcr, arg2))
        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             -- Retrieves current unaligned access state; not much used.
             -- Retrieves implver information; surely not used.
             -- Grabs a copy of the HWRPB; surely not used.
        */
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        case TARGET_SSI_IEEE_FP_CONTROL:
        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            uint64_t swcr, fpcr, orig_fpcr;
            if (get_user_u64(swcr, arg2))
            orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
            fpcr = orig_fpcr & FPCR_DYN_MASK;
            /* Copied from linux ieee_swcr_to_fpcr. */
            fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
            fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
            fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                              | SWCR_TRAP_ENABLE_DZE
                              | SWCR_TRAP_ENABLE_OVF)) << 48;
            fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                              | SWCR_TRAP_ENABLE_INE)) << 57;
            fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
            fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
            cpu_alpha_store_fpcr(cpu_env, fpcr);
            if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
                /* Old exceptions are not signaled. */
                fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
                /* If any exceptions set by this call, and are unmasked,
                   send a signal. */
        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
        */
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific. */
    case TARGET_NR_osf_sigprocmask:
        sigset_t set, oldset;
        case TARGET_SIG_BLOCK:
        case TARGET_SIG_UNBLOCK:
        case TARGET_SIG_SETMASK:
            ret = -TARGET_EINVAL;
        target_to_host_old_sigset(&set, &mask);
        sigprocmask(how, &set, &oldset);
        host_to_target_old_sigset(&mask, &oldset);
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        ret = get_errno(getgid());
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        int gidsetsize = arg1;
        uint32_t *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        ret = get_errno(getgroups(gidsetsize, grouplist));
        if (gidsetsize == 0)
        if (!is_error(ret)) {
            target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
            for (i = 0; i < ret; i++)
                target_grouplist[i] = tswap32(grouplist[i]);
            unlock_user(target_grouplist, arg2, gidsetsize * 4);
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        int gidsetsize = arg1;
        uint32_t *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
        if (!target_grouplist) {
            ret = -TARGET_EFAULT;
        for (i = 0; i < gidsetsize; i++)
            grouplist[i] = tswap32(target_grouplist[i]);
        unlock_user(target_grouplist, arg2, 0);
        ret = get_errno(setgroups(gidsetsize, grouplist));
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        ret = get_errno(fchown(arg1, arg2, arg3));
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        ret = get_errno(setresuid(arg1, arg2, arg3));
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        uid_t ruid, euid, suid;
        ret = get_errno(getresuid(&ruid, &euid, &suid));
        if (!is_error(ret)) {
            if (put_user_u32(ruid, arg1)
                || put_user_u32(euid, arg2)
                || put_user_u32(suid, arg3))
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        ret = get_errno(setresgid(arg1, arg2, arg3));
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        gid_t rgid, egid, sgid;
        ret = get_errno(getresgid(&rgid, &egid, &sgid));
        if (!is_error(ret)) {
            if (put_user_u32(rgid, arg1)
                || put_user_u32(egid, arg2)
                || put_user_u32(sgid, arg3))
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
    case TARGET_NR_pivot_root:
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        ret = -TARGET_EFAULT;
        if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
        if (!(p = lock_user_string(arg3)))
        ret = get_errno(mincore(a, arg2, p));
        unlock_user(p, arg3, ret);
        unlock_user(a, arg1, 0);
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /*
         * arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order
         */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        ret = -posix_fadvise(arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok. */
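    /* fcntl64 on 32-bit ABIs: the guest flock64 layout (including the ARM
     * EABI variant with different alignment) is converted to the host
     * struct flock64 before calling fcntl(), and converted back for
     * F_GETLK64 so the guest sees the result in its own byte order. */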
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
        struct target_flock64 *target_fl;
        struct target_eabi_flock64 *target_efl;
        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
        case TARGET_F_GETLK64:
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                target_efl->l_type = tswap16(fl.l_type);
                target_efl->l_whence = tswap16(fl.l_whence);
                target_efl->l_start = tswap64(fl.l_start);
                target_efl->l_len = tswap64(fl.l_len);
                target_efl->l_pid = tswap32(fl.l_pid);
                unlock_user_struct(target_efl, arg3, 1);
                if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                target_fl->l_type = tswap16(fl.l_type);
                target_fl->l_whence = tswap16(fl.l_whence);
                target_fl->l_start = tswap64(fl.l_start);
                target_fl->l_len = tswap64(fl.l_len);
                target_fl->l_pid = tswap32(fl.l_pid);
                unlock_user_struct(target_fl, arg3, 1);
        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            ret = get_errno(fcntl(arg1, cmd, &fl));
            ret = do_fcntl(arg1, arg2, arg3);
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
#ifdef TARGET_NR_security
    case TARGET_NR_security:
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env)) {
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
        ret = get_errno(readahead(arg1, arg2, arg3));
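    /* Extended attribute syscalls: guest path/name strings and value
     * buffers are mapped with lock_user_string()/lock_user(), the host
     * call is made with sizes passed through unchanged, and the buffers
     * are unlocked (written back where appropriate) afterwards. */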
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
        b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
        ret = -TARGET_EFAULT;
        p = lock_user_string(arg1);
        if (num == TARGET_NR_listxattr) {
            ret = get_errno(listxattr(p, b, arg3));
            ret = get_errno(llistxattr(p, b, arg3));
        ret = -TARGET_EFAULT;
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
    case TARGET_NR_flistxattr:
        b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
        ret = -TARGET_EFAULT;
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        void *p, *n, *v = 0;
        v = lock_user(VERIFY_READ, arg3, arg4, 1);
        ret = -TARGET_EFAULT;
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (num == TARGET_NR_setxattr) {
            ret = get_errno(setxattr(p, n, v, arg4, arg5));
            ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
        ret = -TARGET_EFAULT;
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    case TARGET_NR_fsetxattr:
        v = lock_user(VERIFY_READ, arg3, arg4, 1);
        ret = -TARGET_EFAULT;
        n = lock_user_string(arg2);
        ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
        ret = -TARGET_EFAULT;
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        void *p, *n, *v = 0;
        v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
        ret = -TARGET_EFAULT;
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (num == TARGET_NR_getxattr) {
            ret = get_errno(getxattr(p, n, v, arg4));
            ret = get_errno(lgetxattr(p, n, v, arg4));
        ret = -TARGET_EFAULT;
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
    case TARGET_NR_fgetxattr:
        v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
        ret = -TARGET_EFAULT;
        n = lock_user_string(arg2);
        ret = get_errno(fgetxattr(arg1, n, v, arg4));
        ret = -TARGET_EFAULT;
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (num == TARGET_NR_removexattr) {
            ret = get_errno(removexattr(p, n));
            ret = get_errno(lremovexattr(p, n));
        ret = -TARGET_EFAULT;
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
    case TARGET_NR_fremovexattr:
        n = lock_user_string(arg2);
        ret = get_errno(fremovexattr(arg1, n));
        ret = -TARGET_EFAULT;
        unlock_user(n, arg2, 0);
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
#elif defined(TARGET_CRIS)
        ret = -TARGET_EINVAL;
        ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        goto unimplemented_nowarn;
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        goto unimplemented_nowarn;
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        host_to_target_timespec(arg4, &ts);
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
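    /* utimensat: both elements of the guest timespec pair are converted
     * to host format, and a NULL guest path is passed through so the
     * kernel applies the timestamps to the file referred to by the dirfd
     * itself (the futimens() case). */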
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        struct timespec *tsp, ts[2];
        target_to_host_timespec(ts, arg3);
        target_to_host_timespec(ts + 1, arg3 + sizeof(struct target_timespec));
        ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
        if (!(p = lock_user_string(arg2))) {
            ret = -TARGET_EFAULT;
        ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
        unlock_user(p, arg2, 0);
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        struct mq_attr posix_mq_attr;
        p = lock_user_string(arg1 - 1);
        copy_from_user_mq_attr(&posix_mq_attr, arg4);
        ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
        unlock_user(p, arg1, 0);
    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
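    /* mq_timedsend/mq_timedreceive: when the guest supplies a timeout the
     * timespec is converted to host format, the timed variant is called
     * and the timespec is copied back; without a timeout the plain
     * mq_send()/mq_receive() path is used instead. */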
    case TARGET_NR_mq_timedsend:
        p = lock_user(VERIFY_READ, arg2, arg3, 1);
        target_to_host_timespec(&ts, arg5);
        ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
        host_to_target_timespec(arg5, &ts);
        ret = get_errno(mq_send(arg1, p, arg3, arg4));
        unlock_user(p, arg2, arg3);
    case TARGET_NR_mq_timedreceive:
        p = lock_user(VERIFY_READ, arg2, arg3, 1);
        target_to_host_timespec(&ts, arg5);
        ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
        host_to_target_timespec(arg5, &ts);
        ret = get_errno(mq_receive(arg1, p, arg3, &prio));
        unlock_user(p, arg2, arg3);
        put_user_u32(prio, arg4);
        /* Not implemented for now... */
        /* case TARGET_NR_mq_notify: */
    case TARGET_NR_mq_getsetattr:
        struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
        ret = mq_getattr(arg1, &posix_mq_attr_out);
        copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
        copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
        ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
        ret = get_errno(tee(arg1, arg2, arg3, arg4));
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        loff_t loff_in, loff_out;
        loff_t *ploff_in = NULL, *ploff_out = NULL;
        get_user_u64(loff_in, arg2);
        ploff_in = &loff_in;
        get_user_u64(loff_out, arg4);
        ploff_out = &loff_out;
        ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        vec = alloca(count * sizeof(struct iovec));
        if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
        ret = get_errno(vmsplice(arg1, vec, count, arg4));
        unlock_iovec(vec, arg2, count, 0);
8539 #ifdef CONFIG_EVENTFD
8540 #if defined(TARGET_NR_eventfd)
8541 case TARGET_NR_eventfd
:
8542 ret
= get_errno(eventfd(arg1
, 0));
8545 #if defined(TARGET_NR_eventfd2)
8546 case TARGET_NR_eventfd2
:
8547 ret
= get_errno(eventfd(arg1
, arg2
));
8550 #endif /* CONFIG_EVENTFD */
8551 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8552 case TARGET_NR_fallocate
:
8553 #if TARGET_ABI_BITS == 32
8554 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
8555 target_offset64(arg5
, arg6
)));
8557 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        struct target_epoll_event *target_ep;
        if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
        ep.events = tswap32(target_ep->events);
        /* The epoll_data_t union is just opaque data to the kernel,
         * so we transfer all 64 bits across and need not worry what
         * actual data type it is.
         */
        ep.data.u64 = tswap64(target_ep->data.u64);
        unlock_user_struct(target_ep, arg4, 0);
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
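    /* epoll_wait/epoll_pwait: events are collected into a host array and
     * then copied out with only byte-order conversion, since epoll_data
     * is opaque 64-bit data as far as the kernel is concerned. */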
#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int maxevents = arg3;
        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        ep = alloca(maxevents * sizeof(struct epoll_event));
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;
            target_set = lock_user(VERIFY_READ, arg5,
                                   sizeof(target_sigset_t), 1);
                unlock_user(target_ep, arg2, 0);
            target_to_host_sigset(set, target_set);
            unlock_user(target_set, arg5, 0);
            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            ret = -TARGET_ENOSYS;
        if (!is_error(ret)) {
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
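    /* prlimit64: the new limit (if any) is converted from guest byte
     * order, the old limit is requested only when the guest passed a
     * destination pointer, and is converted back before being stored. */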
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
        rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
        rnew.rlim_max = tswap64(target_rnew->rlim_max);
        unlock_user_struct(target_rnew, arg3, 0);
        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
        ret = -TARGET_ENOSYS;
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
    print_syscall_ret(num, ret);
    ret = -TARGET_EFAULT;