4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
31 #include <sys/types.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
45 int __clone2(int (*fn
)(void *), void *child_stack_base
,
46 size_t stack_size
, int flags
, void *arg
, ...);
48 #include <sys/socket.h>
52 #include <sys/times.h>
55 #include <sys/statfs.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include "qemu-common.h"
68 #include <sys/eventfd.h>
71 #include <sys/epoll.h>
74 #include "qemu-xattr.h"
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/utsname.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include "linux_loop.h"
100 #include "cpu-uname.h"
104 #if defined(CONFIG_USE_NPTL)
105 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
106 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
108 /* XXX: Hardcode the above values. */
109 #define CLONE_NPTL_FLAGS2 0
114 //#include <linux/msdos_fs.h>
115 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
116 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
127 #define _syscall0(type,name) \
128 static type name (void) \
130 return syscall(__NR_##name); \
133 #define _syscall1(type,name,type1,arg1) \
134 static type name (type1 arg1) \
136 return syscall(__NR_##name, arg1); \
139 #define _syscall2(type,name,type1,arg1,type2,arg2) \
140 static type name (type1 arg1,type2 arg2) \
142 return syscall(__NR_##name, arg1, arg2); \
145 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
146 static type name (type1 arg1,type2 arg2,type3 arg3) \
148 return syscall(__NR_##name, arg1, arg2, arg3); \
151 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
152 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
154 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
157 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
161 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
165 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
166 type5,arg5,type6,arg6) \
167 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
174 #define __NR_sys_uname __NR_uname
175 #define __NR_sys_faccessat __NR_faccessat
176 #define __NR_sys_fchmodat __NR_fchmodat
177 #define __NR_sys_fchownat __NR_fchownat
178 #define __NR_sys_fstatat64 __NR_fstatat64
179 #define __NR_sys_futimesat __NR_futimesat
180 #define __NR_sys_getcwd1 __NR_getcwd
181 #define __NR_sys_getdents __NR_getdents
182 #define __NR_sys_getdents64 __NR_getdents64
183 #define __NR_sys_getpriority __NR_getpriority
184 #define __NR_sys_linkat __NR_linkat
185 #define __NR_sys_mkdirat __NR_mkdirat
186 #define __NR_sys_mknodat __NR_mknodat
187 #define __NR_sys_newfstatat __NR_newfstatat
188 #define __NR_sys_openat __NR_openat
189 #define __NR_sys_readlinkat __NR_readlinkat
190 #define __NR_sys_renameat __NR_renameat
191 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
192 #define __NR_sys_symlinkat __NR_symlinkat
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_tgkill __NR_tgkill
195 #define __NR_sys_tkill __NR_tkill
196 #define __NR_sys_unlinkat __NR_unlinkat
197 #define __NR_sys_utimensat __NR_utimensat
198 #define __NR_sys_futex __NR_futex
199 #define __NR_sys_inotify_init __NR_inotify_init
200 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
201 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
203 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
205 #define __NR__llseek __NR_lseek
209 _syscall0(int, gettid
)
211 /* This is a replacement for the host gettid() and must return a host
213 static int gettid(void) {
217 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
218 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
219 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
221 _syscall2(int, sys_getpriority
, int, which
, int, who
);
222 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
223 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
224 loff_t
*, res
, uint
, wh
);
226 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
227 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
228 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
229 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
231 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
232 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
234 #ifdef __NR_exit_group
235 _syscall1(int,exit_group
,int,error_code
)
237 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
238 _syscall1(int,set_tid_address
,int *,tidptr
)
240 #if defined(CONFIG_USE_NPTL)
241 #if defined(TARGET_NR_futex) && defined(__NR_futex)
242 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
243 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
247 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
248 unsigned long *, user_mask_ptr
);
249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
250 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
251 unsigned long *, user_mask_ptr
);
252 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
255 static bitmask_transtbl fcntl_flags_tbl
[] = {
256 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
257 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
258 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
259 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
260 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
261 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
262 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
263 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
264 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
265 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
266 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
267 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
268 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
269 #if defined(O_DIRECT)
270 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
275 #define COPY_UTSNAME_FIELD(dest, src) \
277 /* __NEW_UTS_LEN doesn't include terminating null */ \
278 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
279 (dest)[__NEW_UTS_LEN] = '\0'; \
282 static int sys_uname(struct new_utsname
*buf
)
284 struct utsname uts_buf
;
286 if (uname(&uts_buf
) < 0)
290 * Just in case these have some differences, we
291 * translate utsname to new_utsname (which is the
292 * struct linux kernel uses).
295 memset(buf
, 0, sizeof(*buf
));
296 COPY_UTSNAME_FIELD(buf
->sysname
, uts_buf
.sysname
);
297 COPY_UTSNAME_FIELD(buf
->nodename
, uts_buf
.nodename
);
298 COPY_UTSNAME_FIELD(buf
->release
, uts_buf
.release
);
299 COPY_UTSNAME_FIELD(buf
->version
, uts_buf
.version
);
300 COPY_UTSNAME_FIELD(buf
->machine
, uts_buf
.machine
);
302 COPY_UTSNAME_FIELD(buf
->domainname
, uts_buf
.domainname
);
306 #undef COPY_UTSNAME_FIELD
/*
 * Wrapper for the getcwd syscall convention: on success return the
 * length of the path *including* the trailing NUL, on failure return -1
 * (getcwd() has already set errno for us).
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (!getcwd(buf, size)) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
320 * Host system seems to have atfile syscall stubs available. We
321 * now enable them one by one as specified by target syscall_nr.h.
324 #ifdef TARGET_NR_faccessat
/* Host stub for faccessat: delegate to libc with no AT_* flags set. */
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
    return faccessat(dirfd, pathname, mode, 0);
}
330 #ifdef TARGET_NR_fchmodat
/* Host stub for fchmodat: delegate to libc with no AT_* flags set. */
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
    return fchmodat(dirfd, pathname, mode, 0);
}
336 #if defined(TARGET_NR_fchownat)
/* Host stub for fchownat: pass everything straight through to libc. */
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
    return fchownat(dirfd, pathname, owner, group, flags);
}
343 #ifdef __NR_fstatat64
/* Host stub for fstatat64: the host libc entry point is plain fstatat(). */
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
    return fstatat(dirfd, pathname, buf, flags);
}
350 #ifdef __NR_newfstatat
/* Host stub for newfstatat: identical to fstatat64, both map to fstatat(). */
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
    return fstatat(dirfd, pathname, buf, flags);
}
357 #ifdef TARGET_NR_futimesat
/* Host stub for futimesat: straight pass-through to the libc wrapper. */
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
    return futimesat(dirfd, pathname, times);
}
364 #ifdef TARGET_NR_linkat
/* Host stub for linkat: pass all five arguments straight through. */
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
    return linkat(olddirfd, oldpath, newdirfd, newpath, flags);
}
371 #ifdef TARGET_NR_mkdirat
/* Host stub for mkdirat: straight pass-through to libc. */
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
    return mkdirat(dirfd, pathname, mode);
}
377 #ifdef TARGET_NR_mknodat
378 static int sys_mknodat(int dirfd
, const char *pathname
, mode_t mode
,
381 return (mknodat(dirfd
, pathname
, mode
, dev
));
384 #ifdef TARGET_NR_openat
/*
 * Host stub for openat.  open(2) only consumes its 'mode' argument when
 * O_CREAT is present in 'flags'; forward it to the host openat() in that
 * case and drop it otherwise.
 */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    if (flags & O_CREAT) {
        return openat(dirfd, pathname, flags, mode);
    }
    return openat(dirfd, pathname, flags);
}
397 #ifdef TARGET_NR_readlinkat
/* Host stub for readlinkat: pass through; narrow the ssize_t result to int. */
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
    return readlinkat(dirfd, pathname, buf, bufsiz);
}
403 #ifdef TARGET_NR_renameat
/* Host stub for renameat: straight pass-through to libc. */
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
    return renameat(olddirfd, oldpath, newdirfd, newpath);
}
410 #ifdef TARGET_NR_symlinkat
/* Host stub for symlinkat: straight pass-through to libc. */
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
    return symlinkat(oldpath, newdirfd, newpath);
}
416 #ifdef TARGET_NR_unlinkat
/* Host stub for unlinkat: straight pass-through to libc. */
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
    return unlinkat(dirfd, pathname, flags);
}
422 #else /* !CONFIG_ATFILE */
425 * Try direct syscalls instead
427 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
428 _syscall3(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
)
430 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
431 _syscall3(int,sys_fchmodat
,int,dirfd
,const char *,pathname
, mode_t
,mode
)
433 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
434 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
435 uid_t
,owner
,gid_t
,group
,int,flags
)
437 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
438 defined(__NR_fstatat64)
439 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
440 struct stat
*,buf
,int,flags
)
442 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
443 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
444 const struct timeval
*,times
)
446 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
447 defined(__NR_newfstatat)
448 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
449 struct stat
*,buf
,int,flags
)
451 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
452 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
453 int,newdirfd
,const char *,newpath
,int,flags
)
455 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
456 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
458 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
459 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
460 mode_t
,mode
,dev_t
,dev
)
462 #if defined(TARGET_NR_openat) && defined(__NR_openat)
463 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
465 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
466 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
467 char *,buf
,size_t,bufsize
)
469 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
470 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
471 int,newdirfd
,const char *,newpath
)
473 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
474 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
475 int,newdirfd
,const char *,newpath
)
477 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
478 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
481 #endif /* CONFIG_ATFILE */
483 #ifdef CONFIG_UTIMENSAT
/*
 * Host stub for utimensat.  A NULL pathname means "operate on the
 * descriptor itself", which the host libc expresses as futimens();
 * otherwise forward everything to utimensat().
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (!pathname) {
        return futimens(dirfd, times);
    }
    return utimensat(dirfd, pathname, times, flags);
}
493 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
494 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
495 const struct timespec
*,tsp
,int,flags
)
497 #endif /* CONFIG_UTIMENSAT */
499 #ifdef CONFIG_INOTIFY
500 #include <sys/inotify.h>
502 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Host stub for inotify_init: straight pass-through to libc. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
508 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Host stub for inotify_add_watch: straight pass-through to libc. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
514 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Host stub for inotify_rm_watch: straight pass-through to libc. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
520 #ifdef CONFIG_INOTIFY1
521 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Host stub for inotify_init1: straight pass-through to libc. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
529 /* Userspace can usually survive runtime without inotify */
530 #undef TARGET_NR_inotify_init
531 #undef TARGET_NR_inotify_init1
532 #undef TARGET_NR_inotify_add_watch
533 #undef TARGET_NR_inotify_rm_watch
534 #endif /* CONFIG_INOTIFY */
536 #if defined(TARGET_NR_ppoll)
538 # define __NR_ppoll -1
540 #define __NR_sys_ppoll __NR_ppoll
541 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
542 struct timespec
*, timeout
, const __sigset_t
*, sigmask
,
546 #if defined(TARGET_NR_pselect6)
547 #ifndef __NR_pselect6
548 # define __NR_pselect6 -1
550 #define __NR_sys_pselect6 __NR_pselect6
551 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
552 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
555 #if defined(TARGET_NR_prlimit64)
556 #ifndef __NR_prlimit64
557 # define __NR_prlimit64 -1
559 #define __NR_sys_prlimit64 __NR_prlimit64
560 /* The glibc rlimit structure may not be that used by the underlying syscall */
561 struct host_rlimit64
{
565 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
566 const struct host_rlimit64
*, new_limit
,
567 struct host_rlimit64
*, old_limit
)
570 extern int personality(int);
571 extern int flock(int, int);
572 extern int setfsuid(int);
573 extern int setfsgid(int);
574 extern int setgroups(int, gid_t
*);
576 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
578 static inline int regpairs_aligned(void *cpu_env
) {
579 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
581 #elif defined(TARGET_MIPS)
582 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
584 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
587 #define ERRNO_TABLE_SIZE 1200
589 /* target_to_host_errno_table[] is initialized from
590 * host_to_target_errno_table[] in syscall_init(). */
591 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
595 * This list is the union of errno values overridden in asm-<arch>/errno.h
596 * minus the errnos that are not actually generic to all archs.
598 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
599 [EIDRM
] = TARGET_EIDRM
,
600 [ECHRNG
] = TARGET_ECHRNG
,
601 [EL2NSYNC
] = TARGET_EL2NSYNC
,
602 [EL3HLT
] = TARGET_EL3HLT
,
603 [EL3RST
] = TARGET_EL3RST
,
604 [ELNRNG
] = TARGET_ELNRNG
,
605 [EUNATCH
] = TARGET_EUNATCH
,
606 [ENOCSI
] = TARGET_ENOCSI
,
607 [EL2HLT
] = TARGET_EL2HLT
,
608 [EDEADLK
] = TARGET_EDEADLK
,
609 [ENOLCK
] = TARGET_ENOLCK
,
610 [EBADE
] = TARGET_EBADE
,
611 [EBADR
] = TARGET_EBADR
,
612 [EXFULL
] = TARGET_EXFULL
,
613 [ENOANO
] = TARGET_ENOANO
,
614 [EBADRQC
] = TARGET_EBADRQC
,
615 [EBADSLT
] = TARGET_EBADSLT
,
616 [EBFONT
] = TARGET_EBFONT
,
617 [ENOSTR
] = TARGET_ENOSTR
,
618 [ENODATA
] = TARGET_ENODATA
,
619 [ETIME
] = TARGET_ETIME
,
620 [ENOSR
] = TARGET_ENOSR
,
621 [ENONET
] = TARGET_ENONET
,
622 [ENOPKG
] = TARGET_ENOPKG
,
623 [EREMOTE
] = TARGET_EREMOTE
,
624 [ENOLINK
] = TARGET_ENOLINK
,
625 [EADV
] = TARGET_EADV
,
626 [ESRMNT
] = TARGET_ESRMNT
,
627 [ECOMM
] = TARGET_ECOMM
,
628 [EPROTO
] = TARGET_EPROTO
,
629 [EDOTDOT
] = TARGET_EDOTDOT
,
630 [EMULTIHOP
] = TARGET_EMULTIHOP
,
631 [EBADMSG
] = TARGET_EBADMSG
,
632 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
633 [EOVERFLOW
] = TARGET_EOVERFLOW
,
634 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
635 [EBADFD
] = TARGET_EBADFD
,
636 [EREMCHG
] = TARGET_EREMCHG
,
637 [ELIBACC
] = TARGET_ELIBACC
,
638 [ELIBBAD
] = TARGET_ELIBBAD
,
639 [ELIBSCN
] = TARGET_ELIBSCN
,
640 [ELIBMAX
] = TARGET_ELIBMAX
,
641 [ELIBEXEC
] = TARGET_ELIBEXEC
,
642 [EILSEQ
] = TARGET_EILSEQ
,
643 [ENOSYS
] = TARGET_ENOSYS
,
644 [ELOOP
] = TARGET_ELOOP
,
645 [ERESTART
] = TARGET_ERESTART
,
646 [ESTRPIPE
] = TARGET_ESTRPIPE
,
647 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
648 [EUSERS
] = TARGET_EUSERS
,
649 [ENOTSOCK
] = TARGET_ENOTSOCK
,
650 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
651 [EMSGSIZE
] = TARGET_EMSGSIZE
,
652 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
653 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
654 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
655 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
656 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
657 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
658 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
659 [EADDRINUSE
] = TARGET_EADDRINUSE
,
660 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
661 [ENETDOWN
] = TARGET_ENETDOWN
,
662 [ENETUNREACH
] = TARGET_ENETUNREACH
,
663 [ENETRESET
] = TARGET_ENETRESET
,
664 [ECONNABORTED
] = TARGET_ECONNABORTED
,
665 [ECONNRESET
] = TARGET_ECONNRESET
,
666 [ENOBUFS
] = TARGET_ENOBUFS
,
667 [EISCONN
] = TARGET_EISCONN
,
668 [ENOTCONN
] = TARGET_ENOTCONN
,
669 [EUCLEAN
] = TARGET_EUCLEAN
,
670 [ENOTNAM
] = TARGET_ENOTNAM
,
671 [ENAVAIL
] = TARGET_ENAVAIL
,
672 [EISNAM
] = TARGET_EISNAM
,
673 [EREMOTEIO
] = TARGET_EREMOTEIO
,
674 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
675 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
676 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
677 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
678 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
679 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
680 [EALREADY
] = TARGET_EALREADY
,
681 [EINPROGRESS
] = TARGET_EINPROGRESS
,
682 [ESTALE
] = TARGET_ESTALE
,
683 [ECANCELED
] = TARGET_ECANCELED
,
684 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
685 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
687 [ENOKEY
] = TARGET_ENOKEY
,
690 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
693 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
696 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
699 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
701 #ifdef ENOTRECOVERABLE
702 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
706 static inline int host_to_target_errno(int err
)
708 if(host_to_target_errno_table
[err
])
709 return host_to_target_errno_table
[err
];
713 static inline int target_to_host_errno(int err
)
715 if (target_to_host_errno_table
[err
])
716 return target_to_host_errno_table
[err
];
720 static inline abi_long
get_errno(abi_long ret
)
723 return -host_to_target_errno(errno
);
728 static inline int is_error(abi_long ret
)
730 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
733 char *target_strerror(int err
)
735 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
738 return strerror(target_to_host_errno(err
));
741 static abi_ulong target_brk
;
742 static abi_ulong target_original_brk
;
743 static abi_ulong brk_page
;
745 void target_set_brk(abi_ulong new_brk
)
747 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
748 brk_page
= HOST_PAGE_ALIGN(target_brk
);
751 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
752 #define DEBUGF_BRK(message, args...)
754 /* do_brk() must return target values and target errnos. */
755 abi_long
do_brk(abi_ulong new_brk
)
757 abi_long mapped_addr
;
760 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
763 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
766 if (new_brk
< target_original_brk
) {
767 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
772 /* If the new brk is less than the highest page reserved to the
773 * target heap allocation, set it and we're almost done... */
774 if (new_brk
<= brk_page
) {
775 /* Heap contents are initialized to zero, as for anonymous
777 if (new_brk
> target_brk
) {
778 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
780 target_brk
= new_brk
;
781 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
785 /* We need to allocate more memory after the brk... Note that
786 * we don't use MAP_FIXED because that will map over the top of
787 * any existing mapping (like the one with the host libc or qemu
788 * itself); instead we treat "mapped but at wrong address" as
789 * a failure and unmap again.
791 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
792 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
793 PROT_READ
|PROT_WRITE
,
794 MAP_ANON
|MAP_PRIVATE
, 0, 0));
796 if (mapped_addr
== brk_page
) {
797 /* Heap contents are initialized to zero, as for anonymous
798 * mapped pages. Technically the new pages are already
799 * initialized to zero since they *are* anonymous mapped
800 * pages, however we have to take care with the contents that
801 * come from the remaining part of the previous page: it may
802 * contains garbage data due to a previous heap usage (grown
804 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
806 target_brk
= new_brk
;
807 brk_page
= HOST_PAGE_ALIGN(target_brk
);
808 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
811 } else if (mapped_addr
!= -1) {
812 /* Mapped but at wrong address, meaning there wasn't actually
813 * enough space for this brk.
815 target_munmap(mapped_addr
, new_alloc_size
);
817 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
820 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
823 #if defined(TARGET_ALPHA)
824 /* We (partially) emulate OSF/1 on Alpha, which requires we
825 return a proper errno, not an unchanged brk value. */
826 return -TARGET_ENOMEM
;
828 /* For everything else, return the previous break. */
832 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
833 abi_ulong target_fds_addr
,
837 abi_ulong b
, *target_fds
;
839 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
840 if (!(target_fds
= lock_user(VERIFY_READ
,
842 sizeof(abi_ulong
) * nw
,
844 return -TARGET_EFAULT
;
848 for (i
= 0; i
< nw
; i
++) {
849 /* grab the abi_ulong */
850 __get_user(b
, &target_fds
[i
]);
851 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
852 /* check the bit inside the abi_ulong */
859 unlock_user(target_fds
, target_fds_addr
, 0);
864 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
865 abi_ulong target_fds_addr
,
868 if (target_fds_addr
) {
869 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
870 return -TARGET_EFAULT
;
878 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
884 abi_ulong
*target_fds
;
886 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
887 if (!(target_fds
= lock_user(VERIFY_WRITE
,
889 sizeof(abi_ulong
) * nw
,
891 return -TARGET_EFAULT
;
894 for (i
= 0; i
< nw
; i
++) {
896 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
897 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
900 __put_user(v
, &target_fds
[i
]);
903 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
908 #if defined(__alpha__)
914 static inline abi_long
host_to_target_clock_t(long ticks
)
916 #if HOST_HZ == TARGET_HZ
919 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
923 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
924 const struct rusage
*rusage
)
926 struct target_rusage
*target_rusage
;
928 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
929 return -TARGET_EFAULT
;
930 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
931 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
932 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
933 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
934 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
935 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
936 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
937 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
938 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
939 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
940 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
941 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
942 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
943 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
944 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
945 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
946 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
947 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
948 unlock_user_struct(target_rusage
, target_addr
, 1);
953 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
955 abi_ulong target_rlim_swap
;
958 target_rlim_swap
= tswapal(target_rlim
);
959 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
960 return RLIM_INFINITY
;
962 result
= target_rlim_swap
;
963 if (target_rlim_swap
!= (rlim_t
)result
)
964 return RLIM_INFINITY
;
969 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
971 abi_ulong target_rlim_swap
;
974 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
975 target_rlim_swap
= TARGET_RLIM_INFINITY
;
977 target_rlim_swap
= rlim
;
978 result
= tswapal(target_rlim_swap
);
983 static inline int target_to_host_resource(int code
)
986 case TARGET_RLIMIT_AS
:
988 case TARGET_RLIMIT_CORE
:
990 case TARGET_RLIMIT_CPU
:
992 case TARGET_RLIMIT_DATA
:
994 case TARGET_RLIMIT_FSIZE
:
996 case TARGET_RLIMIT_LOCKS
:
998 case TARGET_RLIMIT_MEMLOCK
:
999 return RLIMIT_MEMLOCK
;
1000 case TARGET_RLIMIT_MSGQUEUE
:
1001 return RLIMIT_MSGQUEUE
;
1002 case TARGET_RLIMIT_NICE
:
1004 case TARGET_RLIMIT_NOFILE
:
1005 return RLIMIT_NOFILE
;
1006 case TARGET_RLIMIT_NPROC
:
1007 return RLIMIT_NPROC
;
1008 case TARGET_RLIMIT_RSS
:
1010 case TARGET_RLIMIT_RTPRIO
:
1011 return RLIMIT_RTPRIO
;
1012 case TARGET_RLIMIT_SIGPENDING
:
1013 return RLIMIT_SIGPENDING
;
1014 case TARGET_RLIMIT_STACK
:
1015 return RLIMIT_STACK
;
1021 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1022 abi_ulong target_tv_addr
)
1024 struct target_timeval
*target_tv
;
1026 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1027 return -TARGET_EFAULT
;
1029 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1030 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1032 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1037 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1038 const struct timeval
*tv
)
1040 struct target_timeval
*target_tv
;
1042 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1043 return -TARGET_EFAULT
;
1045 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1046 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1048 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1053 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1056 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1057 abi_ulong target_mq_attr_addr
)
1059 struct target_mq_attr
*target_mq_attr
;
1061 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1062 target_mq_attr_addr
, 1))
1063 return -TARGET_EFAULT
;
1065 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1066 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1067 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1068 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1070 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1075 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1076 const struct mq_attr
*attr
)
1078 struct target_mq_attr
*target_mq_attr
;
1080 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1081 target_mq_attr_addr
, 0))
1082 return -TARGET_EFAULT
;
1084 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1085 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1086 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1087 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1089 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1095 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1096 /* do_select() must return target values and target errnos. */
1097 static abi_long
do_select(int n
,
1098 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1099 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1101 fd_set rfds
, wfds
, efds
;
1102 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1103 struct timeval tv
, *tv_ptr
;
1106 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1110 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1114 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1119 if (target_tv_addr
) {
1120 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1121 return -TARGET_EFAULT
;
1127 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1129 if (!is_error(ret
)) {
1130 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1131 return -TARGET_EFAULT
;
1132 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1133 return -TARGET_EFAULT
;
1134 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1135 return -TARGET_EFAULT
;
1137 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1138 return -TARGET_EFAULT
;
1145 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1148 return pipe2(host_pipe
, flags
);
1154 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1155 int flags
, int is_pipe2
)
1159 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1162 return get_errno(ret
);
1164 /* Several targets have special calling conventions for the original
1165 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1167 #if defined(TARGET_ALPHA)
1168 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1169 return host_pipe
[0];
1170 #elif defined(TARGET_MIPS)
1171 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1172 return host_pipe
[0];
1173 #elif defined(TARGET_SH4)
1174 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1175 return host_pipe
[0];
1179 if (put_user_s32(host_pipe
[0], pipedes
)
1180 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1181 return -TARGET_EFAULT
;
1182 return get_errno(ret
);
1185 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1186 abi_ulong target_addr
,
1189 struct target_ip_mreqn
*target_smreqn
;
1191 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1193 return -TARGET_EFAULT
;
1194 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1195 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1196 if (len
== sizeof(struct target_ip_mreqn
))
1197 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1198 unlock_user(target_smreqn
, target_addr
, 0);
1203 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1204 abi_ulong target_addr
,
1207 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1208 sa_family_t sa_family
;
1209 struct target_sockaddr
*target_saddr
;
1211 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1213 return -TARGET_EFAULT
;
1215 sa_family
= tswap16(target_saddr
->sa_family
);
1217 /* Oops. The caller might send a incomplete sun_path; sun_path
1218 * must be terminated by \0 (see the manual page), but
1219 * unfortunately it is quite common to specify sockaddr_un
1220 * length as "strlen(x->sun_path)" while it should be
1221 * "strlen(...) + 1". We'll fix that here if needed.
1222 * Linux kernel has a similar feature.
1225 if (sa_family
== AF_UNIX
) {
1226 if (len
< unix_maxlen
&& len
> 0) {
1227 char *cp
= (char*)target_saddr
;
1229 if ( cp
[len
-1] && !cp
[len
] )
1232 if (len
> unix_maxlen
)
1236 memcpy(addr
, target_saddr
, len
);
1237 addr
->sa_family
= sa_family
;
1238 unlock_user(target_saddr
, target_addr
, 0);
1243 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1244 struct sockaddr
*addr
,
1247 struct target_sockaddr
*target_saddr
;
1249 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1251 return -TARGET_EFAULT
;
1252 memcpy(target_saddr
, addr
, len
);
1253 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1254 unlock_user(target_saddr
, target_addr
, len
);
1259 /* ??? Should this also swap msgh->name? */
1260 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1261 struct target_msghdr
*target_msgh
)
1263 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1264 abi_long msg_controllen
;
1265 abi_ulong target_cmsg_addr
;
1266 struct target_cmsghdr
*target_cmsg
;
1267 socklen_t space
= 0;
1269 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1270 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1272 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1273 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1275 return -TARGET_EFAULT
;
1277 while (cmsg
&& target_cmsg
) {
1278 void *data
= CMSG_DATA(cmsg
);
1279 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1281 int len
= tswapal(target_cmsg
->cmsg_len
)
1282 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1284 space
+= CMSG_SPACE(len
);
1285 if (space
> msgh
->msg_controllen
) {
1286 space
-= CMSG_SPACE(len
);
1287 gemu_log("Host cmsg overflow\n");
1291 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1292 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1293 cmsg
->cmsg_len
= CMSG_LEN(len
);
1295 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1296 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1297 memcpy(data
, target_data
, len
);
1299 int *fd
= (int *)data
;
1300 int *target_fd
= (int *)target_data
;
1301 int i
, numfds
= len
/ sizeof(int);
1303 for (i
= 0; i
< numfds
; i
++)
1304 fd
[i
] = tswap32(target_fd
[i
]);
1307 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1308 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1310 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1312 msgh
->msg_controllen
= space
;
1316 /* ??? Should this also swap msgh->name? */
1317 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1318 struct msghdr
*msgh
)
1320 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1321 abi_long msg_controllen
;
1322 abi_ulong target_cmsg_addr
;
1323 struct target_cmsghdr
*target_cmsg
;
1324 socklen_t space
= 0;
1326 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1327 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1329 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1330 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1332 return -TARGET_EFAULT
;
1334 while (cmsg
&& target_cmsg
) {
1335 void *data
= CMSG_DATA(cmsg
);
1336 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1338 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1340 space
+= TARGET_CMSG_SPACE(len
);
1341 if (space
> msg_controllen
) {
1342 space
-= TARGET_CMSG_SPACE(len
);
1343 gemu_log("Target cmsg overflow\n");
1347 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1348 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1349 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(len
));
1351 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1352 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1353 memcpy(target_data
, data
, len
);
1355 int *fd
= (int *)data
;
1356 int *target_fd
= (int *)target_data
;
1357 int i
, numfds
= len
/ sizeof(int);
1359 for (i
= 0; i
< numfds
; i
++)
1360 target_fd
[i
] = tswap32(fd
[i
]);
1363 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1364 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1366 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1368 target_msgh
->msg_controllen
= tswapal(space
);
1372 /* do_setsockopt() Must return target values and target errnos. */
1373 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1374 abi_ulong optval_addr
, socklen_t optlen
)
1378 struct ip_mreqn
*ip_mreq
;
1379 struct ip_mreq_source
*ip_mreq_source
;
1383 /* TCP options all take an 'int' value. */
1384 if (optlen
< sizeof(uint32_t))
1385 return -TARGET_EINVAL
;
1387 if (get_user_u32(val
, optval_addr
))
1388 return -TARGET_EFAULT
;
1389 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1396 case IP_ROUTER_ALERT
:
1400 case IP_MTU_DISCOVER
:
1406 case IP_MULTICAST_TTL
:
1407 case IP_MULTICAST_LOOP
:
1409 if (optlen
>= sizeof(uint32_t)) {
1410 if (get_user_u32(val
, optval_addr
))
1411 return -TARGET_EFAULT
;
1412 } else if (optlen
>= 1) {
1413 if (get_user_u8(val
, optval_addr
))
1414 return -TARGET_EFAULT
;
1416 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1418 case IP_ADD_MEMBERSHIP
:
1419 case IP_DROP_MEMBERSHIP
:
1420 if (optlen
< sizeof (struct target_ip_mreq
) ||
1421 optlen
> sizeof (struct target_ip_mreqn
))
1422 return -TARGET_EINVAL
;
1424 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1425 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1426 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1429 case IP_BLOCK_SOURCE
:
1430 case IP_UNBLOCK_SOURCE
:
1431 case IP_ADD_SOURCE_MEMBERSHIP
:
1432 case IP_DROP_SOURCE_MEMBERSHIP
:
1433 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1434 return -TARGET_EINVAL
;
1436 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1437 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1438 unlock_user (ip_mreq_source
, optval_addr
, 0);
1445 case TARGET_SOL_SOCKET
:
1447 /* Options with 'int' argument. */
1448 case TARGET_SO_DEBUG
:
1451 case TARGET_SO_REUSEADDR
:
1452 optname
= SO_REUSEADDR
;
1454 case TARGET_SO_TYPE
:
1457 case TARGET_SO_ERROR
:
1460 case TARGET_SO_DONTROUTE
:
1461 optname
= SO_DONTROUTE
;
1463 case TARGET_SO_BROADCAST
:
1464 optname
= SO_BROADCAST
;
1466 case TARGET_SO_SNDBUF
:
1467 optname
= SO_SNDBUF
;
1469 case TARGET_SO_RCVBUF
:
1470 optname
= SO_RCVBUF
;
1472 case TARGET_SO_KEEPALIVE
:
1473 optname
= SO_KEEPALIVE
;
1475 case TARGET_SO_OOBINLINE
:
1476 optname
= SO_OOBINLINE
;
1478 case TARGET_SO_NO_CHECK
:
1479 optname
= SO_NO_CHECK
;
1481 case TARGET_SO_PRIORITY
:
1482 optname
= SO_PRIORITY
;
1485 case TARGET_SO_BSDCOMPAT
:
1486 optname
= SO_BSDCOMPAT
;
1489 case TARGET_SO_PASSCRED
:
1490 optname
= SO_PASSCRED
;
1492 case TARGET_SO_TIMESTAMP
:
1493 optname
= SO_TIMESTAMP
;
1495 case TARGET_SO_RCVLOWAT
:
1496 optname
= SO_RCVLOWAT
;
1498 case TARGET_SO_RCVTIMEO
:
1499 optname
= SO_RCVTIMEO
;
1501 case TARGET_SO_SNDTIMEO
:
1502 optname
= SO_SNDTIMEO
;
1508 if (optlen
< sizeof(uint32_t))
1509 return -TARGET_EINVAL
;
1511 if (get_user_u32(val
, optval_addr
))
1512 return -TARGET_EFAULT
;
1513 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1517 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1518 ret
= -TARGET_ENOPROTOOPT
;
1523 /* do_getsockopt() Must return target values and target errnos. */
1524 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1525 abi_ulong optval_addr
, abi_ulong optlen
)
1532 case TARGET_SOL_SOCKET
:
1535 /* These don't just return a single integer */
1536 case TARGET_SO_LINGER
:
1537 case TARGET_SO_RCVTIMEO
:
1538 case TARGET_SO_SNDTIMEO
:
1539 case TARGET_SO_PEERNAME
:
1541 case TARGET_SO_PEERCRED
: {
1544 struct target_ucred
*tcr
;
1546 if (get_user_u32(len
, optlen
)) {
1547 return -TARGET_EFAULT
;
1550 return -TARGET_EINVAL
;
1554 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1562 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1563 return -TARGET_EFAULT
;
1565 __put_user(cr
.pid
, &tcr
->pid
);
1566 __put_user(cr
.uid
, &tcr
->uid
);
1567 __put_user(cr
.gid
, &tcr
->gid
);
1568 unlock_user_struct(tcr
, optval_addr
, 1);
1569 if (put_user_u32(len
, optlen
)) {
1570 return -TARGET_EFAULT
;
1574 /* Options with 'int' argument. */
1575 case TARGET_SO_DEBUG
:
1578 case TARGET_SO_REUSEADDR
:
1579 optname
= SO_REUSEADDR
;
1581 case TARGET_SO_TYPE
:
1584 case TARGET_SO_ERROR
:
1587 case TARGET_SO_DONTROUTE
:
1588 optname
= SO_DONTROUTE
;
1590 case TARGET_SO_BROADCAST
:
1591 optname
= SO_BROADCAST
;
1593 case TARGET_SO_SNDBUF
:
1594 optname
= SO_SNDBUF
;
1596 case TARGET_SO_RCVBUF
:
1597 optname
= SO_RCVBUF
;
1599 case TARGET_SO_KEEPALIVE
:
1600 optname
= SO_KEEPALIVE
;
1602 case TARGET_SO_OOBINLINE
:
1603 optname
= SO_OOBINLINE
;
1605 case TARGET_SO_NO_CHECK
:
1606 optname
= SO_NO_CHECK
;
1608 case TARGET_SO_PRIORITY
:
1609 optname
= SO_PRIORITY
;
1612 case TARGET_SO_BSDCOMPAT
:
1613 optname
= SO_BSDCOMPAT
;
1616 case TARGET_SO_PASSCRED
:
1617 optname
= SO_PASSCRED
;
1619 case TARGET_SO_TIMESTAMP
:
1620 optname
= SO_TIMESTAMP
;
1622 case TARGET_SO_RCVLOWAT
:
1623 optname
= SO_RCVLOWAT
;
1630 /* TCP options all take an 'int' value. */
1632 if (get_user_u32(len
, optlen
))
1633 return -TARGET_EFAULT
;
1635 return -TARGET_EINVAL
;
1637 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1643 if (put_user_u32(val
, optval_addr
))
1644 return -TARGET_EFAULT
;
1646 if (put_user_u8(val
, optval_addr
))
1647 return -TARGET_EFAULT
;
1649 if (put_user_u32(len
, optlen
))
1650 return -TARGET_EFAULT
;
1657 case IP_ROUTER_ALERT
:
1661 case IP_MTU_DISCOVER
:
1667 case IP_MULTICAST_TTL
:
1668 case IP_MULTICAST_LOOP
:
1669 if (get_user_u32(len
, optlen
))
1670 return -TARGET_EFAULT
;
1672 return -TARGET_EINVAL
;
1674 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1677 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1679 if (put_user_u32(len
, optlen
)
1680 || put_user_u8(val
, optval_addr
))
1681 return -TARGET_EFAULT
;
1683 if (len
> sizeof(int))
1685 if (put_user_u32(len
, optlen
)
1686 || put_user_u32(val
, optval_addr
))
1687 return -TARGET_EFAULT
;
1691 ret
= -TARGET_ENOPROTOOPT
;
1697 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1699 ret
= -TARGET_EOPNOTSUPP
;
1706 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1707 * other lock functions have a return code of 0 for failure.
1709 static abi_long
lock_iovec(int type
, struct iovec
*vec
, abi_ulong target_addr
,
1710 int count
, int copy
)
1712 struct target_iovec
*target_vec
;
1716 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1718 return -TARGET_EFAULT
;
1719 for(i
= 0;i
< count
; i
++) {
1720 base
= tswapal(target_vec
[i
].iov_base
);
1721 vec
[i
].iov_len
= tswapal(target_vec
[i
].iov_len
);
1722 if (vec
[i
].iov_len
!= 0) {
1723 vec
[i
].iov_base
= lock_user(type
, base
, vec
[i
].iov_len
, copy
);
1724 /* Don't check lock_user return value. We must call writev even
1725 if a element has invalid base address. */
1727 /* zero length pointer is ignored */
1728 vec
[i
].iov_base
= NULL
;
1731 unlock_user (target_vec
, target_addr
, 0);
1735 static abi_long
unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1736 int count
, int copy
)
1738 struct target_iovec
*target_vec
;
1742 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1744 return -TARGET_EFAULT
;
1745 for(i
= 0;i
< count
; i
++) {
1746 if (target_vec
[i
].iov_base
) {
1747 base
= tswapal(target_vec
[i
].iov_base
);
1748 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1751 unlock_user (target_vec
, target_addr
, 0);
1756 /* do_socket() Must return target values and target errnos. */
1757 static abi_long
do_socket(int domain
, int type
, int protocol
)
1759 #if defined(TARGET_MIPS)
1761 case TARGET_SOCK_DGRAM
:
1764 case TARGET_SOCK_STREAM
:
1767 case TARGET_SOCK_RAW
:
1770 case TARGET_SOCK_RDM
:
1773 case TARGET_SOCK_SEQPACKET
:
1774 type
= SOCK_SEQPACKET
;
1776 case TARGET_SOCK_PACKET
:
1781 if (domain
== PF_NETLINK
)
1782 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1783 return get_errno(socket(domain
, type
, protocol
));
1786 /* do_bind() Must return target values and target errnos. */
1787 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1793 if ((int)addrlen
< 0) {
1794 return -TARGET_EINVAL
;
1797 addr
= alloca(addrlen
+1);
1799 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1803 return get_errno(bind(sockfd
, addr
, addrlen
));
1806 /* do_connect() Must return target values and target errnos. */
1807 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1813 if ((int)addrlen
< 0) {
1814 return -TARGET_EINVAL
;
1817 addr
= alloca(addrlen
);
1819 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1823 return get_errno(connect(sockfd
, addr
, addrlen
));
1826 /* do_sendrecvmsg() Must return target values and target errnos. */
1827 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1828 int flags
, int send
)
1831 struct target_msghdr
*msgp
;
1835 abi_ulong target_vec
;
1838 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1842 return -TARGET_EFAULT
;
1843 if (msgp
->msg_name
) {
1844 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1845 msg
.msg_name
= alloca(msg
.msg_namelen
);
1846 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapal(msgp
->msg_name
),
1849 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1853 msg
.msg_name
= NULL
;
1854 msg
.msg_namelen
= 0;
1856 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
1857 msg
.msg_control
= alloca(msg
.msg_controllen
);
1858 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1860 count
= tswapal(msgp
->msg_iovlen
);
1861 vec
= alloca(count
* sizeof(struct iovec
));
1862 target_vec
= tswapal(msgp
->msg_iov
);
1863 lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
, vec
, target_vec
, count
, send
);
1864 msg
.msg_iovlen
= count
;
1868 ret
= target_to_host_cmsg(&msg
, msgp
);
1870 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1872 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1873 if (!is_error(ret
)) {
1875 ret
= host_to_target_cmsg(msgp
, &msg
);
1880 unlock_iovec(vec
, target_vec
, count
, !send
);
1881 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1885 /* do_accept() Must return target values and target errnos. */
1886 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
1887 abi_ulong target_addrlen_addr
)
1893 if (target_addr
== 0)
1894 return get_errno(accept(fd
, NULL
, NULL
));
1896 /* linux returns EINVAL if addrlen pointer is invalid */
1897 if (get_user_u32(addrlen
, target_addrlen_addr
))
1898 return -TARGET_EINVAL
;
1900 if ((int)addrlen
< 0) {
1901 return -TARGET_EINVAL
;
1904 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1905 return -TARGET_EINVAL
;
1907 addr
= alloca(addrlen
);
1909 ret
= get_errno(accept(fd
, addr
, &addrlen
));
1910 if (!is_error(ret
)) {
1911 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1912 if (put_user_u32(addrlen
, target_addrlen_addr
))
1913 ret
= -TARGET_EFAULT
;
1918 /* do_getpeername() Must return target values and target errnos. */
1919 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
1920 abi_ulong target_addrlen_addr
)
1926 if (get_user_u32(addrlen
, target_addrlen_addr
))
1927 return -TARGET_EFAULT
;
1929 if ((int)addrlen
< 0) {
1930 return -TARGET_EINVAL
;
1933 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1934 return -TARGET_EFAULT
;
1936 addr
= alloca(addrlen
);
1938 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
1939 if (!is_error(ret
)) {
1940 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1941 if (put_user_u32(addrlen
, target_addrlen_addr
))
1942 ret
= -TARGET_EFAULT
;
1947 /* do_getsockname() Must return target values and target errnos. */
1948 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
1949 abi_ulong target_addrlen_addr
)
1955 if (get_user_u32(addrlen
, target_addrlen_addr
))
1956 return -TARGET_EFAULT
;
1958 if ((int)addrlen
< 0) {
1959 return -TARGET_EINVAL
;
1962 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1963 return -TARGET_EFAULT
;
1965 addr
= alloca(addrlen
);
1967 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
1968 if (!is_error(ret
)) {
1969 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1970 if (put_user_u32(addrlen
, target_addrlen_addr
))
1971 ret
= -TARGET_EFAULT
;
1976 /* do_socketpair() Must return target values and target errnos. */
1977 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
1978 abi_ulong target_tab_addr
)
1983 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
1984 if (!is_error(ret
)) {
1985 if (put_user_s32(tab
[0], target_tab_addr
)
1986 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
1987 ret
= -TARGET_EFAULT
;
1992 /* do_sendto() Must return target values and target errnos. */
1993 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
1994 abi_ulong target_addr
, socklen_t addrlen
)
2000 if ((int)addrlen
< 0) {
2001 return -TARGET_EINVAL
;
2004 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2006 return -TARGET_EFAULT
;
2008 addr
= alloca(addrlen
);
2009 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2011 unlock_user(host_msg
, msg
, 0);
2014 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2016 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2018 unlock_user(host_msg
, msg
, 0);
2022 /* do_recvfrom() Must return target values and target errnos. */
2023 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2024 abi_ulong target_addr
,
2025 abi_ulong target_addrlen
)
2032 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2034 return -TARGET_EFAULT
;
2036 if (get_user_u32(addrlen
, target_addrlen
)) {
2037 ret
= -TARGET_EFAULT
;
2040 if ((int)addrlen
< 0) {
2041 ret
= -TARGET_EINVAL
;
2044 addr
= alloca(addrlen
);
2045 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2047 addr
= NULL
; /* To keep compiler quiet. */
2048 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2050 if (!is_error(ret
)) {
2052 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2053 if (put_user_u32(addrlen
, target_addrlen
)) {
2054 ret
= -TARGET_EFAULT
;
2058 unlock_user(host_msg
, msg
, len
);
2061 unlock_user(host_msg
, msg
, 0);
2066 #ifdef TARGET_NR_socketcall
2067 /* do_socketcall() Must return target values and target errnos. */
2068 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2071 const int n
= sizeof(abi_ulong
);
2076 abi_ulong domain
, type
, protocol
;
2078 if (get_user_ual(domain
, vptr
)
2079 || get_user_ual(type
, vptr
+ n
)
2080 || get_user_ual(protocol
, vptr
+ 2 * n
))
2081 return -TARGET_EFAULT
;
2083 ret
= do_socket(domain
, type
, protocol
);
2089 abi_ulong target_addr
;
2092 if (get_user_ual(sockfd
, vptr
)
2093 || get_user_ual(target_addr
, vptr
+ n
)
2094 || get_user_ual(addrlen
, vptr
+ 2 * n
))
2095 return -TARGET_EFAULT
;
2097 ret
= do_bind(sockfd
, target_addr
, addrlen
);
2100 case SOCKOP_connect
:
2103 abi_ulong target_addr
;
2106 if (get_user_ual(sockfd
, vptr
)
2107 || get_user_ual(target_addr
, vptr
+ n
)
2108 || get_user_ual(addrlen
, vptr
+ 2 * n
))
2109 return -TARGET_EFAULT
;
2111 ret
= do_connect(sockfd
, target_addr
, addrlen
);
2116 abi_ulong sockfd
, backlog
;
2118 if (get_user_ual(sockfd
, vptr
)
2119 || get_user_ual(backlog
, vptr
+ n
))
2120 return -TARGET_EFAULT
;
2122 ret
= get_errno(listen(sockfd
, backlog
));
2128 abi_ulong target_addr
, target_addrlen
;
2130 if (get_user_ual(sockfd
, vptr
)
2131 || get_user_ual(target_addr
, vptr
+ n
)
2132 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2133 return -TARGET_EFAULT
;
2135 ret
= do_accept(sockfd
, target_addr
, target_addrlen
);
2138 case SOCKOP_getsockname
:
2141 abi_ulong target_addr
, target_addrlen
;
2143 if (get_user_ual(sockfd
, vptr
)
2144 || get_user_ual(target_addr
, vptr
+ n
)
2145 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2146 return -TARGET_EFAULT
;
2148 ret
= do_getsockname(sockfd
, target_addr
, target_addrlen
);
2151 case SOCKOP_getpeername
:
2154 abi_ulong target_addr
, target_addrlen
;
2156 if (get_user_ual(sockfd
, vptr
)
2157 || get_user_ual(target_addr
, vptr
+ n
)
2158 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2159 return -TARGET_EFAULT
;
2161 ret
= do_getpeername(sockfd
, target_addr
, target_addrlen
);
2164 case SOCKOP_socketpair
:
2166 abi_ulong domain
, type
, protocol
;
2169 if (get_user_ual(domain
, vptr
)
2170 || get_user_ual(type
, vptr
+ n
)
2171 || get_user_ual(protocol
, vptr
+ 2 * n
)
2172 || get_user_ual(tab
, vptr
+ 3 * n
))
2173 return -TARGET_EFAULT
;
2175 ret
= do_socketpair(domain
, type
, protocol
, tab
);
2185 if (get_user_ual(sockfd
, vptr
)
2186 || get_user_ual(msg
, vptr
+ n
)
2187 || get_user_ual(len
, vptr
+ 2 * n
)
2188 || get_user_ual(flags
, vptr
+ 3 * n
))
2189 return -TARGET_EFAULT
;
2191 ret
= do_sendto(sockfd
, msg
, len
, flags
, 0, 0);
2201 if (get_user_ual(sockfd
, vptr
)
2202 || get_user_ual(msg
, vptr
+ n
)
2203 || get_user_ual(len
, vptr
+ 2 * n
)
2204 || get_user_ual(flags
, vptr
+ 3 * n
))
2205 return -TARGET_EFAULT
;
2207 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, 0, 0);
2219 if (get_user_ual(sockfd
, vptr
)
2220 || get_user_ual(msg
, vptr
+ n
)
2221 || get_user_ual(len
, vptr
+ 2 * n
)
2222 || get_user_ual(flags
, vptr
+ 3 * n
)
2223 || get_user_ual(addr
, vptr
+ 4 * n
)
2224 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2225 return -TARGET_EFAULT
;
2227 ret
= do_sendto(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2230 case SOCKOP_recvfrom
:
2239 if (get_user_ual(sockfd
, vptr
)
2240 || get_user_ual(msg
, vptr
+ n
)
2241 || get_user_ual(len
, vptr
+ 2 * n
)
2242 || get_user_ual(flags
, vptr
+ 3 * n
)
2243 || get_user_ual(addr
, vptr
+ 4 * n
)
2244 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2245 return -TARGET_EFAULT
;
2247 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2250 case SOCKOP_shutdown
:
2252 abi_ulong sockfd
, how
;
2254 if (get_user_ual(sockfd
, vptr
)
2255 || get_user_ual(how
, vptr
+ n
))
2256 return -TARGET_EFAULT
;
2258 ret
= get_errno(shutdown(sockfd
, how
));
2261 case SOCKOP_sendmsg
:
2262 case SOCKOP_recvmsg
:
2265 abi_ulong target_msg
;
2268 if (get_user_ual(fd
, vptr
)
2269 || get_user_ual(target_msg
, vptr
+ n
)
2270 || get_user_ual(flags
, vptr
+ 2 * n
))
2271 return -TARGET_EFAULT
;
2273 ret
= do_sendrecvmsg(fd
, target_msg
, flags
,
2274 (num
== SOCKOP_sendmsg
));
2277 case SOCKOP_setsockopt
:
2285 if (get_user_ual(sockfd
, vptr
)
2286 || get_user_ual(level
, vptr
+ n
)
2287 || get_user_ual(optname
, vptr
+ 2 * n
)
2288 || get_user_ual(optval
, vptr
+ 3 * n
)
2289 || get_user_ual(optlen
, vptr
+ 4 * n
))
2290 return -TARGET_EFAULT
;
2292 ret
= do_setsockopt(sockfd
, level
, optname
, optval
, optlen
);
2295 case SOCKOP_getsockopt
:
2303 if (get_user_ual(sockfd
, vptr
)
2304 || get_user_ual(level
, vptr
+ n
)
2305 || get_user_ual(optname
, vptr
+ 2 * n
)
2306 || get_user_ual(optval
, vptr
+ 3 * n
)
2307 || get_user_ual(optlen
, vptr
+ 4 * n
))
2308 return -TARGET_EFAULT
;
2310 ret
= do_getsockopt(sockfd
, level
, optname
, optval
, optlen
);
2314 gemu_log("Unsupported socketcall: %d\n", num
);
2315 ret
= -TARGET_ENOSYS
;
2322 #define N_SHM_REGIONS 32
2324 static struct shm_region
{
2327 } shm_regions
[N_SHM_REGIONS
];
2329 struct target_ipc_perm
2336 unsigned short int mode
;
2337 unsigned short int __pad1
;
2338 unsigned short int __seq
;
2339 unsigned short int __pad2
;
2340 abi_ulong __unused1
;
2341 abi_ulong __unused2
;
2344 struct target_semid_ds
2346 struct target_ipc_perm sem_perm
;
2347 abi_ulong sem_otime
;
2348 abi_ulong __unused1
;
2349 abi_ulong sem_ctime
;
2350 abi_ulong __unused2
;
2351 abi_ulong sem_nsems
;
2352 abi_ulong __unused3
;
2353 abi_ulong __unused4
;
2356 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2357 abi_ulong target_addr
)
2359 struct target_ipc_perm
*target_ip
;
2360 struct target_semid_ds
*target_sd
;
2362 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2363 return -TARGET_EFAULT
;
2364 target_ip
= &(target_sd
->sem_perm
);
2365 host_ip
->__key
= tswapal(target_ip
->__key
);
2366 host_ip
->uid
= tswapal(target_ip
->uid
);
2367 host_ip
->gid
= tswapal(target_ip
->gid
);
2368 host_ip
->cuid
= tswapal(target_ip
->cuid
);
2369 host_ip
->cgid
= tswapal(target_ip
->cgid
);
2370 host_ip
->mode
= tswap16(target_ip
->mode
);
2371 unlock_user_struct(target_sd
, target_addr
, 0);
2375 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2376 struct ipc_perm
*host_ip
)
2378 struct target_ipc_perm
*target_ip
;
2379 struct target_semid_ds
*target_sd
;
2381 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2382 return -TARGET_EFAULT
;
2383 target_ip
= &(target_sd
->sem_perm
);
2384 target_ip
->__key
= tswapal(host_ip
->__key
);
2385 target_ip
->uid
= tswapal(host_ip
->uid
);
2386 target_ip
->gid
= tswapal(host_ip
->gid
);
2387 target_ip
->cuid
= tswapal(host_ip
->cuid
);
2388 target_ip
->cgid
= tswapal(host_ip
->cgid
);
2389 target_ip
->mode
= tswap16(host_ip
->mode
);
2390 unlock_user_struct(target_sd
, target_addr
, 1);
2394 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2395 abi_ulong target_addr
)
2397 struct target_semid_ds
*target_sd
;
2399 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2400 return -TARGET_EFAULT
;
2401 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2402 return -TARGET_EFAULT
;
2403 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2404 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2405 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2406 unlock_user_struct(target_sd
, target_addr
, 0);
2410 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2411 struct semid_ds
*host_sd
)
2413 struct target_semid_ds
*target_sd
;
2415 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2416 return -TARGET_EFAULT
;
2417 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2418 return -TARGET_EFAULT
;
2419 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2420 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2421 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2422 unlock_user_struct(target_sd
, target_addr
, 1);
2426 struct target_seminfo
{
2439 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2440 struct seminfo
*host_seminfo
)
2442 struct target_seminfo
*target_seminfo
;
2443 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2444 return -TARGET_EFAULT
;
2445 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2446 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2447 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2448 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2449 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2450 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2451 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2452 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2453 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2454 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2455 unlock_user_struct(target_seminfo
, target_addr
, 1);
2461 struct semid_ds
*buf
;
2462 unsigned short *array
;
2463 struct seminfo
*__buf
;
2466 union target_semun
{
2473 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2474 abi_ulong target_addr
)
2477 unsigned short *array
;
2479 struct semid_ds semid_ds
;
2482 semun
.buf
= &semid_ds
;
2484 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2486 return get_errno(ret
);
2488 nsems
= semid_ds
.sem_nsems
;
2490 *host_array
= malloc(nsems
*sizeof(unsigned short));
2491 array
= lock_user(VERIFY_READ
, target_addr
,
2492 nsems
*sizeof(unsigned short), 1);
2494 return -TARGET_EFAULT
;
2496 for(i
=0; i
<nsems
; i
++) {
2497 __get_user((*host_array
)[i
], &array
[i
]);
2499 unlock_user(array
, target_addr
, 0);
2504 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2505 unsigned short **host_array
)
2508 unsigned short *array
;
2510 struct semid_ds semid_ds
;
2513 semun
.buf
= &semid_ds
;
2515 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2517 return get_errno(ret
);
2519 nsems
= semid_ds
.sem_nsems
;
2521 array
= lock_user(VERIFY_WRITE
, target_addr
,
2522 nsems
*sizeof(unsigned short), 0);
2524 return -TARGET_EFAULT
;
2526 for(i
=0; i
<nsems
; i
++) {
2527 __put_user((*host_array
)[i
], &array
[i
]);
2530 unlock_user(array
, target_addr
, 1);
2535 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2536 union target_semun target_su
)
2539 struct semid_ds dsarg
;
2540 unsigned short *array
= NULL
;
2541 struct seminfo seminfo
;
2542 abi_long ret
= -TARGET_EINVAL
;
2549 arg
.val
= tswap32(target_su
.val
);
2550 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2551 target_su
.val
= tswap32(arg
.val
);
2555 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2559 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2560 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2567 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2571 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2572 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2578 arg
.__buf
= &seminfo
;
2579 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2580 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2588 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
2595 struct target_sembuf
{
2596 unsigned short sem_num
;
2601 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2602 abi_ulong target_addr
,
2605 struct target_sembuf
*target_sembuf
;
2608 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2609 nsops
*sizeof(struct target_sembuf
), 1);
2611 return -TARGET_EFAULT
;
2613 for(i
=0; i
<nsops
; i
++) {
2614 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2615 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2616 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2619 unlock_user(target_sembuf
, target_addr
, 0);
2624 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2626 struct sembuf sops
[nsops
];
2628 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2629 return -TARGET_EFAULT
;
2631 return semop(semid
, sops
, nsops
);
2634 struct target_msqid_ds
2636 struct target_ipc_perm msg_perm
;
2637 abi_ulong msg_stime
;
2638 #if TARGET_ABI_BITS == 32
2639 abi_ulong __unused1
;
2641 abi_ulong msg_rtime
;
2642 #if TARGET_ABI_BITS == 32
2643 abi_ulong __unused2
;
2645 abi_ulong msg_ctime
;
2646 #if TARGET_ABI_BITS == 32
2647 abi_ulong __unused3
;
2649 abi_ulong __msg_cbytes
;
2651 abi_ulong msg_qbytes
;
2652 abi_ulong msg_lspid
;
2653 abi_ulong msg_lrpid
;
2654 abi_ulong __unused4
;
2655 abi_ulong __unused5
;
2658 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2659 abi_ulong target_addr
)
2661 struct target_msqid_ds
*target_md
;
2663 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2664 return -TARGET_EFAULT
;
2665 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2666 return -TARGET_EFAULT
;
2667 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
2668 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
2669 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
2670 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
2671 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
2672 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
2673 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
2674 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
2675 unlock_user_struct(target_md
, target_addr
, 0);
2679 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2680 struct msqid_ds
*host_md
)
2682 struct target_msqid_ds
*target_md
;
2684 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2685 return -TARGET_EFAULT
;
2686 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2687 return -TARGET_EFAULT
;
2688 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
2689 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
2690 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
2691 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
2692 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
2693 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
2694 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
2695 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
2696 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-layout mirror of struct msginfo (IPC_INFO/MSG_INFO result). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
2711 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2712 struct msginfo
*host_msginfo
)
2714 struct target_msginfo
*target_msginfo
;
2715 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2716 return -TARGET_EFAULT
;
2717 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2718 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2719 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2720 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2721 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2722 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2723 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2724 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2725 unlock_user_struct(target_msginfo
, target_addr
, 1);
2729 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2731 struct msqid_ds dsarg
;
2732 struct msginfo msginfo
;
2733 abi_long ret
= -TARGET_EINVAL
;
2741 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2742 return -TARGET_EFAULT
;
2743 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2744 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2745 return -TARGET_EFAULT
;
2748 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2752 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2753 if (host_to_target_msginfo(ptr
, &msginfo
))
2754 return -TARGET_EFAULT
;
2761 struct target_msgbuf
{
2766 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2767 unsigned int msgsz
, int msgflg
)
2769 struct target_msgbuf
*target_mb
;
2770 struct msgbuf
*host_mb
;
2773 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2774 return -TARGET_EFAULT
;
2775 host_mb
= malloc(msgsz
+sizeof(long));
2776 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
2777 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2778 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2780 unlock_user_struct(target_mb
, msgp
, 0);
2785 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2786 unsigned int msgsz
, abi_long msgtyp
,
2789 struct target_msgbuf
*target_mb
;
2791 struct msgbuf
*host_mb
;
2794 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2795 return -TARGET_EFAULT
;
2797 host_mb
= malloc(msgsz
+sizeof(long));
2798 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, tswapal(msgtyp
), msgflg
));
2801 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2802 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2803 if (!target_mtext
) {
2804 ret
= -TARGET_EFAULT
;
2807 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2808 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2811 target_mb
->mtype
= tswapal(host_mb
->mtype
);
2816 unlock_user_struct(target_mb
, msgp
, 1);
2820 struct target_shmid_ds
2822 struct target_ipc_perm shm_perm
;
2823 abi_ulong shm_segsz
;
2824 abi_ulong shm_atime
;
2825 #if TARGET_ABI_BITS == 32
2826 abi_ulong __unused1
;
2828 abi_ulong shm_dtime
;
2829 #if TARGET_ABI_BITS == 32
2830 abi_ulong __unused2
;
2832 abi_ulong shm_ctime
;
2833 #if TARGET_ABI_BITS == 32
2834 abi_ulong __unused3
;
2838 abi_ulong shm_nattch
;
2839 unsigned long int __unused4
;
2840 unsigned long int __unused5
;
2843 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2844 abi_ulong target_addr
)
2846 struct target_shmid_ds
*target_sd
;
2848 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2849 return -TARGET_EFAULT
;
2850 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2851 return -TARGET_EFAULT
;
2852 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2853 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2854 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2855 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2856 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2857 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2858 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2859 unlock_user_struct(target_sd
, target_addr
, 0);
2863 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2864 struct shmid_ds
*host_sd
)
2866 struct target_shmid_ds
*target_sd
;
2868 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2869 return -TARGET_EFAULT
;
2870 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2871 return -TARGET_EFAULT
;
2872 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2873 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2874 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2875 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2876 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2877 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2878 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2879 unlock_user_struct(target_sd
, target_addr
, 1);
2883 struct target_shminfo
{
2891 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
2892 struct shminfo
*host_shminfo
)
2894 struct target_shminfo
*target_shminfo
;
2895 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
2896 return -TARGET_EFAULT
;
2897 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
2898 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
2899 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
2900 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
2901 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
2902 unlock_user_struct(target_shminfo
, target_addr
, 1);
2906 struct target_shm_info
{
2911 abi_ulong swap_attempts
;
2912 abi_ulong swap_successes
;
2915 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
2916 struct shm_info
*host_shm_info
)
2918 struct target_shm_info
*target_shm_info
;
2919 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
2920 return -TARGET_EFAULT
;
2921 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
2922 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
2923 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
2924 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
2925 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
2926 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
2927 unlock_user_struct(target_shm_info
, target_addr
, 1);
2931 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
2933 struct shmid_ds dsarg
;
2934 struct shminfo shminfo
;
2935 struct shm_info shm_info
;
2936 abi_long ret
= -TARGET_EINVAL
;
2944 if (target_to_host_shmid_ds(&dsarg
, buf
))
2945 return -TARGET_EFAULT
;
2946 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
2947 if (host_to_target_shmid_ds(buf
, &dsarg
))
2948 return -TARGET_EFAULT
;
2951 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
2952 if (host_to_target_shminfo(buf
, &shminfo
))
2953 return -TARGET_EFAULT
;
2956 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
2957 if (host_to_target_shm_info(buf
, &shm_info
))
2958 return -TARGET_EFAULT
;
2963 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
2970 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
2974 struct shmid_ds shm_info
;
2977 /* find out the length of the shared memory segment */
2978 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
2979 if (is_error(ret
)) {
2980 /* can't get length, bail out */
2987 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
2989 abi_ulong mmap_start
;
2991 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
2993 if (mmap_start
== -1) {
2995 host_raddr
= (void *)-1;
2997 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3000 if (host_raddr
== (void *)-1) {
3002 return get_errno((long)host_raddr
);
3004 raddr
=h2g((unsigned long)host_raddr
);
3006 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3007 PAGE_VALID
| PAGE_READ
|
3008 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3010 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3011 if (shm_regions
[i
].start
== 0) {
3012 shm_regions
[i
].start
= raddr
;
3013 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3023 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3027 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3028 if (shm_regions
[i
].start
== shmaddr
) {
3029 shm_regions
[i
].start
= 0;
3030 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3035 return get_errno(shmdt(g2h(shmaddr
)));
3038 #ifdef TARGET_NR_ipc
3039 /* ??? This only works with linear mappings. */
3040 /* do_ipc() must return target values and target errnos. */
3041 static abi_long
do_ipc(unsigned int call
, int first
,
3042 int second
, int third
,
3043 abi_long ptr
, abi_long fifth
)
3048 version
= call
>> 16;
3053 ret
= do_semop(first
, ptr
, second
);
3057 ret
= get_errno(semget(first
, second
, third
));
3061 ret
= do_semctl(first
, second
, third
, (union target_semun
)(abi_ulong
) ptr
);
3065 ret
= get_errno(msgget(first
, second
));
3069 ret
= do_msgsnd(first
, ptr
, second
, third
);
3073 ret
= do_msgctl(first
, second
, ptr
);
3080 struct target_ipc_kludge
{
3085 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
3086 ret
= -TARGET_EFAULT
;
3090 ret
= do_msgrcv(first
, tmp
->msgp
, second
, tmp
->msgtyp
, third
);
3092 unlock_user_struct(tmp
, ptr
, 0);
3096 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
3105 raddr
= do_shmat(first
, ptr
, second
);
3106 if (is_error(raddr
))
3107 return get_errno(raddr
);
3108 if (put_user_ual(raddr
, third
))
3109 return -TARGET_EFAULT
;
3113 ret
= -TARGET_EINVAL
;
3118 ret
= do_shmdt(ptr
);
3122 /* IPC_* flag values are the same on all linux platforms */
3123 ret
= get_errno(shmget(first
, second
, third
));
3126 /* IPC_* and SHM_* command values are the same on all linux platforms */
3128 ret
= do_shmctl(first
, second
, third
);
3131 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
3132 ret
= -TARGET_ENOSYS
;
3139 /* kernel structure types definitions */
3141 #define STRUCT(name, ...) STRUCT_ ## name,
3142 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3144 #include "syscall_types.h"
3147 #undef STRUCT_SPECIAL
3149 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3150 #define STRUCT_SPECIAL(name)
3151 #include "syscall_types.h"
3153 #undef STRUCT_SPECIAL
3155 typedef struct IOCTLEntry IOCTLEntry
;
3157 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3158 int fd
, abi_long cmd
, abi_long arg
);
3161 unsigned int target_cmd
;
3162 unsigned int host_cmd
;
3165 do_ioctl_fn
*do_ioctl
;
3166 const argtype arg_type
[5];
3169 #define IOC_R 0x0001
3170 #define IOC_W 0x0002
3171 #define IOC_RW (IOC_R | IOC_W)
3173 #define MAX_STRUCT_SIZE 4096
3175 #ifdef CONFIG_FIEMAP
3176 /* So fiemap access checks don't overflow on 32 bit systems.
3177 * This is very slightly smaller than the limit imposed by
3178 * the underlying kernel.
3180 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3181 / sizeof(struct fiemap_extent))
3183 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3184 int fd
, abi_long cmd
, abi_long arg
)
3186 /* The parameter for this ioctl is a struct fiemap followed
3187 * by an array of struct fiemap_extent whose size is set
3188 * in fiemap->fm_extent_count. The array is filled in by the
3191 int target_size_in
, target_size_out
;
3193 const argtype
*arg_type
= ie
->arg_type
;
3194 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3197 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3201 assert(arg_type
[0] == TYPE_PTR
);
3202 assert(ie
->access
== IOC_RW
);
3204 target_size_in
= thunk_type_size(arg_type
, 0);
3205 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3207 return -TARGET_EFAULT
;
3209 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3210 unlock_user(argptr
, arg
, 0);
3211 fm
= (struct fiemap
*)buf_temp
;
3212 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3213 return -TARGET_EINVAL
;
3216 outbufsz
= sizeof (*fm
) +
3217 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3219 if (outbufsz
> MAX_STRUCT_SIZE
) {
3220 /* We can't fit all the extents into the fixed size buffer.
3221 * Allocate one that is large enough and use it instead.
3223 fm
= malloc(outbufsz
);
3225 return -TARGET_ENOMEM
;
3227 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3230 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3231 if (!is_error(ret
)) {
3232 target_size_out
= target_size_in
;
3233 /* An extent_count of 0 means we were only counting the extents
3234 * so there are no structs to copy
3236 if (fm
->fm_extent_count
!= 0) {
3237 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3239 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3241 ret
= -TARGET_EFAULT
;
3243 /* Convert the struct fiemap */
3244 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3245 if (fm
->fm_extent_count
!= 0) {
3246 p
= argptr
+ target_size_in
;
3247 /* ...and then all the struct fiemap_extents */
3248 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3249 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3254 unlock_user(argptr
, arg
, target_size_out
);
3264 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3265 int fd
, abi_long cmd
, abi_long arg
)
3267 const argtype
*arg_type
= ie
->arg_type
;
3271 struct ifconf
*host_ifconf
;
3273 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3274 int target_ifreq_size
;
3279 abi_long target_ifc_buf
;
3283 assert(arg_type
[0] == TYPE_PTR
);
3284 assert(ie
->access
== IOC_RW
);
3287 target_size
= thunk_type_size(arg_type
, 0);
3289 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3291 return -TARGET_EFAULT
;
3292 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3293 unlock_user(argptr
, arg
, 0);
3295 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3296 target_ifc_len
= host_ifconf
->ifc_len
;
3297 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3299 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3300 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3301 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3303 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3304 if (outbufsz
> MAX_STRUCT_SIZE
) {
3305 /* We can't fit all the extents into the fixed size buffer.
3306 * Allocate one that is large enough and use it instead.
3308 host_ifconf
= malloc(outbufsz
);
3310 return -TARGET_ENOMEM
;
3312 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3315 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3317 host_ifconf
->ifc_len
= host_ifc_len
;
3318 host_ifconf
->ifc_buf
= host_ifc_buf
;
3320 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3321 if (!is_error(ret
)) {
3322 /* convert host ifc_len to target ifc_len */
3324 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3325 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3326 host_ifconf
->ifc_len
= target_ifc_len
;
3328 /* restore target ifc_buf */
3330 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3332 /* copy struct ifconf to target user */
3334 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3336 return -TARGET_EFAULT
;
3337 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3338 unlock_user(argptr
, arg
, target_size
);
3340 /* copy ifreq[] to target user */
3342 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3343 for (i
= 0; i
< nb_ifreq
; i
++) {
3344 thunk_convert(argptr
+ i
* target_ifreq_size
,
3345 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3346 ifreq_arg_type
, THUNK_TARGET
);
3348 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3358 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3359 abi_long cmd
, abi_long arg
)
3362 struct dm_ioctl
*host_dm
;
3363 abi_long guest_data
;
3364 uint32_t guest_data_size
;
3366 const argtype
*arg_type
= ie
->arg_type
;
3368 void *big_buf
= NULL
;
3372 target_size
= thunk_type_size(arg_type
, 0);
3373 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3375 ret
= -TARGET_EFAULT
;
3378 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3379 unlock_user(argptr
, arg
, 0);
3381 /* buf_temp is too small, so fetch things into a bigger buffer */
3382 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3383 memcpy(big_buf
, buf_temp
, target_size
);
3387 guest_data
= arg
+ host_dm
->data_start
;
3388 if ((guest_data
- arg
) < 0) {
3392 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3393 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3395 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3396 switch (ie
->host_cmd
) {
3398 case DM_LIST_DEVICES
:
3401 case DM_DEV_SUSPEND
:
3404 case DM_TABLE_STATUS
:
3405 case DM_TABLE_CLEAR
:
3407 case DM_LIST_VERSIONS
:
3411 case DM_DEV_SET_GEOMETRY
:
3412 /* data contains only strings */
3413 memcpy(host_data
, argptr
, guest_data_size
);
3416 memcpy(host_data
, argptr
, guest_data_size
);
3417 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3421 void *gspec
= argptr
;
3422 void *cur_data
= host_data
;
3423 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3424 int spec_size
= thunk_type_size(arg_type
, 0);
3427 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3428 struct dm_target_spec
*spec
= cur_data
;
3432 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3433 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3435 spec
->next
= sizeof(*spec
) + slen
;
3436 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3438 cur_data
+= spec
->next
;
3443 ret
= -TARGET_EINVAL
;
3446 unlock_user(argptr
, guest_data
, 0);
3448 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3449 if (!is_error(ret
)) {
3450 guest_data
= arg
+ host_dm
->data_start
;
3451 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3452 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3453 switch (ie
->host_cmd
) {
3458 case DM_DEV_SUSPEND
:
3461 case DM_TABLE_CLEAR
:
3463 case DM_DEV_SET_GEOMETRY
:
3464 /* no return data */
3466 case DM_LIST_DEVICES
:
3468 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3469 uint32_t remaining_data
= guest_data_size
;
3470 void *cur_data
= argptr
;
3471 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3472 int nl_size
= 12; /* can't use thunk_size due to alignment */
3475 uint32_t next
= nl
->next
;
3477 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3479 if (remaining_data
< nl
->next
) {
3480 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3483 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3484 strcpy(cur_data
+ nl_size
, nl
->name
);
3485 cur_data
+= nl
->next
;
3486 remaining_data
-= nl
->next
;
3490 nl
= (void*)nl
+ next
;
3495 case DM_TABLE_STATUS
:
3497 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3498 void *cur_data
= argptr
;
3499 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3500 int spec_size
= thunk_type_size(arg_type
, 0);
3503 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3504 uint32_t next
= spec
->next
;
3505 int slen
= strlen((char*)&spec
[1]) + 1;
3506 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3507 if (guest_data_size
< spec
->next
) {
3508 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3511 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3512 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3513 cur_data
= argptr
+ spec
->next
;
3514 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3520 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3521 int count
= *(uint32_t*)hdata
;
3522 uint64_t *hdev
= hdata
+ 8;
3523 uint64_t *gdev
= argptr
+ 8;
3526 *(uint32_t*)argptr
= tswap32(count
);
3527 for (i
= 0; i
< count
; i
++) {
3528 *gdev
= tswap64(*hdev
);
3534 case DM_LIST_VERSIONS
:
3536 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3537 uint32_t remaining_data
= guest_data_size
;
3538 void *cur_data
= argptr
;
3539 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3540 int vers_size
= thunk_type_size(arg_type
, 0);
3543 uint32_t next
= vers
->next
;
3545 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3547 if (remaining_data
< vers
->next
) {
3548 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3551 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3552 strcpy(cur_data
+ vers_size
, vers
->name
);
3553 cur_data
+= vers
->next
;
3554 remaining_data
-= vers
->next
;
3558 vers
= (void*)vers
+ next
;
3563 ret
= -TARGET_EINVAL
;
3566 unlock_user(argptr
, guest_data
, guest_data_size
);
3568 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3570 ret
= -TARGET_EFAULT
;
3573 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3574 unlock_user(argptr
, arg
, target_size
);
3583 static IOCTLEntry ioctl_entries
[] = {
3584 #define IOCTL(cmd, access, ...) \
3585 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3586 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3587 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3592 /* ??? Implement proper locking for ioctls. */
3593 /* do_ioctl() Must return target values and target errnos. */
3594 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3596 const IOCTLEntry
*ie
;
3597 const argtype
*arg_type
;
3599 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3605 if (ie
->target_cmd
== 0) {
3606 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3607 return -TARGET_ENOSYS
;
3609 if (ie
->target_cmd
== cmd
)
3613 arg_type
= ie
->arg_type
;
3615 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3618 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3621 switch(arg_type
[0]) {
3624 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3629 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3633 target_size
= thunk_type_size(arg_type
, 0);
3634 switch(ie
->access
) {
3636 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3637 if (!is_error(ret
)) {
3638 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3640 return -TARGET_EFAULT
;
3641 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3642 unlock_user(argptr
, arg
, target_size
);
3646 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3648 return -TARGET_EFAULT
;
3649 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3650 unlock_user(argptr
, arg
, 0);
3651 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3655 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3657 return -TARGET_EFAULT
;
3658 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3659 unlock_user(argptr
, arg
, 0);
3660 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3661 if (!is_error(ret
)) {
3662 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3664 return -TARGET_EFAULT
;
3665 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3666 unlock_user(argptr
, arg
, target_size
);
3672 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3673 (long)cmd
, arg_type
[0]);
3674 ret
= -TARGET_ENOSYS
;
3680 static const bitmask_transtbl iflag_tbl
[] = {
3681 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3682 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3683 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3684 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3685 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3686 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3687 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3688 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3689 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3690 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3691 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3692 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3693 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3694 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3698 static const bitmask_transtbl oflag_tbl
[] = {
3699 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3700 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3701 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3702 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3703 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3704 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3705 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3706 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3707 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3708 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3709 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3710 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3711 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3712 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3713 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3714 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3715 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3716 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3717 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3718 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3719 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3720 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3721 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3722 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3726 static const bitmask_transtbl cflag_tbl
[] = {
3727 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3728 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3729 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3730 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3731 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3732 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3733 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3734 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3735 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3736 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3737 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3738 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3739 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3740 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3741 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3742 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3743 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3744 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3745 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3746 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3747 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3748 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3749 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3750 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3751 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3752 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3753 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3754 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3755 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3756 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3757 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3761 static const bitmask_transtbl lflag_tbl
[] = {
3762 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3763 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3764 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3765 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3766 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3767 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3768 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3769 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3770 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3771 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3772 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3773 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3774 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3775 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3776 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3780 static void target_to_host_termios (void *dst
, const void *src
)
3782 struct host_termios
*host
= dst
;
3783 const struct target_termios
*target
= src
;
3786 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3788 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3790 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3792 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3793 host
->c_line
= target
->c_line
;
3795 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3796 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3797 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3798 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3799 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3800 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3801 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3802 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3803 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3804 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3805 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3806 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3807 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3808 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3809 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3810 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3811 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3812 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3815 static void host_to_target_termios (void *dst
, const void *src
)
3817 struct target_termios
*target
= dst
;
3818 const struct host_termios
*host
= src
;
3821 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3823 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3825 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3827 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3828 target
->c_line
= host
->c_line
;
3830 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3831 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3832 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3833 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3834 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3835 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3836 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3837 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3838 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3839 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3840 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3841 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3842 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3843 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3844 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3845 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3846 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3847 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3850 static const StructEntry struct_termios_def
= {
3851 .convert
= { host_to_target_termios
, target_to_host_termios
},
3852 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3853 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3856 static bitmask_transtbl mmap_flags_tbl
[] = {
3857 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3858 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3859 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3860 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3861 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3862 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3863 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3864 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3868 #if defined(TARGET_I386)
3870 /* NOTE: there is really one LDT for all the threads */
3871 static uint8_t *ldt_table
;
3873 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3880 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3881 if (size
> bytecount
)
3883 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3885 return -TARGET_EFAULT
;
3886 /* ??? Should this by byteswapped? */
3887 memcpy(p
, ldt_table
, size
);
3888 unlock_user(p
, ptr
, size
);
3892 /* XXX: add locking support */
3893 static abi_long
write_ldt(CPUX86State
*env
,
3894 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3896 struct target_modify_ldt_ldt_s ldt_info
;
3897 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3898 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3899 int seg_not_present
, useable
, lm
;
3900 uint32_t *lp
, entry_1
, entry_2
;
3902 if (bytecount
!= sizeof(ldt_info
))
3903 return -TARGET_EINVAL
;
3904 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3905 return -TARGET_EFAULT
;
3906 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3907 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
3908 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3909 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3910 unlock_user_struct(target_ldt_info
, ptr
, 0);
3912 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3913 return -TARGET_EINVAL
;
3914 seg_32bit
= ldt_info
.flags
& 1;
3915 contents
= (ldt_info
.flags
>> 1) & 3;
3916 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3917 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3918 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3919 useable
= (ldt_info
.flags
>> 6) & 1;
3923 lm
= (ldt_info
.flags
>> 7) & 1;
3925 if (contents
== 3) {
3927 return -TARGET_EINVAL
;
3928 if (seg_not_present
== 0)
3929 return -TARGET_EINVAL
;
3931 /* allocate the LDT */
3933 env
->ldt
.base
= target_mmap(0,
3934 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3935 PROT_READ
|PROT_WRITE
,
3936 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3937 if (env
->ldt
.base
== -1)
3938 return -TARGET_ENOMEM
;
3939 memset(g2h(env
->ldt
.base
), 0,
3940 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3941 env
->ldt
.limit
= 0xffff;
3942 ldt_table
= g2h(env
->ldt
.base
);
3945 /* NOTE: same code as Linux kernel */
3946 /* Allow LDTs to be cleared by the user. */
3947 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3950 read_exec_only
== 1 &&
3952 limit_in_pages
== 0 &&
3953 seg_not_present
== 1 &&
3961 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3962 (ldt_info
.limit
& 0x0ffff);
3963 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3964 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3965 (ldt_info
.limit
& 0xf0000) |
3966 ((read_exec_only
^ 1) << 9) |
3968 ((seg_not_present
^ 1) << 15) |
3970 (limit_in_pages
<< 23) |
3974 entry_2
|= (useable
<< 20);
3976 /* Install the new entry ... */
3978 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3979 lp
[0] = tswap32(entry_1
);
3980 lp
[1] = tswap32(entry_2
);
3984 /* specific and weird i386 syscalls */
3985 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3986 unsigned long bytecount
)
3992 ret
= read_ldt(ptr
, bytecount
);
3995 ret
= write_ldt(env
, ptr
, bytecount
, 1);
3998 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4001 ret
= -TARGET_ENOSYS
;
4007 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4008 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4010 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4011 struct target_modify_ldt_ldt_s ldt_info
;
4012 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4013 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4014 int seg_not_present
, useable
, lm
;
4015 uint32_t *lp
, entry_1
, entry_2
;
4018 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4019 if (!target_ldt_info
)
4020 return -TARGET_EFAULT
;
4021 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4022 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4023 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4024 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4025 if (ldt_info
.entry_number
== -1) {
4026 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4027 if (gdt_table
[i
] == 0) {
4028 ldt_info
.entry_number
= i
;
4029 target_ldt_info
->entry_number
= tswap32(i
);
4034 unlock_user_struct(target_ldt_info
, ptr
, 1);
4036 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4037 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4038 return -TARGET_EINVAL
;
4039 seg_32bit
= ldt_info
.flags
& 1;
4040 contents
= (ldt_info
.flags
>> 1) & 3;
4041 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4042 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4043 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4044 useable
= (ldt_info
.flags
>> 6) & 1;
4048 lm
= (ldt_info
.flags
>> 7) & 1;
4051 if (contents
== 3) {
4052 if (seg_not_present
== 0)
4053 return -TARGET_EINVAL
;
4056 /* NOTE: same code as Linux kernel */
4057 /* Allow LDTs to be cleared by the user. */
4058 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4059 if ((contents
== 0 &&
4060 read_exec_only
== 1 &&
4062 limit_in_pages
== 0 &&
4063 seg_not_present
== 1 &&
4071 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4072 (ldt_info
.limit
& 0x0ffff);
4073 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4074 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4075 (ldt_info
.limit
& 0xf0000) |
4076 ((read_exec_only
^ 1) << 9) |
4078 ((seg_not_present
^ 1) << 15) |
4080 (limit_in_pages
<< 23) |
4085 /* Install the new entry ... */
4087 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4088 lp
[0] = tswap32(entry_1
);
4089 lp
[1] = tswap32(entry_2
);
4093 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4095 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4096 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4097 uint32_t base_addr
, limit
, flags
;
4098 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4099 int seg_not_present
, useable
, lm
;
4100 uint32_t *lp
, entry_1
, entry_2
;
4102 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4103 if (!target_ldt_info
)
4104 return -TARGET_EFAULT
;
4105 idx
= tswap32(target_ldt_info
->entry_number
);
4106 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4107 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4108 unlock_user_struct(target_ldt_info
, ptr
, 1);
4109 return -TARGET_EINVAL
;
4111 lp
= (uint32_t *)(gdt_table
+ idx
);
4112 entry_1
= tswap32(lp
[0]);
4113 entry_2
= tswap32(lp
[1]);
4115 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4116 contents
= (entry_2
>> 10) & 3;
4117 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4118 seg_32bit
= (entry_2
>> 22) & 1;
4119 limit_in_pages
= (entry_2
>> 23) & 1;
4120 useable
= (entry_2
>> 20) & 1;
4124 lm
= (entry_2
>> 21) & 1;
4126 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4127 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4128 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4129 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4130 base_addr
= (entry_1
>> 16) |
4131 (entry_2
& 0xff000000) |
4132 ((entry_2
& 0xff) << 16);
4133 target_ldt_info
->base_addr
= tswapal(base_addr
);
4134 target_ldt_info
->limit
= tswap32(limit
);
4135 target_ldt_info
->flags
= tswap32(flags
);
4136 unlock_user_struct(target_ldt_info
, ptr
, 1);
4139 #endif /* TARGET_I386 && TARGET_ABI32 */
4141 #ifndef TARGET_ABI32
4142 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4149 case TARGET_ARCH_SET_GS
:
4150 case TARGET_ARCH_SET_FS
:
4151 if (code
== TARGET_ARCH_SET_GS
)
4155 cpu_x86_load_seg(env
, idx
, 0);
4156 env
->segs
[idx
].base
= addr
;
4158 case TARGET_ARCH_GET_GS
:
4159 case TARGET_ARCH_GET_FS
:
4160 if (code
== TARGET_ARCH_GET_GS
)
4164 val
= env
->segs
[idx
].base
;
4165 if (put_user(val
, addr
, abi_ulong
))
4166 ret
= -TARGET_EFAULT
;
4169 ret
= -TARGET_EINVAL
;
4176 #endif /* defined(TARGET_I386) */
4178 #define NEW_STACK_SIZE 0x40000
4180 #if defined(CONFIG_USE_NPTL)
4182 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4185 pthread_mutex_t mutex
;
4186 pthread_cond_t cond
;
4189 abi_ulong child_tidptr
;
4190 abi_ulong parent_tidptr
;
4194 static void *clone_func(void *arg
)
4196 new_thread_info
*info
= arg
;
4202 ts
= (TaskState
*)thread_env
->opaque
;
4203 info
->tid
= gettid();
4204 env
->host_tid
= info
->tid
;
4206 if (info
->child_tidptr
)
4207 put_user_u32(info
->tid
, info
->child_tidptr
);
4208 if (info
->parent_tidptr
)
4209 put_user_u32(info
->tid
, info
->parent_tidptr
);
4210 /* Enable signals. */
4211 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4212 /* Signal to the parent that we're ready. */
4213 pthread_mutex_lock(&info
->mutex
);
4214 pthread_cond_broadcast(&info
->cond
);
4215 pthread_mutex_unlock(&info
->mutex
);
4216 /* Wait until the parent has finshed initializing the tls state. */
4217 pthread_mutex_lock(&clone_lock
);
4218 pthread_mutex_unlock(&clone_lock
);
4225 static int clone_func(void *arg
)
4227 CPUArchState
*env
= arg
;
4234 /* do_fork() Must return host values and target errnos (unlike most
4235 do_*() functions). */
4236 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4237 abi_ulong parent_tidptr
, target_ulong newtls
,
4238 abi_ulong child_tidptr
)
4242 CPUArchState
*new_env
;
4243 #if defined(CONFIG_USE_NPTL)
4244 unsigned int nptl_flags
;
4250 /* Emulate vfork() with fork() */
4251 if (flags
& CLONE_VFORK
)
4252 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4254 if (flags
& CLONE_VM
) {
4255 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
4256 #if defined(CONFIG_USE_NPTL)
4257 new_thread_info info
;
4258 pthread_attr_t attr
;
4260 ts
= g_malloc0(sizeof(TaskState
));
4261 init_task_state(ts
);
4262 /* we create a new CPU instance. */
4263 new_env
= cpu_copy(env
);
4264 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4265 cpu_state_reset(new_env
);
4267 /* Init regs that differ from the parent. */
4268 cpu_clone_regs(new_env
, newsp
);
4269 new_env
->opaque
= ts
;
4270 ts
->bprm
= parent_ts
->bprm
;
4271 ts
->info
= parent_ts
->info
;
4272 #if defined(CONFIG_USE_NPTL)
4274 flags
&= ~CLONE_NPTL_FLAGS2
;
4276 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4277 ts
->child_tidptr
= child_tidptr
;
4280 if (nptl_flags
& CLONE_SETTLS
)
4281 cpu_set_tls (new_env
, newtls
);
4283 /* Grab a mutex so that thread setup appears atomic. */
4284 pthread_mutex_lock(&clone_lock
);
4286 memset(&info
, 0, sizeof(info
));
4287 pthread_mutex_init(&info
.mutex
, NULL
);
4288 pthread_mutex_lock(&info
.mutex
);
4289 pthread_cond_init(&info
.cond
, NULL
);
4291 if (nptl_flags
& CLONE_CHILD_SETTID
)
4292 info
.child_tidptr
= child_tidptr
;
4293 if (nptl_flags
& CLONE_PARENT_SETTID
)
4294 info
.parent_tidptr
= parent_tidptr
;
4296 ret
= pthread_attr_init(&attr
);
4297 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4298 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4299 /* It is not safe to deliver signals until the child has finished
4300 initializing, so temporarily block all signals. */
4301 sigfillset(&sigmask
);
4302 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4304 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4305 /* TODO: Free new CPU state if thread creation failed. */
4307 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4308 pthread_attr_destroy(&attr
);
4310 /* Wait for the child to initialize. */
4311 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4313 if (flags
& CLONE_PARENT_SETTID
)
4314 put_user_u32(ret
, parent_tidptr
);
4318 pthread_mutex_unlock(&info
.mutex
);
4319 pthread_cond_destroy(&info
.cond
);
4320 pthread_mutex_destroy(&info
.mutex
);
4321 pthread_mutex_unlock(&clone_lock
);
4323 if (flags
& CLONE_NPTL_FLAGS2
)
4325 /* This is probably going to die very quickly, but do it anyway. */
4326 new_stack
= g_malloc0 (NEW_STACK_SIZE
);
4328 ret
= __clone2(clone_func
, new_stack
, NEW_STACK_SIZE
, flags
, new_env
);
4330 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
4334 /* if no CLONE_VM, we consider it is a fork */
4335 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4340 /* Child Process. */
4341 cpu_clone_regs(env
, newsp
);
4343 #if defined(CONFIG_USE_NPTL)
4344 /* There is a race condition here. The parent process could
4345 theoretically read the TID in the child process before the child
4346 tid is set. This would require using either ptrace
4347 (not implemented) or having *_tidptr to point at a shared memory
4348 mapping. We can't repeat the spinlock hack used above because
4349 the child process gets its own copy of the lock. */
4350 if (flags
& CLONE_CHILD_SETTID
)
4351 put_user_u32(gettid(), child_tidptr
);
4352 if (flags
& CLONE_PARENT_SETTID
)
4353 put_user_u32(gettid(), parent_tidptr
);
4354 ts
= (TaskState
*)env
->opaque
;
4355 if (flags
& CLONE_SETTLS
)
4356 cpu_set_tls (env
, newtls
);
4357 if (flags
& CLONE_CHILD_CLEARTID
)
4358 ts
->child_tidptr
= child_tidptr
;
4367 /* warning : doesn't handle linux specific flags... */
4368 static int target_to_host_fcntl_cmd(int cmd
)
4371 case TARGET_F_DUPFD
:
4372 case TARGET_F_GETFD
:
4373 case TARGET_F_SETFD
:
4374 case TARGET_F_GETFL
:
4375 case TARGET_F_SETFL
:
4377 case TARGET_F_GETLK
:
4379 case TARGET_F_SETLK
:
4381 case TARGET_F_SETLKW
:
4383 case TARGET_F_GETOWN
:
4385 case TARGET_F_SETOWN
:
4387 case TARGET_F_GETSIG
:
4389 case TARGET_F_SETSIG
:
4391 #if TARGET_ABI_BITS == 32
4392 case TARGET_F_GETLK64
:
4394 case TARGET_F_SETLK64
:
4396 case TARGET_F_SETLKW64
:
4399 case TARGET_F_SETLEASE
:
4401 case TARGET_F_GETLEASE
:
4403 #ifdef F_DUPFD_CLOEXEC
4404 case TARGET_F_DUPFD_CLOEXEC
:
4405 return F_DUPFD_CLOEXEC
;
4407 case TARGET_F_NOTIFY
:
4410 return -TARGET_EINVAL
;
4412 return -TARGET_EINVAL
;
4415 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4418 struct target_flock
*target_fl
;
4419 struct flock64 fl64
;
4420 struct target_flock64
*target_fl64
;
4422 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4424 if (host_cmd
== -TARGET_EINVAL
)
4428 case TARGET_F_GETLK
:
4429 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4430 return -TARGET_EFAULT
;
4431 fl
.l_type
= tswap16(target_fl
->l_type
);
4432 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4433 fl
.l_start
= tswapal(target_fl
->l_start
);
4434 fl
.l_len
= tswapal(target_fl
->l_len
);
4435 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4436 unlock_user_struct(target_fl
, arg
, 0);
4437 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4439 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4440 return -TARGET_EFAULT
;
4441 target_fl
->l_type
= tswap16(fl
.l_type
);
4442 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4443 target_fl
->l_start
= tswapal(fl
.l_start
);
4444 target_fl
->l_len
= tswapal(fl
.l_len
);
4445 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4446 unlock_user_struct(target_fl
, arg
, 1);
4450 case TARGET_F_SETLK
:
4451 case TARGET_F_SETLKW
:
4452 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4453 return -TARGET_EFAULT
;
4454 fl
.l_type
= tswap16(target_fl
->l_type
);
4455 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4456 fl
.l_start
= tswapal(target_fl
->l_start
);
4457 fl
.l_len
= tswapal(target_fl
->l_len
);
4458 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4459 unlock_user_struct(target_fl
, arg
, 0);
4460 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4463 case TARGET_F_GETLK64
:
4464 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4465 return -TARGET_EFAULT
;
4466 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4467 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4468 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4469 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4470 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4471 unlock_user_struct(target_fl64
, arg
, 0);
4472 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4474 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4475 return -TARGET_EFAULT
;
4476 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
4477 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4478 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4479 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4480 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4481 unlock_user_struct(target_fl64
, arg
, 1);
4484 case TARGET_F_SETLK64
:
4485 case TARGET_F_SETLKW64
:
4486 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4487 return -TARGET_EFAULT
;
4488 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4489 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4490 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4491 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4492 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4493 unlock_user_struct(target_fl64
, arg
, 0);
4494 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4497 case TARGET_F_GETFL
:
4498 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4500 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4504 case TARGET_F_SETFL
:
4505 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4508 case TARGET_F_SETOWN
:
4509 case TARGET_F_GETOWN
:
4510 case TARGET_F_SETSIG
:
4511 case TARGET_F_GETSIG
:
4512 case TARGET_F_SETLEASE
:
4513 case TARGET_F_GETLEASE
:
4514 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4518 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4526 static inline int high2lowuid(int uid
)
4534 static inline int high2lowgid(int gid
)
4542 static inline int low2highuid(int uid
)
4544 if ((int16_t)uid
== -1)
4550 static inline int low2highgid(int gid
)
4552 if ((int16_t)gid
== -1)
4557 static inline int tswapid(int id
)
4561 #else /* !USE_UID16 */
4562 static inline int high2lowuid(int uid
)
4566 static inline int high2lowgid(int gid
)
4570 static inline int low2highuid(int uid
)
4574 static inline int low2highgid(int gid
)
4578 static inline int tswapid(int id
)
4582 #endif /* USE_UID16 */
4584 void syscall_init(void)
4587 const argtype
*arg_type
;
4591 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4592 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4593 #include "syscall_types.h"
4595 #undef STRUCT_SPECIAL
4597 /* we patch the ioctl size if necessary. We rely on the fact that
4598 no ioctl has all the bits at '1' in the size field */
4600 while (ie
->target_cmd
!= 0) {
4601 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4602 TARGET_IOC_SIZEMASK
) {
4603 arg_type
= ie
->arg_type
;
4604 if (arg_type
[0] != TYPE_PTR
) {
4605 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4610 size
= thunk_type_size(arg_type
, 0);
4611 ie
->target_cmd
= (ie
->target_cmd
&
4612 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4613 (size
<< TARGET_IOC_SIZESHIFT
);
4616 /* Build target_to_host_errno_table[] table from
4617 * host_to_target_errno_table[]. */
4618 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
4619 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4621 /* automatic consistency check if same arch */
4622 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4623 (defined(__x86_64__) && defined(TARGET_X86_64))
4624 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4625 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4626 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine the two 32-bit register halves of a 64-bit file offset,
   ordered according to the guest's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in one register; the second
   argument is ignored. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* Emulate truncate64(2): reassemble the 64-bit length from two guest
   registers, skipping the alignment padding register on ABIs that pass
   64-bit values in even/odd register pairs. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64(2); same register-pair handling as
   target_truncate64() above. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
4677 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4678 abi_ulong target_addr
)
4680 struct target_timespec
*target_ts
;
4682 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4683 return -TARGET_EFAULT
;
4684 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
4685 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
4686 unlock_user_struct(target_ts
, target_addr
, 0);
4690 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4691 struct timespec
*host_ts
)
4693 struct target_timespec
*target_ts
;
4695 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4696 return -TARGET_EFAULT
;
4697 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
4698 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
4699 unlock_user_struct(target_ts
, target_addr
, 1);
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Copy a host struct stat into the guest's stat64 layout at target_addr.
   On ARM the EABI and OABI layouts differ, hence the runtime eabi check. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
        struct target_stat *target_st;
#else
        struct target_stat64 *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                                   pts, NULL, 0));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
4835 int get_osversion(void)
4837 static int osversion
;
4838 struct new_utsname buf
;
4843 if (qemu_uname_release
&& *qemu_uname_release
) {
4844 s
= qemu_uname_release
;
4846 if (sys_uname(&buf
))
4851 for (i
= 0; i
< 3; i
++) {
4853 while (*s
>= '0' && *s
<= '9') {
4858 tmp
= (tmp
<< 8) + n
;
4867 static int open_self_maps(void *cpu_env
, int fd
)
4869 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
4871 dprintf(fd
, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4872 (unsigned long long)ts
->info
->stack_limit
,
4873 (unsigned long long)(ts
->stack_base
+ (TARGET_PAGE_SIZE
- 1))
4875 (unsigned long long)ts
->stack_base
);
4880 static int open_self_stat(void *cpu_env
, int fd
)
4882 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
4883 abi_ulong start_stack
= ts
->info
->start_stack
;
4886 for (i
= 0; i
< 44; i
++) {
4894 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
4895 } else if (i
== 1) {
4897 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
4898 } else if (i
== 27) {
4901 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
4903 /* for the rest, there is MasterCard */
4904 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
4908 if (write(fd
, buf
, len
) != len
) {
4916 static int open_self_auxv(void *cpu_env
, int fd
)
4918 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
4919 abi_ulong auxv
= ts
->info
->saved_auxv
;
4920 abi_ulong len
= ts
->info
->auxv_len
;
4924 * Auxiliary vector is stored in target process stack.
4925 * read in whole auxv vector and copy it to file
4927 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
4931 r
= write(fd
, ptr
, len
);
4938 lseek(fd
, 0, SEEK_SET
);
4939 unlock_user(ptr
, auxv
, len
);
4945 static int do_open(void *cpu_env
, const char *pathname
, int flags
, mode_t mode
)
4948 const char *filename
;
4949 int (*fill
)(void *cpu_env
, int fd
);
4951 const struct fake_open
*fake_open
;
4952 static const struct fake_open fakes
[] = {
4953 { "/proc/self/maps", open_self_maps
},
4954 { "/proc/self/stat", open_self_stat
},
4955 { "/proc/self/auxv", open_self_auxv
},
4959 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
4960 if (!strncmp(pathname
, fake_open
->filename
,
4961 strlen(fake_open
->filename
))) {
4966 if (fake_open
->filename
) {
4968 char filename
[PATH_MAX
];
4971 /* create temporary file to map stat to */
4972 tmpdir
= getenv("TMPDIR");
4975 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
4976 fd
= mkstemp(filename
);
4982 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
4986 lseek(fd
, 0, SEEK_SET
);
4991 return get_errno(open(path(pathname
), flags
, mode
));
4994 /* do_syscall() should always have a single exit point at the end so
4995 that actions, such as logging of syscall results, can be performed.
4996 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4997 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
4998 abi_long arg2
, abi_long arg3
, abi_long arg4
,
4999 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5008 gemu_log("syscall %d", num
);
5011 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5014 case TARGET_NR_exit
:
5015 #ifdef CONFIG_USE_NPTL
5016 /* In old applications this may be used to implement _exit(2).
5017 However in threaded applictions it is used for thread termination,
5018 and _exit_group is used for application termination.
5019 Do thread termination if we have more then one thread. */
5020 /* FIXME: This probably breaks if a signal arrives. We should probably
5021 be disabling signals. */
5022 if (first_cpu
->next_cpu
) {
5024 CPUArchState
**lastp
;
5030 while (p
&& p
!= (CPUArchState
*)cpu_env
) {
5031 lastp
= &p
->next_cpu
;
5034 /* If we didn't find the CPU for this thread then something is
5038 /* Remove the CPU from the list. */
5039 *lastp
= p
->next_cpu
;
5041 ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5042 if (ts
->child_tidptr
) {
5043 put_user_u32(0, ts
->child_tidptr
);
5044 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5056 gdb_exit(cpu_env
, arg1
);
5058 ret
= 0; /* avoid warning */
5060 case TARGET_NR_read
:
5064 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5066 ret
= get_errno(read(arg1
, p
, arg3
));
5067 unlock_user(p
, arg2
, ret
);
5070 case TARGET_NR_write
:
5071 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5073 ret
= get_errno(write(arg1
, p
, arg3
));
5074 unlock_user(p
, arg2
, 0);
5076 case TARGET_NR_open
:
5077 if (!(p
= lock_user_string(arg1
)))
5079 ret
= get_errno(do_open(cpu_env
, p
,
5080 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5082 unlock_user(p
, arg1
, 0);
5084 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5085 case TARGET_NR_openat
:
5086 if (!(p
= lock_user_string(arg2
)))
5088 ret
= get_errno(sys_openat(arg1
,
5090 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5092 unlock_user(p
, arg2
, 0);
5095 case TARGET_NR_close
:
5096 ret
= get_errno(close(arg1
));
5101 case TARGET_NR_fork
:
5102 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5104 #ifdef TARGET_NR_waitpid
5105 case TARGET_NR_waitpid
:
5108 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5109 if (!is_error(ret
) && arg2
&& ret
5110 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5115 #ifdef TARGET_NR_waitid
5116 case TARGET_NR_waitid
:
5120 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5121 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5122 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5124 host_to_target_siginfo(p
, &info
);
5125 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
5130 #ifdef TARGET_NR_creat /* not on alpha */
5131 case TARGET_NR_creat
:
5132 if (!(p
= lock_user_string(arg1
)))
5134 ret
= get_errno(creat(p
, arg2
));
5135 unlock_user(p
, arg1
, 0);
5138 case TARGET_NR_link
:
5141 p
= lock_user_string(arg1
);
5142 p2
= lock_user_string(arg2
);
5144 ret
= -TARGET_EFAULT
;
5146 ret
= get_errno(link(p
, p2
));
5147 unlock_user(p2
, arg2
, 0);
5148 unlock_user(p
, arg1
, 0);
5151 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5152 case TARGET_NR_linkat
:
5157 p
= lock_user_string(arg2
);
5158 p2
= lock_user_string(arg4
);
5160 ret
= -TARGET_EFAULT
;
5162 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
5163 unlock_user(p
, arg2
, 0);
5164 unlock_user(p2
, arg4
, 0);
5168 case TARGET_NR_unlink
:
5169 if (!(p
= lock_user_string(arg1
)))
5171 ret
= get_errno(unlink(p
));
5172 unlock_user(p
, arg1
, 0);
5174 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5175 case TARGET_NR_unlinkat
:
5176 if (!(p
= lock_user_string(arg2
)))
5178 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
5179 unlock_user(p
, arg2
, 0);
5182 case TARGET_NR_execve
:
5184 char **argp
, **envp
;
5187 abi_ulong guest_argp
;
5188 abi_ulong guest_envp
;
5195 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
5196 if (get_user_ual(addr
, gp
))
5204 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
5205 if (get_user_ual(addr
, gp
))
5212 argp
= alloca((argc
+ 1) * sizeof(void *));
5213 envp
= alloca((envc
+ 1) * sizeof(void *));
5215 for (gp
= guest_argp
, q
= argp
; gp
;
5216 gp
+= sizeof(abi_ulong
), q
++) {
5217 if (get_user_ual(addr
, gp
))
5221 if (!(*q
= lock_user_string(addr
)))
5223 total_size
+= strlen(*q
) + 1;
5227 for (gp
= guest_envp
, q
= envp
; gp
;
5228 gp
+= sizeof(abi_ulong
), q
++) {
5229 if (get_user_ual(addr
, gp
))
5233 if (!(*q
= lock_user_string(addr
)))
5235 total_size
+= strlen(*q
) + 1;
5239 /* This case will not be caught by the host's execve() if its
5240 page size is bigger than the target's. */
5241 if (total_size
> MAX_ARG_PAGES
* TARGET_PAGE_SIZE
) {
5242 ret
= -TARGET_E2BIG
;
5245 if (!(p
= lock_user_string(arg1
)))
5247 ret
= get_errno(execve(p
, argp
, envp
));
5248 unlock_user(p
, arg1
, 0);
5253 ret
= -TARGET_EFAULT
;
5256 for (gp
= guest_argp
, q
= argp
; *q
;
5257 gp
+= sizeof(abi_ulong
), q
++) {
5258 if (get_user_ual(addr
, gp
)
5261 unlock_user(*q
, addr
, 0);
5263 for (gp
= guest_envp
, q
= envp
; *q
;
5264 gp
+= sizeof(abi_ulong
), q
++) {
5265 if (get_user_ual(addr
, gp
)
5268 unlock_user(*q
, addr
, 0);
5272 case TARGET_NR_chdir
:
5273 if (!(p
= lock_user_string(arg1
)))
5275 ret
= get_errno(chdir(p
));
5276 unlock_user(p
, arg1
, 0);
5278 #ifdef TARGET_NR_time
5279 case TARGET_NR_time
:
5282 ret
= get_errno(time(&host_time
));
5285 && put_user_sal(host_time
, arg1
))
5290 case TARGET_NR_mknod
:
5291 if (!(p
= lock_user_string(arg1
)))
5293 ret
= get_errno(mknod(p
, arg2
, arg3
));
5294 unlock_user(p
, arg1
, 0);
5296 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5297 case TARGET_NR_mknodat
:
5298 if (!(p
= lock_user_string(arg2
)))
5300 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
5301 unlock_user(p
, arg2
, 0);
5304 case TARGET_NR_chmod
:
5305 if (!(p
= lock_user_string(arg1
)))
5307 ret
= get_errno(chmod(p
, arg2
));
5308 unlock_user(p
, arg1
, 0);
5310 #ifdef TARGET_NR_break
5311 case TARGET_NR_break
:
5314 #ifdef TARGET_NR_oldstat
5315 case TARGET_NR_oldstat
:
5318 case TARGET_NR_lseek
:
5319 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
5321 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5322 /* Alpha specific */
5323 case TARGET_NR_getxpid
:
5324 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
5325 ret
= get_errno(getpid());
5328 #ifdef TARGET_NR_getpid
5329 case TARGET_NR_getpid
:
5330 ret
= get_errno(getpid());
5333 case TARGET_NR_mount
:
5335 /* need to look at the data field */
5337 p
= lock_user_string(arg1
);
5338 p2
= lock_user_string(arg2
);
5339 p3
= lock_user_string(arg3
);
5340 if (!p
|| !p2
|| !p3
)
5341 ret
= -TARGET_EFAULT
;
5343 /* FIXME - arg5 should be locked, but it isn't clear how to
5344 * do that since it's not guaranteed to be a NULL-terminated
5348 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
5350 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
5352 unlock_user(p
, arg1
, 0);
5353 unlock_user(p2
, arg2
, 0);
5354 unlock_user(p3
, arg3
, 0);
5357 #ifdef TARGET_NR_umount
5358 case TARGET_NR_umount
:
5359 if (!(p
= lock_user_string(arg1
)))
5361 ret
= get_errno(umount(p
));
5362 unlock_user(p
, arg1
, 0);
5365 #ifdef TARGET_NR_stime /* not on alpha */
5366 case TARGET_NR_stime
:
5369 if (get_user_sal(host_time
, arg1
))
5371 ret
= get_errno(stime(&host_time
));
5375 case TARGET_NR_ptrace
:
5377 #ifdef TARGET_NR_alarm /* not on alpha */
5378 case TARGET_NR_alarm
:
5382 #ifdef TARGET_NR_oldfstat
5383 case TARGET_NR_oldfstat
:
5386 #ifdef TARGET_NR_pause /* not on alpha */
5387 case TARGET_NR_pause
:
5388 ret
= get_errno(pause());
5391 #ifdef TARGET_NR_utime
5392 case TARGET_NR_utime
:
5394 struct utimbuf tbuf
, *host_tbuf
;
5395 struct target_utimbuf
*target_tbuf
;
5397 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
5399 tbuf
.actime
= tswapal(target_tbuf
->actime
);
5400 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
5401 unlock_user_struct(target_tbuf
, arg2
, 0);
5406 if (!(p
= lock_user_string(arg1
)))
5408 ret
= get_errno(utime(p
, host_tbuf
));
5409 unlock_user(p
, arg1
, 0);
5413 case TARGET_NR_utimes
:
5415 struct timeval
*tvp
, tv
[2];
5417 if (copy_from_user_timeval(&tv
[0], arg2
)
5418 || copy_from_user_timeval(&tv
[1],
5419 arg2
+ sizeof(struct target_timeval
)))
5425 if (!(p
= lock_user_string(arg1
)))
5427 ret
= get_errno(utimes(p
, tvp
));
5428 unlock_user(p
, arg1
, 0);
5431 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5432 case TARGET_NR_futimesat
:
5434 struct timeval
*tvp
, tv
[2];
5436 if (copy_from_user_timeval(&tv
[0], arg3
)
5437 || copy_from_user_timeval(&tv
[1],
5438 arg3
+ sizeof(struct target_timeval
)))
5444 if (!(p
= lock_user_string(arg2
)))
5446 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
5447 unlock_user(p
, arg2
, 0);
5451 #ifdef TARGET_NR_stty
5452 case TARGET_NR_stty
:
5455 #ifdef TARGET_NR_gtty
5456 case TARGET_NR_gtty
:
5459 case TARGET_NR_access
:
5460 if (!(p
= lock_user_string(arg1
)))
5462 ret
= get_errno(access(path(p
), arg2
));
5463 unlock_user(p
, arg1
, 0);
5465 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5466 case TARGET_NR_faccessat
:
5467 if (!(p
= lock_user_string(arg2
)))
5469 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
5470 unlock_user(p
, arg2
, 0);
5473 #ifdef TARGET_NR_nice /* not on alpha */
5474 case TARGET_NR_nice
:
5475 ret
= get_errno(nice(arg1
));
5478 #ifdef TARGET_NR_ftime
5479 case TARGET_NR_ftime
:
5482 case TARGET_NR_sync
:
5486 case TARGET_NR_kill
:
5487 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5489 case TARGET_NR_rename
:
5492 p
= lock_user_string(arg1
);
5493 p2
= lock_user_string(arg2
);
5495 ret
= -TARGET_EFAULT
;
5497 ret
= get_errno(rename(p
, p2
));
5498 unlock_user(p2
, arg2
, 0);
5499 unlock_user(p
, arg1
, 0);
5502 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5503 case TARGET_NR_renameat
:
5506 p
= lock_user_string(arg2
);
5507 p2
= lock_user_string(arg4
);
5509 ret
= -TARGET_EFAULT
;
5511 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
5512 unlock_user(p2
, arg4
, 0);
5513 unlock_user(p
, arg2
, 0);
5517 case TARGET_NR_mkdir
:
5518 if (!(p
= lock_user_string(arg1
)))
5520 ret
= get_errno(mkdir(p
, arg2
));
5521 unlock_user(p
, arg1
, 0);
5523 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5524 case TARGET_NR_mkdirat
:
5525 if (!(p
= lock_user_string(arg2
)))
5527 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
5528 unlock_user(p
, arg2
, 0);
5531 case TARGET_NR_rmdir
:
5532 if (!(p
= lock_user_string(arg1
)))
5534 ret
= get_errno(rmdir(p
));
5535 unlock_user(p
, arg1
, 0);
5538 ret
= get_errno(dup(arg1
));
5540 case TARGET_NR_pipe
:
5541 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5543 #ifdef TARGET_NR_pipe2
5544 case TARGET_NR_pipe2
:
5545 ret
= do_pipe(cpu_env
, arg1
, arg2
, 1);
5548 case TARGET_NR_times
:
5550 struct target_tms
*tmsp
;
5552 ret
= get_errno(times(&tms
));
5554 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5557 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
5558 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
5559 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
5560 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
5563 ret
= host_to_target_clock_t(ret
);
5566 #ifdef TARGET_NR_prof
5567 case TARGET_NR_prof
:
5570 #ifdef TARGET_NR_signal
5571 case TARGET_NR_signal
:
5574 case TARGET_NR_acct
:
5576 ret
= get_errno(acct(NULL
));
5578 if (!(p
= lock_user_string(arg1
)))
5580 ret
= get_errno(acct(path(p
)));
5581 unlock_user(p
, arg1
, 0);
5584 #ifdef TARGET_NR_umount2 /* not on alpha */
5585 case TARGET_NR_umount2
:
5586 if (!(p
= lock_user_string(arg1
)))
5588 ret
= get_errno(umount2(p
, arg2
));
5589 unlock_user(p
, arg1
, 0);
5592 #ifdef TARGET_NR_lock
5593 case TARGET_NR_lock
:
5596 case TARGET_NR_ioctl
:
5597 ret
= do_ioctl(arg1
, arg2
, arg3
);
5599 case TARGET_NR_fcntl
:
5600 ret
= do_fcntl(arg1
, arg2
, arg3
);
5602 #ifdef TARGET_NR_mpx
5606 case TARGET_NR_setpgid
:
5607 ret
= get_errno(setpgid(arg1
, arg2
));
5609 #ifdef TARGET_NR_ulimit
5610 case TARGET_NR_ulimit
:
5613 #ifdef TARGET_NR_oldolduname
5614 case TARGET_NR_oldolduname
:
5617 case TARGET_NR_umask
:
5618 ret
= get_errno(umask(arg1
));
5620 case TARGET_NR_chroot
:
5621 if (!(p
= lock_user_string(arg1
)))
5623 ret
= get_errno(chroot(p
));
5624 unlock_user(p
, arg1
, 0);
5626 case TARGET_NR_ustat
:
5628 case TARGET_NR_dup2
:
5629 ret
= get_errno(dup2(arg1
, arg2
));
5631 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5632 case TARGET_NR_dup3
:
5633 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5636 #ifdef TARGET_NR_getppid /* not on alpha */
5637 case TARGET_NR_getppid
:
5638 ret
= get_errno(getppid());
5641 case TARGET_NR_getpgrp
:
5642 ret
= get_errno(getpgrp());
5644 case TARGET_NR_setsid
:
5645 ret
= get_errno(setsid());
5647 #ifdef TARGET_NR_sigaction
5648 case TARGET_NR_sigaction
:
5650 #if defined(TARGET_ALPHA)
5651 struct target_sigaction act
, oact
, *pact
= 0;
5652 struct target_old_sigaction
*old_act
;
5654 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5656 act
._sa_handler
= old_act
->_sa_handler
;
5657 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5658 act
.sa_flags
= old_act
->sa_flags
;
5659 act
.sa_restorer
= 0;
5660 unlock_user_struct(old_act
, arg2
, 0);
5663 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5664 if (!is_error(ret
) && arg3
) {
5665 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5667 old_act
->_sa_handler
= oact
._sa_handler
;
5668 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5669 old_act
->sa_flags
= oact
.sa_flags
;
5670 unlock_user_struct(old_act
, arg3
, 1);
5672 #elif defined(TARGET_MIPS)
5673 struct target_sigaction act
, oact
, *pact
, *old_act
;
5676 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5678 act
._sa_handler
= old_act
->_sa_handler
;
5679 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5680 act
.sa_flags
= old_act
->sa_flags
;
5681 unlock_user_struct(old_act
, arg2
, 0);
5687 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5689 if (!is_error(ret
) && arg3
) {
5690 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5692 old_act
->_sa_handler
= oact
._sa_handler
;
5693 old_act
->sa_flags
= oact
.sa_flags
;
5694 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5695 old_act
->sa_mask
.sig
[1] = 0;
5696 old_act
->sa_mask
.sig
[2] = 0;
5697 old_act
->sa_mask
.sig
[3] = 0;
5698 unlock_user_struct(old_act
, arg3
, 1);
5701 struct target_old_sigaction
*old_act
;
5702 struct target_sigaction act
, oact
, *pact
;
5704 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5706 act
._sa_handler
= old_act
->_sa_handler
;
5707 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5708 act
.sa_flags
= old_act
->sa_flags
;
5709 act
.sa_restorer
= old_act
->sa_restorer
;
5710 unlock_user_struct(old_act
, arg2
, 0);
5715 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5716 if (!is_error(ret
) && arg3
) {
5717 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5719 old_act
->_sa_handler
= oact
._sa_handler
;
5720 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5721 old_act
->sa_flags
= oact
.sa_flags
;
5722 old_act
->sa_restorer
= oact
.sa_restorer
;
5723 unlock_user_struct(old_act
, arg3
, 1);
5729 case TARGET_NR_rt_sigaction
:
5731 #if defined(TARGET_ALPHA)
5732 struct target_sigaction act
, oact
, *pact
= 0;
5733 struct target_rt_sigaction
*rt_act
;
5734 /* ??? arg4 == sizeof(sigset_t). */
5736 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5738 act
._sa_handler
= rt_act
->_sa_handler
;
5739 act
.sa_mask
= rt_act
->sa_mask
;
5740 act
.sa_flags
= rt_act
->sa_flags
;
5741 act
.sa_restorer
= arg5
;
5742 unlock_user_struct(rt_act
, arg2
, 0);
5745 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5746 if (!is_error(ret
) && arg3
) {
5747 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5749 rt_act
->_sa_handler
= oact
._sa_handler
;
5750 rt_act
->sa_mask
= oact
.sa_mask
;
5751 rt_act
->sa_flags
= oact
.sa_flags
;
5752 unlock_user_struct(rt_act
, arg3
, 1);
5755 struct target_sigaction
*act
;
5756 struct target_sigaction
*oact
;
5759 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5764 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5765 ret
= -TARGET_EFAULT
;
5766 goto rt_sigaction_fail
;
5770 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5773 unlock_user_struct(act
, arg2
, 0);
5775 unlock_user_struct(oact
, arg3
, 1);
5779 #ifdef TARGET_NR_sgetmask /* not on alpha */
5780 case TARGET_NR_sgetmask
:
5783 abi_ulong target_set
;
5784 sigprocmask(0, NULL
, &cur_set
);
5785 host_to_target_old_sigset(&target_set
, &cur_set
);
5790 #ifdef TARGET_NR_ssetmask /* not on alpha */
5791 case TARGET_NR_ssetmask
:
5793 sigset_t set
, oset
, cur_set
;
5794 abi_ulong target_set
= arg1
;
5795 sigprocmask(0, NULL
, &cur_set
);
5796 target_to_host_old_sigset(&set
, &target_set
);
5797 sigorset(&set
, &set
, &cur_set
);
5798 sigprocmask(SIG_SETMASK
, &set
, &oset
);
5799 host_to_target_old_sigset(&target_set
, &oset
);
5804 #ifdef TARGET_NR_sigprocmask
5805 case TARGET_NR_sigprocmask
:
5807 #if defined(TARGET_ALPHA)
5808 sigset_t set
, oldset
;
5813 case TARGET_SIG_BLOCK
:
5816 case TARGET_SIG_UNBLOCK
:
5819 case TARGET_SIG_SETMASK
:
5823 ret
= -TARGET_EINVAL
;
5827 target_to_host_old_sigset(&set
, &mask
);
5829 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
5831 if (!is_error(ret
)) {
5832 host_to_target_old_sigset(&mask
, &oldset
);
5834 ((CPUAlphaState
*)cpu_env
)->[IR_V0
] = 0; /* force no error */
5837 sigset_t set
, oldset
, *set_ptr
;
5842 case TARGET_SIG_BLOCK
:
5845 case TARGET_SIG_UNBLOCK
:
5848 case TARGET_SIG_SETMASK
:
5852 ret
= -TARGET_EINVAL
;
5855 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5857 target_to_host_old_sigset(&set
, p
);
5858 unlock_user(p
, arg2
, 0);
5864 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5865 if (!is_error(ret
) && arg3
) {
5866 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5868 host_to_target_old_sigset(p
, &oldset
);
5869 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5875 case TARGET_NR_rt_sigprocmask
:
5878 sigset_t set
, oldset
, *set_ptr
;
5882 case TARGET_SIG_BLOCK
:
5885 case TARGET_SIG_UNBLOCK
:
5888 case TARGET_SIG_SETMASK
:
5892 ret
= -TARGET_EINVAL
;
5895 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5897 target_to_host_sigset(&set
, p
);
5898 unlock_user(p
, arg2
, 0);
5904 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5905 if (!is_error(ret
) && arg3
) {
5906 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5908 host_to_target_sigset(p
, &oldset
);
5909 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5913 #ifdef TARGET_NR_sigpending
5914 case TARGET_NR_sigpending
:
5917 ret
= get_errno(sigpending(&set
));
5918 if (!is_error(ret
)) {
5919 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5921 host_to_target_old_sigset(p
, &set
);
5922 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5927 case TARGET_NR_rt_sigpending
:
5930 ret
= get_errno(sigpending(&set
));
5931 if (!is_error(ret
)) {
5932 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5934 host_to_target_sigset(p
, &set
);
5935 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5939 #ifdef TARGET_NR_sigsuspend
5940 case TARGET_NR_sigsuspend
:
5943 #if defined(TARGET_ALPHA)
5944 abi_ulong mask
= arg1
;
5945 target_to_host_old_sigset(&set
, &mask
);
5947 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5949 target_to_host_old_sigset(&set
, p
);
5950 unlock_user(p
, arg1
, 0);
5952 ret
= get_errno(sigsuspend(&set
));
5956 case TARGET_NR_rt_sigsuspend
:
5959 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5961 target_to_host_sigset(&set
, p
);
5962 unlock_user(p
, arg1
, 0);
5963 ret
= get_errno(sigsuspend(&set
));
5966 case TARGET_NR_rt_sigtimedwait
:
5969 struct timespec uts
, *puts
;
5972 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5974 target_to_host_sigset(&set
, p
);
5975 unlock_user(p
, arg1
, 0);
5978 target_to_host_timespec(puts
, arg3
);
5982 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
5983 if (!is_error(ret
) && arg2
) {
5984 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
5986 host_to_target_siginfo(p
, &uinfo
);
5987 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
5991 case TARGET_NR_rt_sigqueueinfo
:
5994 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
5996 target_to_host_siginfo(&uinfo
, p
);
5997 unlock_user(p
, arg1
, 0);
5998 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
6001 #ifdef TARGET_NR_sigreturn
6002 case TARGET_NR_sigreturn
:
6003 /* NOTE: ret is eax, so not transcoding must be done */
6004 ret
= do_sigreturn(cpu_env
);
6007 case TARGET_NR_rt_sigreturn
:
6008 /* NOTE: ret is eax, so not transcoding must be done */
6009 ret
= do_rt_sigreturn(cpu_env
);
6011 case TARGET_NR_sethostname
:
6012 if (!(p
= lock_user_string(arg1
)))
6014 ret
= get_errno(sethostname(p
, arg2
));
6015 unlock_user(p
, arg1
, 0);
6017 case TARGET_NR_setrlimit
:
6019 int resource
= target_to_host_resource(arg1
);
6020 struct target_rlimit
*target_rlim
;
6022 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
6024 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
6025 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
6026 unlock_user_struct(target_rlim
, arg2
, 0);
6027 ret
= get_errno(setrlimit(resource
, &rlim
));
6030 case TARGET_NR_getrlimit
:
6032 int resource
= target_to_host_resource(arg1
);
6033 struct target_rlimit
*target_rlim
;
6036 ret
= get_errno(getrlimit(resource
, &rlim
));
6037 if (!is_error(ret
)) {
6038 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6040 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6041 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6042 unlock_user_struct(target_rlim
, arg2
, 1);
6046 case TARGET_NR_getrusage
:
6048 struct rusage rusage
;
6049 ret
= get_errno(getrusage(arg1
, &rusage
));
6050 if (!is_error(ret
)) {
6051 host_to_target_rusage(arg2
, &rusage
);
6055 case TARGET_NR_gettimeofday
:
6058 ret
= get_errno(gettimeofday(&tv
, NULL
));
6059 if (!is_error(ret
)) {
6060 if (copy_to_user_timeval(arg1
, &tv
))
6065 case TARGET_NR_settimeofday
:
6068 if (copy_from_user_timeval(&tv
, arg1
))
6070 ret
= get_errno(settimeofday(&tv
, NULL
));
6073 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
6074 case TARGET_NR_select
:
6076 struct target_sel_arg_struct
*sel
;
6077 abi_ulong inp
, outp
, exp
, tvp
;
6080 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
6082 nsel
= tswapal(sel
->n
);
6083 inp
= tswapal(sel
->inp
);
6084 outp
= tswapal(sel
->outp
);
6085 exp
= tswapal(sel
->exp
);
6086 tvp
= tswapal(sel
->tvp
);
6087 unlock_user_struct(sel
, arg1
, 0);
6088 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
6092 #ifdef TARGET_NR_pselect6
6093 case TARGET_NR_pselect6
:
6095 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
6096 fd_set rfds
, wfds
, efds
;
6097 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
6098 struct timespec ts
, *ts_ptr
;
6101 * The 6th arg is actually two args smashed together,
6102 * so we cannot use the C library.
6110 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
6111 target_sigset_t
*target_sigset
;
6119 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
6123 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
6127 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
6133 * This takes a timespec, and not a timeval, so we cannot
6134 * use the do_select() helper ...
6137 if (target_to_host_timespec(&ts
, ts_addr
)) {
6145 /* Extract the two packed args for the sigset */
6148 sig
.size
= _NSIG
/ 8;
6150 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
6154 arg_sigset
= tswapal(arg7
[0]);
6155 arg_sigsize
= tswapal(arg7
[1]);
6156 unlock_user(arg7
, arg6
, 0);
6160 if (arg_sigsize
!= sizeof(*target_sigset
)) {
6161 /* Like the kernel, we enforce correct size sigsets */
6162 ret
= -TARGET_EINVAL
;
6165 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
6166 sizeof(*target_sigset
), 1);
6167 if (!target_sigset
) {
6170 target_to_host_sigset(&set
, target_sigset
);
6171 unlock_user(target_sigset
, arg_sigset
, 0);
6179 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
6182 if (!is_error(ret
)) {
6183 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
6185 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
6187 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
6190 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
6196 case TARGET_NR_symlink
:
6199 p
= lock_user_string(arg1
);
6200 p2
= lock_user_string(arg2
);
6202 ret
= -TARGET_EFAULT
;
6204 ret
= get_errno(symlink(p
, p2
));
6205 unlock_user(p2
, arg2
, 0);
6206 unlock_user(p
, arg1
, 0);
6209 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6210 case TARGET_NR_symlinkat
:
6213 p
= lock_user_string(arg1
);
6214 p2
= lock_user_string(arg3
);
6216 ret
= -TARGET_EFAULT
;
6218 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
6219 unlock_user(p2
, arg3
, 0);
6220 unlock_user(p
, arg1
, 0);
6224 #ifdef TARGET_NR_oldlstat
6225 case TARGET_NR_oldlstat
:
6228 case TARGET_NR_readlink
:
6231 p
= lock_user_string(arg1
);
6232 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
6234 ret
= -TARGET_EFAULT
;
6236 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
6237 char real
[PATH_MAX
];
6238 temp
= realpath(exec_path
,real
);
6239 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
6240 snprintf((char *)p2
, arg3
, "%s", real
);
6243 ret
= get_errno(readlink(path(p
), p2
, arg3
));
6245 unlock_user(p2
, arg2
, ret
);
6246 unlock_user(p
, arg1
, 0);
6249 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6250 case TARGET_NR_readlinkat
:
6253 p
= lock_user_string(arg2
);
6254 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
6256 ret
= -TARGET_EFAULT
;
6258 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
6259 unlock_user(p2
, arg3
, ret
);
6260 unlock_user(p
, arg2
, 0);
6264 #ifdef TARGET_NR_uselib
6265 case TARGET_NR_uselib
:
6268 #ifdef TARGET_NR_swapon
6269 case TARGET_NR_swapon
:
6270 if (!(p
= lock_user_string(arg1
)))
6272 ret
= get_errno(swapon(p
, arg2
));
6273 unlock_user(p
, arg1
, 0);
6276 case TARGET_NR_reboot
:
6277 if (!(p
= lock_user_string(arg4
)))
6279 ret
= reboot(arg1
, arg2
, arg3
, p
);
6280 unlock_user(p
, arg4
, 0);
6282 #ifdef TARGET_NR_readdir
6283 case TARGET_NR_readdir
:
6286 #ifdef TARGET_NR_mmap
6287 case TARGET_NR_mmap
:
6288 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6289 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6290 || defined(TARGET_S390X)
6293 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
6294 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
6302 unlock_user(v
, arg1
, 0);
6303 ret
= get_errno(target_mmap(v1
, v2
, v3
,
6304 target_to_host_bitmask(v4
, mmap_flags_tbl
),
6308 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6309 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6315 #ifdef TARGET_NR_mmap2
6316 case TARGET_NR_mmap2
:
6318 #define MMAP_SHIFT 12
6320 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6321 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6323 arg6
<< MMAP_SHIFT
));
6326 case TARGET_NR_munmap
:
6327 ret
= get_errno(target_munmap(arg1
, arg2
));
6329 case TARGET_NR_mprotect
:
6331 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
6332 /* Special hack to detect libc making the stack executable. */
6333 if ((arg3
& PROT_GROWSDOWN
)
6334 && arg1
>= ts
->info
->stack_limit
6335 && arg1
<= ts
->info
->start_stack
) {
6336 arg3
&= ~PROT_GROWSDOWN
;
6337 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
6338 arg1
= ts
->info
->stack_limit
;
6341 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
6343 #ifdef TARGET_NR_mremap
6344 case TARGET_NR_mremap
:
6345 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
6348 /* ??? msync/mlock/munlock are broken for softmmu. */
6349 #ifdef TARGET_NR_msync
6350 case TARGET_NR_msync
:
6351 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
6354 #ifdef TARGET_NR_mlock
6355 case TARGET_NR_mlock
:
6356 ret
= get_errno(mlock(g2h(arg1
), arg2
));
6359 #ifdef TARGET_NR_munlock
6360 case TARGET_NR_munlock
:
6361 ret
= get_errno(munlock(g2h(arg1
), arg2
));
6364 #ifdef TARGET_NR_mlockall
6365 case TARGET_NR_mlockall
:
6366 ret
= get_errno(mlockall(arg1
));
6369 #ifdef TARGET_NR_munlockall
6370 case TARGET_NR_munlockall
:
6371 ret
= get_errno(munlockall());
6374 case TARGET_NR_truncate
:
6375 if (!(p
= lock_user_string(arg1
)))
6377 ret
= get_errno(truncate(p
, arg2
));
6378 unlock_user(p
, arg1
, 0);
6380 case TARGET_NR_ftruncate
:
6381 ret
= get_errno(ftruncate(arg1
, arg2
));
6383 case TARGET_NR_fchmod
:
6384 ret
= get_errno(fchmod(arg1
, arg2
));
6386 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6387 case TARGET_NR_fchmodat
:
6388 if (!(p
= lock_user_string(arg2
)))
6390 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
6391 unlock_user(p
, arg2
, 0);
6394 case TARGET_NR_getpriority
:
6395 /* libc does special remapping of the return value of
6396 * sys_getpriority() so it's just easiest to call
6397 * sys_getpriority() directly rather than through libc. */
6398 ret
= get_errno(sys_getpriority(arg1
, arg2
));
6400 case TARGET_NR_setpriority
:
6401 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
6403 #ifdef TARGET_NR_profil
6404 case TARGET_NR_profil
:
6407 case TARGET_NR_statfs
:
6408 if (!(p
= lock_user_string(arg1
)))
6410 ret
= get_errno(statfs(path(p
), &stfs
));
6411 unlock_user(p
, arg1
, 0);
6413 if (!is_error(ret
)) {
6414 struct target_statfs
*target_stfs
;
6416 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6418 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6419 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6420 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6421 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6422 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6423 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6424 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6425 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6426 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6427 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6428 unlock_user_struct(target_stfs
, arg2
, 1);
6431 case TARGET_NR_fstatfs
:
6432 ret
= get_errno(fstatfs(arg1
, &stfs
));
6433 goto convert_statfs
;
6434 #ifdef TARGET_NR_statfs64
6435 case TARGET_NR_statfs64
:
6436 if (!(p
= lock_user_string(arg1
)))
6438 ret
= get_errno(statfs(path(p
), &stfs
));
6439 unlock_user(p
, arg1
, 0);
6441 if (!is_error(ret
)) {
6442 struct target_statfs64
*target_stfs
;
6444 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6446 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6447 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6448 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6449 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6450 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6451 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6452 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6453 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6454 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6455 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6456 unlock_user_struct(target_stfs
, arg3
, 1);
6459 case TARGET_NR_fstatfs64
:
6460 ret
= get_errno(fstatfs(arg1
, &stfs
));
6461 goto convert_statfs64
;
6463 #ifdef TARGET_NR_ioperm
6464 case TARGET_NR_ioperm
:
6467 #ifdef TARGET_NR_socketcall
6468 case TARGET_NR_socketcall
:
6469 ret
= do_socketcall(arg1
, arg2
);
6472 #ifdef TARGET_NR_accept
6473 case TARGET_NR_accept
:
6474 ret
= do_accept(arg1
, arg2
, arg3
);
6477 #ifdef TARGET_NR_bind
6478 case TARGET_NR_bind
:
6479 ret
= do_bind(arg1
, arg2
, arg3
);
6482 #ifdef TARGET_NR_connect
6483 case TARGET_NR_connect
:
6484 ret
= do_connect(arg1
, arg2
, arg3
);
6487 #ifdef TARGET_NR_getpeername
6488 case TARGET_NR_getpeername
:
6489 ret
= do_getpeername(arg1
, arg2
, arg3
);
6492 #ifdef TARGET_NR_getsockname
6493 case TARGET_NR_getsockname
:
6494 ret
= do_getsockname(arg1
, arg2
, arg3
);
6497 #ifdef TARGET_NR_getsockopt
6498 case TARGET_NR_getsockopt
:
6499 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
6502 #ifdef TARGET_NR_listen
6503 case TARGET_NR_listen
:
6504 ret
= get_errno(listen(arg1
, arg2
));
6507 #ifdef TARGET_NR_recv
6508 case TARGET_NR_recv
:
6509 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
6512 #ifdef TARGET_NR_recvfrom
6513 case TARGET_NR_recvfrom
:
6514 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6517 #ifdef TARGET_NR_recvmsg
6518 case TARGET_NR_recvmsg
:
6519 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
6522 #ifdef TARGET_NR_send
6523 case TARGET_NR_send
:
6524 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
6527 #ifdef TARGET_NR_sendmsg
6528 case TARGET_NR_sendmsg
:
6529 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
6532 #ifdef TARGET_NR_sendto
6533 case TARGET_NR_sendto
:
6534 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6537 #ifdef TARGET_NR_shutdown
6538 case TARGET_NR_shutdown
:
6539 ret
= get_errno(shutdown(arg1
, arg2
));
6542 #ifdef TARGET_NR_socket
6543 case TARGET_NR_socket
:
6544 ret
= do_socket(arg1
, arg2
, arg3
);
6547 #ifdef TARGET_NR_socketpair
6548 case TARGET_NR_socketpair
:
6549 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
6552 #ifdef TARGET_NR_setsockopt
6553 case TARGET_NR_setsockopt
:
6554 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
6558 case TARGET_NR_syslog
:
6559 if (!(p
= lock_user_string(arg2
)))
6561 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
6562 unlock_user(p
, arg2
, 0);
6565 case TARGET_NR_setitimer
:
6567 struct itimerval value
, ovalue
, *pvalue
;
6571 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
6572 || copy_from_user_timeval(&pvalue
->it_value
,
6573 arg2
+ sizeof(struct target_timeval
)))
6578 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
6579 if (!is_error(ret
) && arg3
) {
6580 if (copy_to_user_timeval(arg3
,
6581 &ovalue
.it_interval
)
6582 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
6588 case TARGET_NR_getitimer
:
6590 struct itimerval value
;
6592 ret
= get_errno(getitimer(arg1
, &value
));
6593 if (!is_error(ret
) && arg2
) {
6594 if (copy_to_user_timeval(arg2
,
6596 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
6602 case TARGET_NR_stat
:
6603 if (!(p
= lock_user_string(arg1
)))
6605 ret
= get_errno(stat(path(p
), &st
));
6606 unlock_user(p
, arg1
, 0);
6608 case TARGET_NR_lstat
:
6609 if (!(p
= lock_user_string(arg1
)))
6611 ret
= get_errno(lstat(path(p
), &st
));
6612 unlock_user(p
, arg1
, 0);
6614 case TARGET_NR_fstat
:
6616 ret
= get_errno(fstat(arg1
, &st
));
6618 if (!is_error(ret
)) {
6619 struct target_stat
*target_st
;
6621 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
6623 memset(target_st
, 0, sizeof(*target_st
));
6624 __put_user(st
.st_dev
, &target_st
->st_dev
);
6625 __put_user(st
.st_ino
, &target_st
->st_ino
);
6626 __put_user(st
.st_mode
, &target_st
->st_mode
);
6627 __put_user(st
.st_uid
, &target_st
->st_uid
);
6628 __put_user(st
.st_gid
, &target_st
->st_gid
);
6629 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
6630 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
6631 __put_user(st
.st_size
, &target_st
->st_size
);
6632 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
6633 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
6634 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
6635 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
6636 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
6637 unlock_user_struct(target_st
, arg2
, 1);
6641 #ifdef TARGET_NR_olduname
6642 case TARGET_NR_olduname
:
6645 #ifdef TARGET_NR_iopl
6646 case TARGET_NR_iopl
:
6649 case TARGET_NR_vhangup
:
6650 ret
= get_errno(vhangup());
6652 #ifdef TARGET_NR_idle
6653 case TARGET_NR_idle
:
6656 #ifdef TARGET_NR_syscall
6657 case TARGET_NR_syscall
:
6658 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
6659 arg6
, arg7
, arg8
, 0);
6662 case TARGET_NR_wait4
:
6665 abi_long status_ptr
= arg2
;
6666 struct rusage rusage
, *rusage_ptr
;
6667 abi_ulong target_rusage
= arg4
;
6669 rusage_ptr
= &rusage
;
6672 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6673 if (!is_error(ret
)) {
6674 if (status_ptr
&& ret
) {
6675 status
= host_to_target_waitstatus(status
);
6676 if (put_user_s32(status
, status_ptr
))
6680 host_to_target_rusage(target_rusage
, &rusage
);
6684 #ifdef TARGET_NR_swapoff
6685 case TARGET_NR_swapoff
:
6686 if (!(p
= lock_user_string(arg1
)))
6688 ret
= get_errno(swapoff(p
));
6689 unlock_user(p
, arg1
, 0);
6692 case TARGET_NR_sysinfo
:
6694 struct target_sysinfo
*target_value
;
6695 struct sysinfo value
;
6696 ret
= get_errno(sysinfo(&value
));
6697 if (!is_error(ret
) && arg1
)
6699 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
6701 __put_user(value
.uptime
, &target_value
->uptime
);
6702 __put_user(value
.loads
[0], &target_value
->loads
[0]);
6703 __put_user(value
.loads
[1], &target_value
->loads
[1]);
6704 __put_user(value
.loads
[2], &target_value
->loads
[2]);
6705 __put_user(value
.totalram
, &target_value
->totalram
);
6706 __put_user(value
.freeram
, &target_value
->freeram
);
6707 __put_user(value
.sharedram
, &target_value
->sharedram
);
6708 __put_user(value
.bufferram
, &target_value
->bufferram
);
6709 __put_user(value
.totalswap
, &target_value
->totalswap
);
6710 __put_user(value
.freeswap
, &target_value
->freeswap
);
6711 __put_user(value
.procs
, &target_value
->procs
);
6712 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
6713 __put_user(value
.freehigh
, &target_value
->freehigh
);
6714 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
6715 unlock_user_struct(target_value
, arg1
, 1);
6719 #ifdef TARGET_NR_ipc
6721 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6724 #ifdef TARGET_NR_semget
6725 case TARGET_NR_semget
:
6726 ret
= get_errno(semget(arg1
, arg2
, arg3
));
6729 #ifdef TARGET_NR_semop
6730 case TARGET_NR_semop
:
6731 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
6734 #ifdef TARGET_NR_semctl
6735 case TARGET_NR_semctl
:
6736 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
6739 #ifdef TARGET_NR_msgctl
6740 case TARGET_NR_msgctl
:
6741 ret
= do_msgctl(arg1
, arg2
, arg3
);
6744 #ifdef TARGET_NR_msgget
6745 case TARGET_NR_msgget
:
6746 ret
= get_errno(msgget(arg1
, arg2
));
6749 #ifdef TARGET_NR_msgrcv
6750 case TARGET_NR_msgrcv
:
6751 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
6754 #ifdef TARGET_NR_msgsnd
6755 case TARGET_NR_msgsnd
:
6756 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
6759 #ifdef TARGET_NR_shmget
6760 case TARGET_NR_shmget
:
6761 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
6764 #ifdef TARGET_NR_shmctl
6765 case TARGET_NR_shmctl
:
6766 ret
= do_shmctl(arg1
, arg2
, arg3
);
6769 #ifdef TARGET_NR_shmat
6770 case TARGET_NR_shmat
:
6771 ret
= do_shmat(arg1
, arg2
, arg3
);
6774 #ifdef TARGET_NR_shmdt
6775 case TARGET_NR_shmdt
:
6776 ret
= do_shmdt(arg1
);
6779 case TARGET_NR_fsync
:
6780 ret
= get_errno(fsync(arg1
));
6782 case TARGET_NR_clone
:
6783 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6784 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
6785 #elif defined(TARGET_CRIS)
6786 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
6787 #elif defined(TARGET_S390X)
6788 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
6790 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
6793 #ifdef __NR_exit_group
6794 /* new thread calls */
6795 case TARGET_NR_exit_group
:
6799 gdb_exit(cpu_env
, arg1
);
6800 ret
= get_errno(exit_group(arg1
));
6803 case TARGET_NR_setdomainname
:
6804 if (!(p
= lock_user_string(arg1
)))
6806 ret
= get_errno(setdomainname(p
, arg2
));
6807 unlock_user(p
, arg1
, 0);
6809 case TARGET_NR_uname
:
6810 /* no need to transcode because we use the linux syscall */
6812 struct new_utsname
* buf
;
6814 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
6816 ret
= get_errno(sys_uname(buf
));
6817 if (!is_error(ret
)) {
6818 /* Overrite the native machine name with whatever is being
6820 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
6821 /* Allow the user to override the reported release. */
6822 if (qemu_uname_release
&& *qemu_uname_release
)
6823 strcpy (buf
->release
, qemu_uname_release
);
6825 unlock_user_struct(buf
, arg1
, 1);
6829 case TARGET_NR_modify_ldt
:
6830 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
6832 #if !defined(TARGET_X86_64)
6833 case TARGET_NR_vm86old
:
6835 case TARGET_NR_vm86
:
6836 ret
= do_vm86(cpu_env
, arg1
, arg2
);
6840 case TARGET_NR_adjtimex
:
6842 #ifdef TARGET_NR_create_module
6843 case TARGET_NR_create_module
:
6845 case TARGET_NR_init_module
:
6846 case TARGET_NR_delete_module
:
6847 #ifdef TARGET_NR_get_kernel_syms
6848 case TARGET_NR_get_kernel_syms
:
6851 case TARGET_NR_quotactl
:
6853 case TARGET_NR_getpgid
:
6854 ret
= get_errno(getpgid(arg1
));
6856 case TARGET_NR_fchdir
:
6857 ret
= get_errno(fchdir(arg1
));
6859 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6860 case TARGET_NR_bdflush
:
6863 #ifdef TARGET_NR_sysfs
6864 case TARGET_NR_sysfs
:
6867 case TARGET_NR_personality
:
6868 ret
= get_errno(personality(arg1
));
6870 #ifdef TARGET_NR_afs_syscall
6871 case TARGET_NR_afs_syscall
:
6874 #ifdef TARGET_NR__llseek /* Not on alpha */
6875 case TARGET_NR__llseek
:
6878 #if !defined(__NR_llseek)
6879 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
6881 ret
= get_errno(res
);
6886 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
6888 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
6894 case TARGET_NR_getdents
:
6895 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6897 struct target_dirent
*target_dirp
;
6898 struct linux_dirent
*dirp
;
6899 abi_long count
= arg3
;
6901 dirp
= malloc(count
);
6903 ret
= -TARGET_ENOMEM
;
6907 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6908 if (!is_error(ret
)) {
6909 struct linux_dirent
*de
;
6910 struct target_dirent
*tde
;
6912 int reclen
, treclen
;
6913 int count1
, tnamelen
;
6917 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6921 reclen
= de
->d_reclen
;
6922 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
6923 tde
->d_reclen
= tswap16(treclen
);
6924 tde
->d_ino
= tswapal(de
->d_ino
);
6925 tde
->d_off
= tswapal(de
->d_off
);
6926 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
6929 /* XXX: may not be correct */
6930 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
6931 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6933 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
6937 unlock_user(target_dirp
, arg2
, ret
);
6943 struct linux_dirent
*dirp
;
6944 abi_long count
= arg3
;
6946 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6948 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6949 if (!is_error(ret
)) {
6950 struct linux_dirent
*de
;
6955 reclen
= de
->d_reclen
;
6958 de
->d_reclen
= tswap16(reclen
);
6959 tswapls(&de
->d_ino
);
6960 tswapls(&de
->d_off
);
6961 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6965 unlock_user(dirp
, arg2
, ret
);
6969 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6970 case TARGET_NR_getdents64
:
6972 struct linux_dirent64
*dirp
;
6973 abi_long count
= arg3
;
6974 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6976 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
6977 if (!is_error(ret
)) {
6978 struct linux_dirent64
*de
;
6983 reclen
= de
->d_reclen
;
6986 de
->d_reclen
= tswap16(reclen
);
6987 tswap64s((uint64_t *)&de
->d_ino
);
6988 tswap64s((uint64_t *)&de
->d_off
);
6989 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
6993 unlock_user(dirp
, arg2
, ret
);
6996 #endif /* TARGET_NR_getdents64 */
6997 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6999 case TARGET_NR_select
:
7001 case TARGET_NR__newselect
:
7003 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7006 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7007 # ifdef TARGET_NR_poll
7008 case TARGET_NR_poll
:
7010 # ifdef TARGET_NR_ppoll
7011 case TARGET_NR_ppoll
:
7014 struct target_pollfd
*target_pfd
;
7015 unsigned int nfds
= arg2
;
7020 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
7024 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
7025 for(i
= 0; i
< nfds
; i
++) {
7026 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
7027 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
7030 # ifdef TARGET_NR_ppoll
7031 if (num
== TARGET_NR_ppoll
) {
7032 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
7033 target_sigset_t
*target_set
;
7034 sigset_t _set
, *set
= &_set
;
7037 if (target_to_host_timespec(timeout_ts
, arg3
)) {
7038 unlock_user(target_pfd
, arg1
, 0);
7046 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
7048 unlock_user(target_pfd
, arg1
, 0);
7051 target_to_host_sigset(set
, target_set
);
7056 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
7058 if (!is_error(ret
) && arg3
) {
7059 host_to_target_timespec(arg3
, timeout_ts
);
7062 unlock_user(target_set
, arg4
, 0);
7066 ret
= get_errno(poll(pfd
, nfds
, timeout
));
7068 if (!is_error(ret
)) {
7069 for(i
= 0; i
< nfds
; i
++) {
7070 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
7073 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
7077 case TARGET_NR_flock
:
7078 /* NOTE: the flock constant seems to be the same for every
7080 ret
= get_errno(flock(arg1
, arg2
));
7082 case TARGET_NR_readv
:
7087 vec
= alloca(count
* sizeof(struct iovec
));
7088 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
7090 ret
= get_errno(readv(arg1
, vec
, count
));
7091 unlock_iovec(vec
, arg2
, count
, 1);
7094 case TARGET_NR_writev
:
7099 vec
= alloca(count
* sizeof(struct iovec
));
7100 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
7102 ret
= get_errno(writev(arg1
, vec
, count
));
7103 unlock_iovec(vec
, arg2
, count
, 0);
7106 case TARGET_NR_getsid
:
7107 ret
= get_errno(getsid(arg1
));
7109 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7110 case TARGET_NR_fdatasync
:
7111 ret
= get_errno(fdatasync(arg1
));
7114 case TARGET_NR__sysctl
:
7115 /* We don't implement this, but ENOTDIR is always a safe
7117 ret
= -TARGET_ENOTDIR
;
7119 case TARGET_NR_sched_getaffinity
:
7121 unsigned int mask_size
;
7122 unsigned long *mask
;
7125 * sched_getaffinity needs multiples of ulong, so need to take
7126 * care of mismatches between target ulong and host ulong sizes.
7128 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7129 ret
= -TARGET_EINVAL
;
7132 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7134 mask
= alloca(mask_size
);
7135 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
7137 if (!is_error(ret
)) {
7138 if (copy_to_user(arg3
, mask
, ret
)) {
7144 case TARGET_NR_sched_setaffinity
:
7146 unsigned int mask_size
;
7147 unsigned long *mask
;
7150 * sched_setaffinity needs multiples of ulong, so need to take
7151 * care of mismatches between target ulong and host ulong sizes.
7153 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7154 ret
= -TARGET_EINVAL
;
7157 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7159 mask
= alloca(mask_size
);
7160 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
7163 memcpy(mask
, p
, arg2
);
7164 unlock_user_struct(p
, arg2
, 0);
7166 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
7169 case TARGET_NR_sched_setparam
:
7171 struct sched_param
*target_schp
;
7172 struct sched_param schp
;
7174 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
7176 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7177 unlock_user_struct(target_schp
, arg2
, 0);
7178 ret
= get_errno(sched_setparam(arg1
, &schp
));
7181 case TARGET_NR_sched_getparam
:
7183 struct sched_param
*target_schp
;
7184 struct sched_param schp
;
7185 ret
= get_errno(sched_getparam(arg1
, &schp
));
7186 if (!is_error(ret
)) {
7187 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
7189 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
7190 unlock_user_struct(target_schp
, arg2
, 1);
7194 case TARGET_NR_sched_setscheduler
:
7196 struct sched_param
*target_schp
;
7197 struct sched_param schp
;
7198 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
7200 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7201 unlock_user_struct(target_schp
, arg3
, 0);
7202 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
7205 case TARGET_NR_sched_getscheduler
:
7206 ret
= get_errno(sched_getscheduler(arg1
));
7208 case TARGET_NR_sched_yield
:
7209 ret
= get_errno(sched_yield());
7211 case TARGET_NR_sched_get_priority_max
:
7212 ret
= get_errno(sched_get_priority_max(arg1
));
7214 case TARGET_NR_sched_get_priority_min
:
7215 ret
= get_errno(sched_get_priority_min(arg1
));
7217 case TARGET_NR_sched_rr_get_interval
:
7220 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
7221 if (!is_error(ret
)) {
7222 host_to_target_timespec(arg2
, &ts
);
7226 case TARGET_NR_nanosleep
:
7228 struct timespec req
, rem
;
7229 target_to_host_timespec(&req
, arg1
);
7230 ret
= get_errno(nanosleep(&req
, &rem
));
7231 if (is_error(ret
) && arg2
) {
7232 host_to_target_timespec(arg2
, &rem
);
7236 #ifdef TARGET_NR_query_module
7237 case TARGET_NR_query_module
:
7240 #ifdef TARGET_NR_nfsservctl
7241 case TARGET_NR_nfsservctl
:
7244 case TARGET_NR_prctl
:
7246 case PR_GET_PDEATHSIG
:
7249 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
7250 if (!is_error(ret
) && arg2
7251 && put_user_ual(deathsig
, arg2
)) {
7259 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
7263 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7265 unlock_user(name
, arg2
, 16);
7270 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
7274 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7276 unlock_user(name
, arg2
, 0);
7281 /* Most prctl options have no pointer arguments */
7282 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
7286 #ifdef TARGET_NR_arch_prctl
7287 case TARGET_NR_arch_prctl
:
7288 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7289 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
7295 #ifdef TARGET_NR_pread
7296 case TARGET_NR_pread
:
7297 if (regpairs_aligned(cpu_env
))
7299 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7301 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
7302 unlock_user(p
, arg2
, ret
);
7304 case TARGET_NR_pwrite
:
7305 if (regpairs_aligned(cpu_env
))
7307 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7309 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
7310 unlock_user(p
, arg2
, 0);
7313 #ifdef TARGET_NR_pread64
7314 case TARGET_NR_pread64
:
7315 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7317 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7318 unlock_user(p
, arg2
, ret
);
7320 case TARGET_NR_pwrite64
:
7321 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7323 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7324 unlock_user(p
, arg2
, 0);
7327 case TARGET_NR_getcwd
:
7328 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
7330 ret
= get_errno(sys_getcwd1(p
, arg2
));
7331 unlock_user(p
, arg1
, ret
);
7333 case TARGET_NR_capget
:
7335 case TARGET_NR_capset
:
7337 case TARGET_NR_sigaltstack
:
7338 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7339 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7340 defined(TARGET_M68K) || defined(TARGET_S390X)
7341 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
7346 case TARGET_NR_sendfile
:
7348 #ifdef TARGET_NR_getpmsg
7349 case TARGET_NR_getpmsg
:
7352 #ifdef TARGET_NR_putpmsg
7353 case TARGET_NR_putpmsg
:
7356 #ifdef TARGET_NR_vfork
7357 case TARGET_NR_vfork
:
7358 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
7362 #ifdef TARGET_NR_ugetrlimit
7363 case TARGET_NR_ugetrlimit
:
7366 int resource
= target_to_host_resource(arg1
);
7367 ret
= get_errno(getrlimit(resource
, &rlim
));
7368 if (!is_error(ret
)) {
7369 struct target_rlimit
*target_rlim
;
7370 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7372 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7373 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7374 unlock_user_struct(target_rlim
, arg2
, 1);
7379 #ifdef TARGET_NR_truncate64
7380 case TARGET_NR_truncate64
:
7381 if (!(p
= lock_user_string(arg1
)))
7383 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
7384 unlock_user(p
, arg1
, 0);
7387 #ifdef TARGET_NR_ftruncate64
7388 case TARGET_NR_ftruncate64
:
7389 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
7392 #ifdef TARGET_NR_stat64
7393 case TARGET_NR_stat64
:
7394 if (!(p
= lock_user_string(arg1
)))
7396 ret
= get_errno(stat(path(p
), &st
));
7397 unlock_user(p
, arg1
, 0);
7399 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7402 #ifdef TARGET_NR_lstat64
7403 case TARGET_NR_lstat64
:
7404 if (!(p
= lock_user_string(arg1
)))
7406 ret
= get_errno(lstat(path(p
), &st
));
7407 unlock_user(p
, arg1
, 0);
7409 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7412 #ifdef TARGET_NR_fstat64
7413 case TARGET_NR_fstat64
:
7414 ret
= get_errno(fstat(arg1
, &st
));
7416 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7419 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7420 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7421 #ifdef TARGET_NR_fstatat64
7422 case TARGET_NR_fstatat64
:
7424 #ifdef TARGET_NR_newfstatat
7425 case TARGET_NR_newfstatat
:
7427 if (!(p
= lock_user_string(arg2
)))
7429 #ifdef __NR_fstatat64
7430 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
7432 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
7435 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
7438 case TARGET_NR_lchown
:
7439 if (!(p
= lock_user_string(arg1
)))
7441 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7442 unlock_user(p
, arg1
, 0);
7444 #ifdef TARGET_NR_getuid
7445 case TARGET_NR_getuid
:
7446 ret
= get_errno(high2lowuid(getuid()));
7449 #ifdef TARGET_NR_getgid
7450 case TARGET_NR_getgid
:
7451 ret
= get_errno(high2lowgid(getgid()));
7454 #ifdef TARGET_NR_geteuid
7455 case TARGET_NR_geteuid
:
7456 ret
= get_errno(high2lowuid(geteuid()));
7459 #ifdef TARGET_NR_getegid
7460 case TARGET_NR_getegid
:
7461 ret
= get_errno(high2lowgid(getegid()));
7464 case TARGET_NR_setreuid
:
7465 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
7467 case TARGET_NR_setregid
:
7468 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
7470 case TARGET_NR_getgroups
:
7472 int gidsetsize
= arg1
;
7473 target_id
*target_grouplist
;
7477 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7478 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7479 if (gidsetsize
== 0)
7481 if (!is_error(ret
)) {
7482 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
7483 if (!target_grouplist
)
7485 for(i
= 0;i
< ret
; i
++)
7486 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
7487 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
7491 case TARGET_NR_setgroups
:
7493 int gidsetsize
= arg1
;
7494 target_id
*target_grouplist
;
7498 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7499 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
7500 if (!target_grouplist
) {
7501 ret
= -TARGET_EFAULT
;
7504 for(i
= 0;i
< gidsetsize
; i
++)
7505 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
7506 unlock_user(target_grouplist
, arg2
, 0);
7507 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7510 case TARGET_NR_fchown
:
7511 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
7513 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7514 case TARGET_NR_fchownat
:
7515 if (!(p
= lock_user_string(arg2
)))
7517 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
7518 unlock_user(p
, arg2
, 0);
7521 #ifdef TARGET_NR_setresuid
7522 case TARGET_NR_setresuid
:
7523 ret
= get_errno(setresuid(low2highuid(arg1
),
7525 low2highuid(arg3
)));
7528 #ifdef TARGET_NR_getresuid
7529 case TARGET_NR_getresuid
:
7531 uid_t ruid
, euid
, suid
;
7532 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7533 if (!is_error(ret
)) {
7534 if (put_user_u16(high2lowuid(ruid
), arg1
)
7535 || put_user_u16(high2lowuid(euid
), arg2
)
7536 || put_user_u16(high2lowuid(suid
), arg3
))
7542 #ifdef TARGET_NR_getresgid
7543 case TARGET_NR_setresgid
:
7544 ret
= get_errno(setresgid(low2highgid(arg1
),
7546 low2highgid(arg3
)));
7549 #ifdef TARGET_NR_getresgid
7550 case TARGET_NR_getresgid
:
7552 gid_t rgid
, egid
, sgid
;
7553 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7554 if (!is_error(ret
)) {
7555 if (put_user_u16(high2lowgid(rgid
), arg1
)
7556 || put_user_u16(high2lowgid(egid
), arg2
)
7557 || put_user_u16(high2lowgid(sgid
), arg3
))
7563 case TARGET_NR_chown
:
7564 if (!(p
= lock_user_string(arg1
)))
7566 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7567 unlock_user(p
, arg1
, 0);
7569 case TARGET_NR_setuid
:
7570 ret
= get_errno(setuid(low2highuid(arg1
)));
7572 case TARGET_NR_setgid
:
7573 ret
= get_errno(setgid(low2highgid(arg1
)));
7575 case TARGET_NR_setfsuid
:
7576 ret
= get_errno(setfsuid(arg1
));
7578 case TARGET_NR_setfsgid
:
7579 ret
= get_errno(setfsgid(arg1
));
7582 #ifdef TARGET_NR_lchown32
7583 case TARGET_NR_lchown32
:
7584 if (!(p
= lock_user_string(arg1
)))
7586 ret
= get_errno(lchown(p
, arg2
, arg3
));
7587 unlock_user(p
, arg1
, 0);
7590 #ifdef TARGET_NR_getuid32
7591 case TARGET_NR_getuid32
:
7592 ret
= get_errno(getuid());
7596 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7597 /* Alpha specific */
7598 case TARGET_NR_getxuid
:
7602 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
7604 ret
= get_errno(getuid());
7607 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7608 /* Alpha specific */
7609 case TARGET_NR_getxgid
:
7613 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
7615 ret
= get_errno(getgid());
7618 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7619 /* Alpha specific */
7620 case TARGET_NR_osf_getsysinfo
:
7621 ret
= -TARGET_EOPNOTSUPP
;
7623 case TARGET_GSI_IEEE_FP_CONTROL
:
7625 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
7627 /* Copied from linux ieee_fpcr_to_swcr. */
7628 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
7629 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
7630 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
7631 | SWCR_TRAP_ENABLE_DZE
7632 | SWCR_TRAP_ENABLE_OVF
);
7633 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
7634 | SWCR_TRAP_ENABLE_INE
);
7635 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
7636 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
7638 if (put_user_u64 (swcr
, arg2
))
7644 /* case GSI_IEEE_STATE_AT_SIGNAL:
7645 -- Not implemented in linux kernel.
7647 -- Retrieves current unaligned access state; not much used.
7649 -- Retrieves implver information; surely not used.
7651 -- Grabs a copy of the HWRPB; surely not used.
7656 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7657 /* Alpha specific */
7658 case TARGET_NR_osf_setsysinfo
:
7659 ret
= -TARGET_EOPNOTSUPP
;
7661 case TARGET_SSI_IEEE_FP_CONTROL
:
7662 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
7664 uint64_t swcr
, fpcr
, orig_fpcr
;
7666 if (get_user_u64 (swcr
, arg2
))
7668 orig_fpcr
= cpu_alpha_load_fpcr (cpu_env
);
7669 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
7671 /* Copied from linux ieee_swcr_to_fpcr. */
7672 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
7673 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
7674 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
7675 | SWCR_TRAP_ENABLE_DZE
7676 | SWCR_TRAP_ENABLE_OVF
)) << 48;
7677 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
7678 | SWCR_TRAP_ENABLE_INE
)) << 57;
7679 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
7680 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
7682 cpu_alpha_store_fpcr (cpu_env
, fpcr
);
7685 if (arg1
== TARGET_SSI_IEEE_RAISE_EXCEPTION
) {
7686 /* Old exceptions are not signaled. */
7687 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
7689 /* If any exceptions set by this call, and are unmasked,
7696 /* case SSI_NVPAIRS:
7697 -- Used with SSIN_UACPROC to enable unaligned accesses.
7698 case SSI_IEEE_STATE_AT_SIGNAL:
7699 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7700 -- Not implemented in linux kernel
7705 #ifdef TARGET_NR_osf_sigprocmask
7706 /* Alpha specific. */
7707 case TARGET_NR_osf_sigprocmask
:
7711 sigset_t set
, oldset
;
7714 case TARGET_SIG_BLOCK
:
7717 case TARGET_SIG_UNBLOCK
:
7720 case TARGET_SIG_SETMASK
:
7724 ret
= -TARGET_EINVAL
;
7728 target_to_host_old_sigset(&set
, &mask
);
7729 sigprocmask(how
, &set
, &oldset
);
7730 host_to_target_old_sigset(&mask
, &oldset
);
7736 #ifdef TARGET_NR_getgid32
7737 case TARGET_NR_getgid32
:
7738 ret
= get_errno(getgid());
7741 #ifdef TARGET_NR_geteuid32
7742 case TARGET_NR_geteuid32
:
7743 ret
= get_errno(geteuid());
7746 #ifdef TARGET_NR_getegid32
7747 case TARGET_NR_getegid32
:
7748 ret
= get_errno(getegid());
7751 #ifdef TARGET_NR_setreuid32
7752 case TARGET_NR_setreuid32
:
7753 ret
= get_errno(setreuid(arg1
, arg2
));
7756 #ifdef TARGET_NR_setregid32
7757 case TARGET_NR_setregid32
:
7758 ret
= get_errno(setregid(arg1
, arg2
));
7761 #ifdef TARGET_NR_getgroups32
7762 case TARGET_NR_getgroups32
:
7764 int gidsetsize
= arg1
;
7765 uint32_t *target_grouplist
;
7769 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7770 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7771 if (gidsetsize
== 0)
7773 if (!is_error(ret
)) {
7774 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
7775 if (!target_grouplist
) {
7776 ret
= -TARGET_EFAULT
;
7779 for(i
= 0;i
< ret
; i
++)
7780 target_grouplist
[i
] = tswap32(grouplist
[i
]);
7781 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
7786 #ifdef TARGET_NR_setgroups32
7787 case TARGET_NR_setgroups32
:
7789 int gidsetsize
= arg1
;
7790 uint32_t *target_grouplist
;
7794 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7795 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
7796 if (!target_grouplist
) {
7797 ret
= -TARGET_EFAULT
;
7800 for(i
= 0;i
< gidsetsize
; i
++)
7801 grouplist
[i
] = tswap32(target_grouplist
[i
]);
7802 unlock_user(target_grouplist
, arg2
, 0);
7803 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7807 #ifdef TARGET_NR_fchown32
7808 case TARGET_NR_fchown32
:
7809 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
7812 #ifdef TARGET_NR_setresuid32
7813 case TARGET_NR_setresuid32
:
7814 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
7817 #ifdef TARGET_NR_getresuid32
7818 case TARGET_NR_getresuid32
:
7820 uid_t ruid
, euid
, suid
;
7821 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7822 if (!is_error(ret
)) {
7823 if (put_user_u32(ruid
, arg1
)
7824 || put_user_u32(euid
, arg2
)
7825 || put_user_u32(suid
, arg3
))
7831 #ifdef TARGET_NR_setresgid32
7832 case TARGET_NR_setresgid32
:
7833 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
7836 #ifdef TARGET_NR_getresgid32
/* getresgid32: gid counterpart of getresuid32 — fetch real/effective/
 * saved gids and store each to the guest as a 32-bit value. */
7837 case TARGET_NR_getresgid32
:
7839 gid_t rgid
, egid
, sgid
;
7840 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7841 if (!is_error(ret
)) {
/* Failure of any put_user_u32 indicates a bad guest pointer; error
 * path elided from this extract — confirm it mirrors getresuid32. */
7842 if (put_user_u32(rgid
, arg1
)
7843 || put_user_u32(egid
, arg2
)
7844 || put_user_u32(sgid
, arg3
))
7850 #ifdef TARGET_NR_chown32
7851 case TARGET_NR_chown32
:
7852 if (!(p
= lock_user_string(arg1
)))
7854 ret
= get_errno(chown(p
, arg2
, arg3
));
7855 unlock_user(p
, arg1
, 0);
7858 #ifdef TARGET_NR_setuid32
7859 case TARGET_NR_setuid32
:
7860 ret
= get_errno(setuid(arg1
));
7863 #ifdef TARGET_NR_setgid32
7864 case TARGET_NR_setgid32
:
7865 ret
= get_errno(setgid(arg1
));
7868 #ifdef TARGET_NR_setfsuid32
7869 case TARGET_NR_setfsuid32
:
7870 ret
= get_errno(setfsuid(arg1
));
7873 #ifdef TARGET_NR_setfsgid32
7874 case TARGET_NR_setfsgid32
:
7875 ret
= get_errno(setfsgid(arg1
));
7879 case TARGET_NR_pivot_root
:
7881 #ifdef TARGET_NR_mincore
7882 case TARGET_NR_mincore
:
7885 ret
= -TARGET_EFAULT
;
7886 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
7888 if (!(p
= lock_user_string(arg3
)))
7890 ret
= get_errno(mincore(a
, arg2
, p
));
7891 unlock_user(p
, arg3
, ret
);
7893 unlock_user(a
, arg1
, 0);
7897 #ifdef TARGET_NR_arm_fadvise64_64
7898 case TARGET_NR_arm_fadvise64_64
:
7901 * arm_fadvise64_64 looks like fadvise64_64 but
7902 * with different argument order
7910 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7911 #ifdef TARGET_NR_fadvise64_64
7912 case TARGET_NR_fadvise64_64
:
7914 #ifdef TARGET_NR_fadvise64
7915 case TARGET_NR_fadvise64
:
7919 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
7920 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
7921 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
7922 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
7926 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
7929 #ifdef TARGET_NR_madvise
7930 case TARGET_NR_madvise
:
7931 /* A straight passthrough may not be safe because qemu sometimes
7932 turns private file-backed mappings into anonymous mappings.
7933 This will break MADV_DONTNEED.
7934 This is a hint, so ignoring and returning success is ok. */
7938 #if TARGET_ABI_BITS == 32
7939 case TARGET_NR_fcntl64
:
7943 struct target_flock64
*target_fl
;
7945 struct target_eabi_flock64
*target_efl
;
7948 cmd
= target_to_host_fcntl_cmd(arg2
);
7949 if (cmd
== -TARGET_EINVAL
) {
7955 case TARGET_F_GETLK64
:
7957 if (((CPUARMState
*)cpu_env
)->eabi
) {
7958 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
7960 fl
.l_type
= tswap16(target_efl
->l_type
);
7961 fl
.l_whence
= tswap16(target_efl
->l_whence
);
7962 fl
.l_start
= tswap64(target_efl
->l_start
);
7963 fl
.l_len
= tswap64(target_efl
->l_len
);
7964 fl
.l_pid
= tswap32(target_efl
->l_pid
);
7965 unlock_user_struct(target_efl
, arg3
, 0);
7969 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
7971 fl
.l_type
= tswap16(target_fl
->l_type
);
7972 fl
.l_whence
= tswap16(target_fl
->l_whence
);
7973 fl
.l_start
= tswap64(target_fl
->l_start
);
7974 fl
.l_len
= tswap64(target_fl
->l_len
);
7975 fl
.l_pid
= tswap32(target_fl
->l_pid
);
7976 unlock_user_struct(target_fl
, arg3
, 0);
7978 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
7981 if (((CPUARMState
*)cpu_env
)->eabi
) {
7982 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
7984 target_efl
->l_type
= tswap16(fl
.l_type
);
7985 target_efl
->l_whence
= tswap16(fl
.l_whence
);
7986 target_efl
->l_start
= tswap64(fl
.l_start
);
7987 target_efl
->l_len
= tswap64(fl
.l_len
);
7988 target_efl
->l_pid
= tswap32(fl
.l_pid
);
7989 unlock_user_struct(target_efl
, arg3
, 1);
7993 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
7995 target_fl
->l_type
= tswap16(fl
.l_type
);
7996 target_fl
->l_whence
= tswap16(fl
.l_whence
);
7997 target_fl
->l_start
= tswap64(fl
.l_start
);
7998 target_fl
->l_len
= tswap64(fl
.l_len
);
7999 target_fl
->l_pid
= tswap32(fl
.l_pid
);
8000 unlock_user_struct(target_fl
, arg3
, 1);
8005 case TARGET_F_SETLK64
:
8006 case TARGET_F_SETLKW64
:
8008 if (((CPUARMState
*)cpu_env
)->eabi
) {
8009 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8011 fl
.l_type
= tswap16(target_efl
->l_type
);
8012 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8013 fl
.l_start
= tswap64(target_efl
->l_start
);
8014 fl
.l_len
= tswap64(target_efl
->l_len
);
8015 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8016 unlock_user_struct(target_efl
, arg3
, 0);
8020 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8022 fl
.l_type
= tswap16(target_fl
->l_type
);
8023 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8024 fl
.l_start
= tswap64(target_fl
->l_start
);
8025 fl
.l_len
= tswap64(target_fl
->l_len
);
8026 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8027 unlock_user_struct(target_fl
, arg3
, 0);
8029 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8032 ret
= do_fcntl(arg1
, arg2
, arg3
);
8038 #ifdef TARGET_NR_cacheflush
8039 case TARGET_NR_cacheflush
:
8040 /* self-modifying code is handled automatically, so nothing needed */
8044 #ifdef TARGET_NR_security
/* NOTE(review): no visible body for NR_security — presumably falls
 * through to the unimplemented path (lines elided); confirm. */
8045 case TARGET_NR_security
:
8048 #ifdef TARGET_NR_getpagesize
/* Report the emulated target's page size, not the host's. */
8049 case TARGET_NR_getpagesize
:
8050 ret
= TARGET_PAGE_SIZE
;
/* gettid: direct passthrough of the host thread id. */
8053 case TARGET_NR_gettid
:
8054 ret
= get_errno(gettid());
8056 #ifdef TARGET_NR_readahead
8057 case TARGET_NR_readahead
:
8058 #if TARGET_ABI_BITS == 32
8059 if (regpairs_aligned(cpu_env
)) {
8064 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
8066 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
8071 #ifdef TARGET_NR_setxattr
8072 case TARGET_NR_listxattr
:
8073 case TARGET_NR_llistxattr
:
8077 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8079 ret
= -TARGET_EFAULT
;
8083 p
= lock_user_string(arg1
);
8085 if (num
== TARGET_NR_listxattr
) {
8086 ret
= get_errno(listxattr(p
, b
, arg3
));
8088 ret
= get_errno(llistxattr(p
, b
, arg3
));
8091 ret
= -TARGET_EFAULT
;
8093 unlock_user(p
, arg1
, 0);
8094 unlock_user(b
, arg2
, arg3
);
8097 case TARGET_NR_flistxattr
:
8101 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8103 ret
= -TARGET_EFAULT
;
8107 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
8108 unlock_user(b
, arg2
, arg3
);
8111 case TARGET_NR_setxattr
:
8112 case TARGET_NR_lsetxattr
:
8114 void *p
, *n
, *v
= 0;
8116 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8118 ret
= -TARGET_EFAULT
;
8122 p
= lock_user_string(arg1
);
8123 n
= lock_user_string(arg2
);
8125 if (num
== TARGET_NR_setxattr
) {
8126 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
8128 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
8131 ret
= -TARGET_EFAULT
;
8133 unlock_user(p
, arg1
, 0);
8134 unlock_user(n
, arg2
, 0);
8135 unlock_user(v
, arg3
, 0);
8138 case TARGET_NR_fsetxattr
:
8142 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8144 ret
= -TARGET_EFAULT
;
8148 n
= lock_user_string(arg2
);
8150 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
8152 ret
= -TARGET_EFAULT
;
8154 unlock_user(n
, arg2
, 0);
8155 unlock_user(v
, arg3
, 0);
8158 case TARGET_NR_getxattr
:
8159 case TARGET_NR_lgetxattr
:
8161 void *p
, *n
, *v
= 0;
8163 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8165 ret
= -TARGET_EFAULT
;
8169 p
= lock_user_string(arg1
);
8170 n
= lock_user_string(arg2
);
8172 if (num
== TARGET_NR_getxattr
) {
8173 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
8175 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
8178 ret
= -TARGET_EFAULT
;
8180 unlock_user(p
, arg1
, 0);
8181 unlock_user(n
, arg2
, 0);
8182 unlock_user(v
, arg3
, arg4
);
8185 case TARGET_NR_fgetxattr
:
8189 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8191 ret
= -TARGET_EFAULT
;
8195 n
= lock_user_string(arg2
);
8197 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
8199 ret
= -TARGET_EFAULT
;
8201 unlock_user(n
, arg2
, 0);
8202 unlock_user(v
, arg3
, arg4
);
8205 case TARGET_NR_removexattr
:
8206 case TARGET_NR_lremovexattr
:
8209 p
= lock_user_string(arg1
);
8210 n
= lock_user_string(arg2
);
8212 if (num
== TARGET_NR_removexattr
) {
8213 ret
= get_errno(removexattr(p
, n
));
8215 ret
= get_errno(lremovexattr(p
, n
));
8218 ret
= -TARGET_EFAULT
;
8220 unlock_user(p
, arg1
, 0);
8221 unlock_user(n
, arg2
, 0);
8224 case TARGET_NR_fremovexattr
:
8227 n
= lock_user_string(arg2
);
8229 ret
= get_errno(fremovexattr(arg1
, n
));
8231 ret
= -TARGET_EFAULT
;
8233 unlock_user(n
, arg2
, 0);
8237 #endif /* CONFIG_ATTR */
8238 #ifdef TARGET_NR_set_thread_area
8239 case TARGET_NR_set_thread_area
:
8240 #if defined(TARGET_MIPS)
8241 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
8244 #elif defined(TARGET_CRIS)
8246 ret
= -TARGET_EINVAL
;
8248 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
8252 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8253 ret
= do_set_thread_area(cpu_env
, arg1
);
8256 goto unimplemented_nowarn
;
8259 #ifdef TARGET_NR_get_thread_area
8260 case TARGET_NR_get_thread_area
:
8261 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8262 ret
= do_get_thread_area(cpu_env
, arg1
);
8264 goto unimplemented_nowarn
;
8267 #ifdef TARGET_NR_getdomainname
8268 case TARGET_NR_getdomainname
:
8269 goto unimplemented_nowarn
;
8272 #ifdef TARGET_NR_clock_gettime
8273 case TARGET_NR_clock_gettime
:
8276 ret
= get_errno(clock_gettime(arg1
, &ts
));
8277 if (!is_error(ret
)) {
8278 host_to_target_timespec(arg2
, &ts
);
8283 #ifdef TARGET_NR_clock_getres
8284 case TARGET_NR_clock_getres
:
8287 ret
= get_errno(clock_getres(arg1
, &ts
));
8288 if (!is_error(ret
)) {
8289 host_to_target_timespec(arg2
, &ts
);
8294 #ifdef TARGET_NR_clock_nanosleep
8295 case TARGET_NR_clock_nanosleep
:
8298 target_to_host_timespec(&ts
, arg3
);
8299 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
8301 host_to_target_timespec(arg4
, &ts
);
8306 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8307 case TARGET_NR_set_tid_address
:
8308 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
8312 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8313 case TARGET_NR_tkill
:
8314 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
8318 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8319 case TARGET_NR_tgkill
:
8320 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
8321 target_to_host_signal(arg3
)));
8325 #ifdef TARGET_NR_set_robust_list
8326 case TARGET_NR_set_robust_list
:
8327 goto unimplemented_nowarn
;
8330 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8331 case TARGET_NR_utimensat
:
8333 struct timespec
*tsp
, ts
[2];
8337 target_to_host_timespec(ts
, arg3
);
8338 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
8342 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
8344 if (!(p
= lock_user_string(arg2
))) {
8345 ret
= -TARGET_EFAULT
;
8348 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
8349 unlock_user(p
, arg2
, 0);
8354 #if defined(CONFIG_USE_NPTL)
8355 case TARGET_NR_futex
:
8356 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8359 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* inotify family: thin passthrough wrappers, compiled only when both
 * the target and the host define the corresponding syscall number. */
8360 case TARGET_NR_inotify_init
:
8361 ret
= get_errno(sys_inotify_init());
8364 #ifdef CONFIG_INOTIFY1
8365 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8366 case TARGET_NR_inotify_init1
:
8367 ret
= get_errno(sys_inotify_init1(arg1
));
8371 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8372 case TARGET_NR_inotify_add_watch
:
/* NOTE(review): lock_user_string() is not NULL-checked here, unlike
 * most other paths — a bad guest pointer would reach path(p); verify. */
8373 p
= lock_user_string(arg2
);
8374 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
8375 unlock_user(p
, arg2
, 0);
8378 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8379 case TARGET_NR_inotify_rm_watch
:
8380 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
8384 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* mq_open: translate the guest mq_attr (arg4, optional — the check is
 * elided from this extract) and open the POSIX message queue. */
8385 case TARGET_NR_mq_open
:
8387 struct mq_attr posix_mq_attr
;
/* NOTE(review): the name string is locked at arg1 - 1 but unlocked at
 * arg1 — the off-by-one looks intentional but deserves confirmation
 * against lock_user_string()'s contract. */
8389 p
= lock_user_string(arg1
- 1);
8391 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
8392 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
8393 unlock_user (p
, arg1
, 0);
/* mq_unlink: same lock-at-(arg1 - 1) pattern as mq_open above. */
8397 case TARGET_NR_mq_unlink
:
8398 p
= lock_user_string(arg1
- 1);
8399 ret
= get_errno(mq_unlink(p
));
8400 unlock_user (p
, arg1
, 0);
8403 case TARGET_NR_mq_timedsend
:
8407 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8409 target_to_host_timespec(&ts
, arg5
);
8410 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
8411 host_to_target_timespec(arg5
, &ts
);
8414 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
8415 unlock_user (p
, arg2
, arg3
);
8419 case TARGET_NR_mq_timedreceive
:
8424 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8426 target_to_host_timespec(&ts
, arg5
);
8427 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
8428 host_to_target_timespec(arg5
, &ts
);
8431 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
8432 unlock_user (p
, arg2
, arg3
);
8434 put_user_u32(prio
, arg4
);
8438 /* Not implemented for now... */
8439 /* case TARGET_NR_mq_notify: */
8442 case TARGET_NR_mq_getsetattr
:
8444 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
8447 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
8448 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
8451 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
8452 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
8459 #ifdef CONFIG_SPLICE
8460 #ifdef TARGET_NR_tee
8463 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
8467 #ifdef TARGET_NR_splice
8468 case TARGET_NR_splice
:
8470 loff_t loff_in
, loff_out
;
8471 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
8473 get_user_u64(loff_in
, arg2
);
8474 ploff_in
= &loff_in
;
8477 get_user_u64(loff_out
, arg2
);
8478 ploff_out
= &loff_out
;
8480 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
8484 #ifdef TARGET_NR_vmsplice
8485 case TARGET_NR_vmsplice
:
8490 vec
= alloca(count
* sizeof(struct iovec
));
8491 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
8493 ret
= get_errno(vmsplice(arg1
, vec
, count
, arg4
));
8494 unlock_iovec(vec
, arg2
, count
, 0);
8498 #endif /* CONFIG_SPLICE */
8499 #ifdef CONFIG_EVENTFD
8500 #if defined(TARGET_NR_eventfd)
8501 case TARGET_NR_eventfd
:
8502 ret
= get_errno(eventfd(arg1
, 0));
8505 #if defined(TARGET_NR_eventfd2)
8506 case TARGET_NR_eventfd2
:
8507 ret
= get_errno(eventfd(arg1
, arg2
));
8510 #endif /* CONFIG_EVENTFD */
8511 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8512 case TARGET_NR_fallocate
:
8513 #if TARGET_ABI_BITS == 32
8514 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
8515 target_offset64(arg5
, arg6
)));
8517 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
8521 #if defined(CONFIG_SYNC_FILE_RANGE)
8522 #if defined(TARGET_NR_sync_file_range)
8523 case TARGET_NR_sync_file_range
:
8524 #if TARGET_ABI_BITS == 32
8525 #if defined(TARGET_MIPS)
8526 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
8527 target_offset64(arg5
, arg6
), arg7
));
8529 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
8530 target_offset64(arg4
, arg5
), arg6
));
8531 #endif /* !TARGET_MIPS */
8533 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
8537 #if defined(TARGET_NR_sync_file_range2)
8538 case TARGET_NR_sync_file_range2
:
8539 /* This is like sync_file_range but the arguments are reordered */
8540 #if TARGET_ABI_BITS == 32
8541 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
8542 target_offset64(arg5
, arg6
), arg2
));
8544 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
8549 #if defined(CONFIG_EPOLL)
8550 #if defined(TARGET_NR_epoll_create)
8551 case TARGET_NR_epoll_create
:
8552 ret
= get_errno(epoll_create(arg1
));
8555 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8556 case TARGET_NR_epoll_create1
:
8557 ret
= get_errno(epoll_create1(arg1
));
8560 #if defined(TARGET_NR_epoll_ctl)
8561 case TARGET_NR_epoll_ctl
:
8563 struct epoll_event ep
;
8564 struct epoll_event
*epp
= 0;
8566 struct target_epoll_event
*target_ep
;
8567 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
8570 ep
.events
= tswap32(target_ep
->events
);
8571 /* The epoll_data_t union is just opaque data to the kernel,
8572 * so we transfer all 64 bits across and need not worry what
8573 * actual data type it is.
8575 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
8576 unlock_user_struct(target_ep
, arg4
, 0);
8579 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
8584 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8585 #define IMPLEMENT_EPOLL_PWAIT
8587 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8588 #if defined(TARGET_NR_epoll_wait)
8589 case TARGET_NR_epoll_wait
:
8591 #if defined(IMPLEMENT_EPOLL_PWAIT)
8592 case TARGET_NR_epoll_pwait
:
8595 struct target_epoll_event
*target_ep
;
8596 struct epoll_event
*ep
;
8598 int maxevents
= arg3
;
8601 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
8602 maxevents
* sizeof(struct target_epoll_event
), 1);
8607 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
8610 #if defined(IMPLEMENT_EPOLL_PWAIT)
8611 case TARGET_NR_epoll_pwait
:
8613 target_sigset_t
*target_set
;
8614 sigset_t _set
, *set
= &_set
;
8617 target_set
= lock_user(VERIFY_READ
, arg5
,
8618 sizeof(target_sigset_t
), 1);
8620 unlock_user(target_ep
, arg2
, 0);
8623 target_to_host_sigset(set
, target_set
);
8624 unlock_user(target_set
, arg5
, 0);
8629 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
8633 #if defined(TARGET_NR_epoll_wait)
8634 case TARGET_NR_epoll_wait
:
8635 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
8639 ret
= -TARGET_ENOSYS
;
8641 if (!is_error(ret
)) {
8643 for (i
= 0; i
< ret
; i
++) {
8644 target_ep
[i
].events
= tswap32(ep
[i
].events
);
8645 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
8648 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
8653 #ifdef TARGET_NR_prlimit64
8654 case TARGET_NR_prlimit64
:
8656 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8657 struct target_rlimit64
*target_rnew
, *target_rold
;
8658 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
/* arg3 (new limit) is optional; when present, convert it from guest
 * byte order into the host-side struct. The surrounding arg3 != 0
 * guard is elided from this extract. */
8660 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
8663 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
8664 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
8665 unlock_user_struct(target_rnew
, arg3
, 0);
/* rold is only filled in when the caller asked for the old limits. */
8669 ret
= get_errno(sys_prlimit64(arg1
, arg2
, rnewp
, arg4
? &rold
: 0));
/* On success, write the previous limits back to the guest if requested. */
8670 if (!is_error(ret
) && arg4
) {
8671 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
8674 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
8675 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
8676 unlock_user_struct(target_rold
, arg4
, 1);
8683 gemu_log("qemu: Unsupported syscall: %d\n", num
);
8684 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8685 unimplemented_nowarn
:
8687 ret
= -TARGET_ENOSYS
;
8692 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
8695 print_syscall_ret(num
, ret
);
8698 ret
= -TARGET_EFAULT
;