/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#include <sys/socket.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include "qemu-common.h"
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include "linux_loop.h"
#include "cpu-uname.h"
#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values. */
#define CLONE_NPTL_FLAGS2 0
#endif
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
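/* The _syscallN() macros below generate small static wrapper functions that
 * invoke a raw host system call via syscall(2) with __NR_<name>.  They are
 * used for calls that the host C library does not (or may not) expose
 * directly. */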
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
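/* The __NR_sys_* aliases above let the _syscallN() macros declare wrappers
 * under a sys_* name (e.g. sys_getdents) while still invoking the plain host
 * syscall number, so the wrappers do not clash with any prototypes the host
 * C library may already provide for the unprefixed names. */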
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg)
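/* fcntl_flags_tbl translates open(2)/fcntl(2) status flags between target and
 * host encodings: each entry carries a target mask/bits pair and the matching
 * host mask/bits pair, for use with the generic bitmask translation helpers. */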
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct the linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
  return (0);
}
#undef COPY_UTSNAME_FIELD
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available. We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
    return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
    return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
    return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
    return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
    return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
    return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
    return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    /*
     * open(2) has extra parameter 'mode' when called with
     * flag O_CREAT.
     */
    if ((flags & O_CREAT) != 0) {
        return (openat(dirfd, pathname, flags, mode));
    }
    return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
    return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
    return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
    return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
    return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */
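/* Without host atfile library stubs we fall back to issuing the *at() system
 * calls directly through the _syscallN() wrappers defined above, guarded by
 * both the target and host syscall numbers being available. */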
/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
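/* regpairs_aligned() tells the 64-bit argument unpacking code whether the
 * guest ABI skips a register so that a 64-bit value starts on an even
 * register pair (ARM EABI, MIPS, 32-bit PPC) or packs it immediately. */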
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]		= TARGET_EIDRM,
    [ECHRNG]		= TARGET_ECHRNG,
    [EL2NSYNC]		= TARGET_EL2NSYNC,
    [EL3HLT]		= TARGET_EL3HLT,
    [EL3RST]		= TARGET_EL3RST,
    [ELNRNG]		= TARGET_ELNRNG,
    [EUNATCH]		= TARGET_EUNATCH,
    [ENOCSI]		= TARGET_ENOCSI,
    [EL2HLT]		= TARGET_EL2HLT,
    [EDEADLK]		= TARGET_EDEADLK,
    [ENOLCK]		= TARGET_ENOLCK,
    [EBADE]		= TARGET_EBADE,
    [EBADR]		= TARGET_EBADR,
    [EXFULL]		= TARGET_EXFULL,
    [ENOANO]		= TARGET_ENOANO,
    [EBADRQC]		= TARGET_EBADRQC,
    [EBADSLT]		= TARGET_EBADSLT,
    [EBFONT]		= TARGET_EBFONT,
    [ENOSTR]		= TARGET_ENOSTR,
    [ENODATA]		= TARGET_ENODATA,
    [ETIME]		= TARGET_ETIME,
    [ENOSR]		= TARGET_ENOSR,
    [ENONET]		= TARGET_ENONET,
    [ENOPKG]		= TARGET_ENOPKG,
    [EREMOTE]		= TARGET_EREMOTE,
    [ENOLINK]		= TARGET_ENOLINK,
    [EADV]		= TARGET_EADV,
    [ESRMNT]		= TARGET_ESRMNT,
    [ECOMM]		= TARGET_ECOMM,
    [EPROTO]		= TARGET_EPROTO,
    [EDOTDOT]		= TARGET_EDOTDOT,
    [EMULTIHOP]		= TARGET_EMULTIHOP,
    [EBADMSG]		= TARGET_EBADMSG,
    [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
    [EOVERFLOW]		= TARGET_EOVERFLOW,
    [ENOTUNIQ]		= TARGET_ENOTUNIQ,
    [EBADFD]		= TARGET_EBADFD,
    [EREMCHG]		= TARGET_EREMCHG,
    [ELIBACC]		= TARGET_ELIBACC,
    [ELIBBAD]		= TARGET_ELIBBAD,
    [ELIBSCN]		= TARGET_ELIBSCN,
    [ELIBMAX]		= TARGET_ELIBMAX,
    [ELIBEXEC]		= TARGET_ELIBEXEC,
    [EILSEQ]		= TARGET_EILSEQ,
    [ENOSYS]		= TARGET_ENOSYS,
    [ELOOP]		= TARGET_ELOOP,
    [ERESTART]		= TARGET_ERESTART,
    [ESTRPIPE]		= TARGET_ESTRPIPE,
    [ENOTEMPTY]		= TARGET_ENOTEMPTY,
    [EUSERS]		= TARGET_EUSERS,
    [ENOTSOCK]		= TARGET_ENOTSOCK,
    [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
    [EMSGSIZE]		= TARGET_EMSGSIZE,
    [EPROTOTYPE]	= TARGET_EPROTOTYPE,
    [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
    [EADDRINUSE]	= TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
    [ENETDOWN]		= TARGET_ENETDOWN,
    [ENETUNREACH]	= TARGET_ENETUNREACH,
    [ENETRESET]		= TARGET_ENETRESET,
    [ECONNABORTED]	= TARGET_ECONNABORTED,
    [ECONNRESET]	= TARGET_ECONNRESET,
    [ENOBUFS]		= TARGET_ENOBUFS,
    [EISCONN]		= TARGET_EISCONN,
    [ENOTCONN]		= TARGET_ENOTCONN,
    [EUCLEAN]		= TARGET_EUCLEAN,
    [ENOTNAM]		= TARGET_ENOTNAM,
    [ENAVAIL]		= TARGET_ENAVAIL,
    [EISNAM]		= TARGET_EISNAM,
    [EREMOTEIO]		= TARGET_EREMOTEIO,
    [ESHUTDOWN]		= TARGET_ESHUTDOWN,
    [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
    [ETIMEDOUT]		= TARGET_ETIMEDOUT,
    [ECONNREFUSED]	= TARGET_ECONNREFUSED,
    [EHOSTDOWN]		= TARGET_EHOSTDOWN,
    [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
    [EALREADY]		= TARGET_EALREADY,
    [EINPROGRESS]	= TARGET_EINPROGRESS,
    [ESTALE]		= TARGET_ESTALE,
    [ECANCELED]		= TARGET_ECANCELED,
    [ENOMEDIUM]		= TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]		= TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]	= TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]	= TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]	= TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}
static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}
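/* Host syscalls report failure by returning a value in the [-4095, -1]
 * range (a negated errno); is_error() applies the same convention to the
 * target-side abi_long results produced by get_errno(). */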
char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
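/* DEBUGF_BRK() expands to nothing by default; uncommenting the fprintf
 * variant above turns the traces sprinkled through do_brk() into stderr
 * output without touching any call site. */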
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_ulong new_alloc_size;
    abi_long mapped_addr;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
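/* Example: with HOST_HZ 100 and a target whose HZ is 250, 50 host ticks
 * scale to (50 * 250) / 100 = 125 target ticks; the int64_t cast keeps the
 * intermediate product from overflowing a 32-bit long. */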
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
    return pipe2(host_pipe, flags);
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;

    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
            (cmsg->cmsg_type == SCM_RIGHTS)) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
                   (cmsg->cmsg_type == SO_TIMESTAMP) &&
                   (len == sizeof(struct timeval))) {
            /* copy struct timeval to target */
            struct timeval *tv = (struct timeval *)data;
            struct target_timeval *target_tv =
                (struct target_timeval *)target_data;

            target_tv->tv_sec = tswapal(tv->tv_sec);
            target_tv->tv_usec = tswapal(tv->tv_usec);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
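/* Both cmsg converters above only understand SCM_RIGHTS (an array of file
 * descriptors that must be byte-swapped individually) and, on the
 * host-to-target path, SO_TIMESTAMP timevals; any other ancillary payload is
 * copied verbatim after logging a warning. */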
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (crlen < len) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(val);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(val);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = calloc(count, sizeof(struct iovec));
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        errno = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            errno = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            if (!vec[i].iov_base) {
                errno = EFAULT;
                goto fail;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    free(vec);
 fail2:
    unlock_user(target_vec, target_addr, 0);
    return NULL;
}

static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);

            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    free(vec);
}
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
    return get_errno(socket(domain, type, protocol));
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}
1930 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1931 int flags
, int send
)
1934 struct target_msghdr
*msgp
;
1938 abi_ulong target_vec
;
1941 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1945 return -TARGET_EFAULT
;
1946 if (msgp
->msg_name
) {
1947 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1948 msg
.msg_name
= alloca(msg
.msg_namelen
);
1949 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapal(msgp
->msg_name
),
1955 msg
.msg_name
= NULL
;
1956 msg
.msg_namelen
= 0;
1958 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
1959 msg
.msg_control
= alloca(msg
.msg_controllen
);
1960 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1962 count
= tswapal(msgp
->msg_iovlen
);
1963 target_vec
= tswapal(msgp
->msg_iov
);
1964 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
1965 target_vec
, count
, send
);
1967 ret
= -host_to_target_errno(errno
);
1970 msg
.msg_iovlen
= count
;
1974 ret
= target_to_host_cmsg(&msg
, msgp
);
1976 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1978 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1979 if (!is_error(ret
)) {
1981 ret
= host_to_target_cmsg(msgp
, &msg
);
1982 if (!is_error(ret
)) {
1983 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
1984 if (msg
.msg_name
!= NULL
) {
1985 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
1986 msg
.msg_name
, msg
.msg_namelen
);
1998 unlock_iovec(vec
, target_vec
, count
, !send
);
2000 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
2112 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2113 abi_ulong target_addr
, socklen_t addrlen
)
2119 if ((int)addrlen
< 0) {
2120 return -TARGET_EINVAL
;
2123 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2125 return -TARGET_EFAULT
;
2127 addr
= alloca(addrlen
);
2128 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2130 unlock_user(host_msg
, msg
, 0);
2133 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2135 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2137 unlock_user(host_msg
, msg
, 0);
2141 /* do_recvfrom() Must return target values and target errnos. */
2142 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2143 abi_ulong target_addr
,
2144 abi_ulong target_addrlen
)
2151 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2153 return -TARGET_EFAULT
;
2155 if (get_user_u32(addrlen
, target_addrlen
)) {
2156 ret
= -TARGET_EFAULT
;
2159 if ((int)addrlen
< 0) {
2160 ret
= -TARGET_EINVAL
;
2163 addr
= alloca(addrlen
);
2164 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2166 addr
= NULL
; /* To keep compiler quiet. */
2167 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2169 if (!is_error(ret
)) {
2171 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2172 if (put_user_u32(addrlen
, target_addrlen
)) {
2173 ret
= -TARGET_EFAULT
;
2177 unlock_user(host_msg
, msg
, len
);
2180 unlock_user(host_msg
, msg
, 0);
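/* do_socketcall() demultiplexes the single socketcall(2) entry point used by
 * some targets: vptr points at an array of abi_ulong arguments, so argument i
 * is fetched with get_user_ual() at vptr + i * sizeof(abi_ulong) and then
 * forwarded to the matching do_*() helper above. */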
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    abi_long ret;
    const int n = sizeof(abi_ulong);

    switch(num) {
    case SOCKOP_socket:
        {
            abi_ulong domain, type, protocol;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_socket(domain, type, protocol);
        }
        break;
    case SOCKOP_bind:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_bind(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_connect:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_connect(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_listen:
        {
            abi_ulong sockfd, backlog;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(backlog, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(listen(sockfd, backlog));
        }
        break;
    case SOCKOP_accept:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_accept(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getsockname:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getsockname(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getpeername:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getpeername(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_socketpair:
        {
            abi_ulong domain, type, protocol;
            abi_ulong tab;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n)
                || get_user_ual(tab, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_socketpair(domain, type, protocol, tab);
        }
        break;
    case SOCKOP_send:
        {
            abi_ulong sockfd, msg, len, flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_recv:
        {
            abi_ulong sockfd, msg, len, flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_sendto:
        {
            abi_ulong sockfd, msg, len, flags, addr, addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_recvfrom:
        {
            abi_ulong sockfd, msg, len, flags, addr, addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_shutdown:
        {
            abi_ulong sockfd, how;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(how, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(shutdown(sockfd, how));
        }
        break;
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
        {
            abi_ulong fd, flags;
            abi_ulong target_msg;

            if (get_user_ual(fd, vptr)
                || get_user_ual(target_msg, vptr + n)
                || get_user_ual(flags, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_sendrecvmsg(fd, target_msg, flags,
                                 (num == SOCKOP_sendmsg));
        }
        break;
    case SOCKOP_setsockopt:
        {
            abi_ulong sockfd, level, optname, optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_setsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    case SOCKOP_getsockopt:
        {
            abi_ulong sockfd, level, optname, optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_getsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
#define N_SHM_REGIONS	32

static struct shm_region {
    abi_ulong start;   /* guest address of the attached segment, 0 = free slot */
    abi_ulong size;
} shm_regions[N_SHM_REGIONS];
2448 struct target_ipc_perm
2455 unsigned short int mode
;
2456 unsigned short int __pad1
;
2457 unsigned short int __seq
;
2458 unsigned short int __pad2
;
2459 abi_ulong __unused1
;
2460 abi_ulong __unused2
;
2463 struct target_semid_ds
2465 struct target_ipc_perm sem_perm
;
2466 abi_ulong sem_otime
;
2467 abi_ulong __unused1
;
2468 abi_ulong sem_ctime
;
2469 abi_ulong __unused2
;
2470 abi_ulong sem_nsems
;
2471 abi_ulong __unused3
;
2472 abi_ulong __unused4
;
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswapal(target_ip->__key);
    host_ip->uid = tswapal(target_ip->uid);
    host_ip->gid = tswapal(target_ip->gid);
    host_ip->cuid = tswapal(target_ip->cuid);
    host_ip->cgid = tswapal(target_ip->cgid);
    host_ip->mode = tswap16(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapal(host_ip->__key);
    target_ip->uid = tswapal(host_ip->uid);
    target_ip->gid = tswapal(host_ip->gid);
    target_ip->cuid = tswapal(host_ip->cuid);
    target_ip->cgid = tswapal(host_ip->cgid);
    target_ip->mode = tswap16(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
2545 struct target_seminfo
{
2558 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2559 struct seminfo
*host_seminfo
)
2561 struct target_seminfo
*target_seminfo
;
2562 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2563 return -TARGET_EFAULT
;
2564 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2565 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2566 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2567 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2568 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2569 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2570 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2571 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2572 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2573 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2574 unlock_user_struct(target_seminfo
, target_addr
, 1);
2580 struct semid_ds
*buf
;
2581 unsigned short *array
;
2582 struct seminfo
*__buf
;
2585 union target_semun
{
2592 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2593 abi_ulong target_addr
)
2596 unsigned short *array
;
2598 struct semid_ds semid_ds
;
2601 semun
.buf
= &semid_ds
;
2603 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2605 return get_errno(ret
);
2607 nsems
= semid_ds
.sem_nsems
;
2609 *host_array
= malloc(nsems
*sizeof(unsigned short));
2610 array
= lock_user(VERIFY_READ
, target_addr
,
2611 nsems
*sizeof(unsigned short), 1);
2613 return -TARGET_EFAULT
;
2615 for(i
=0; i
<nsems
; i
++) {
2616 __get_user((*host_array
)[i
], &array
[i
]);
2618 unlock_user(array
, target_addr
, 0);
2623 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2624 unsigned short **host_array
)
2627 unsigned short *array
;
2629 struct semid_ds semid_ds
;
2632 semun
.buf
= &semid_ds
;
2634 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2636 return get_errno(ret
);
2638 nsems
= semid_ds
.sem_nsems
;
2640 array
= lock_user(VERIFY_WRITE
, target_addr
,
2641 nsems
*sizeof(unsigned short), 0);
2643 return -TARGET_EFAULT
;
2645 for(i
=0; i
<nsems
; i
++) {
2646 __put_user((*host_array
)[i
], &array
[i
]);
2649 unlock_user(array
, target_addr
, 1);
2654 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2655 union target_semun target_su
)
2658 struct semid_ds dsarg
;
2659 unsigned short *array
= NULL
;
2660 struct seminfo seminfo
;
2661 abi_long ret
= -TARGET_EINVAL
;
2668 arg
.val
= tswap32(target_su
.val
);
2669 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2670 target_su
.val
= tswap32(arg
.val
);
2674 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2678 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2679 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2686 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2690 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2691 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2697 arg
.__buf
= &seminfo
;
2698 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2699 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2707 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}

static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return semop(semid, sops, nsops);
}
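/* Note (added commentary): sops[] above is a C99 variable-length array sized
 * by the guest-supplied nsops, so the sembuf conversion lives entirely on the
 * stack; the host semop() call is then made with the converted array. */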
2753 struct target_msqid_ds
2755 struct target_ipc_perm msg_perm
;
2756 abi_ulong msg_stime
;
2757 #if TARGET_ABI_BITS == 32
2758 abi_ulong __unused1
;
2760 abi_ulong msg_rtime
;
2761 #if TARGET_ABI_BITS == 32
2762 abi_ulong __unused2
;
2764 abi_ulong msg_ctime
;
2765 #if TARGET_ABI_BITS == 32
2766 abi_ulong __unused3
;
2768 abi_ulong __msg_cbytes
;
2770 abi_ulong msg_qbytes
;
2771 abi_ulong msg_lspid
;
2772 abi_ulong msg_lrpid
;
2773 abi_ulong __unused4
;
2774 abi_ulong __unused5
;
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
2819 struct target_msginfo
{
2827 unsigned short int msgseg
;
2830 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2831 struct msginfo
*host_msginfo
)
2833 struct target_msginfo
*target_msginfo
;
2834 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2835 return -TARGET_EFAULT
;
2836 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2837 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2838 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2839 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2840 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2841 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2842 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2843 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2844 unlock_user_struct(target_msginfo
, target_addr
, 1);
2848 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2850 struct msqid_ds dsarg
;
2851 struct msginfo msginfo
;
2852 abi_long ret
= -TARGET_EINVAL
;
2860 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2861 return -TARGET_EFAULT
;
2862 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2863 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2864 return -TARGET_EFAULT
;
2867 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2871 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2872 if (host_to_target_msginfo(ptr
, &msginfo
))
2873 return -TARGET_EFAULT
;
2880 struct target_msgbuf
{
2885 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2886 unsigned int msgsz
, int msgflg
)
2888 struct target_msgbuf
*target_mb
;
2889 struct msgbuf
*host_mb
;
2892 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2893 return -TARGET_EFAULT
;
2894 host_mb
= malloc(msgsz
+sizeof(long));
2895 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
2896 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2897 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2899 unlock_user_struct(target_mb
, msgp
, 0);
2904 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2905 unsigned int msgsz
, abi_long msgtyp
,
2908 struct target_msgbuf
*target_mb
;
2910 struct msgbuf
*host_mb
;
2913 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2914 return -TARGET_EFAULT
;
2916 host_mb
= g_malloc(msgsz
+sizeof(long));
2917 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
2920 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2921 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2922 if (!target_mtext
) {
2923 ret
= -TARGET_EFAULT
;
2926 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2927 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2930 target_mb
->mtype
= tswapal(host_mb
->mtype
);
2934 unlock_user_struct(target_mb
, msgp
, 1);
2939 struct target_shmid_ds
2941 struct target_ipc_perm shm_perm
;
2942 abi_ulong shm_segsz
;
2943 abi_ulong shm_atime
;
2944 #if TARGET_ABI_BITS == 32
2945 abi_ulong __unused1
;
2947 abi_ulong shm_dtime
;
2948 #if TARGET_ABI_BITS == 32
2949 abi_ulong __unused2
;
2951 abi_ulong shm_ctime
;
2952 #if TARGET_ABI_BITS == 32
2953 abi_ulong __unused3
;
2957 abi_ulong shm_nattch
;
2958 unsigned long int __unused4
;
2959 unsigned long int __unused5
;
2962 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2963 abi_ulong target_addr
)
2965 struct target_shmid_ds
*target_sd
;
2967 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2968 return -TARGET_EFAULT
;
2969 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2970 return -TARGET_EFAULT
;
2971 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2972 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2973 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2974 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2975 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2976 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2977 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2978 unlock_user_struct(target_sd
, target_addr
, 0);
2982 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2983 struct shmid_ds
*host_sd
)
2985 struct target_shmid_ds
*target_sd
;
2987 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2988 return -TARGET_EFAULT
;
2989 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2990 return -TARGET_EFAULT
;
2991 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2992 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2993 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2994 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2995 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2996 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2997 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2998 unlock_user_struct(target_sd
, target_addr
, 1);
3002 struct target_shminfo
{
3010 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3011 struct shminfo
*host_shminfo
)
3013 struct target_shminfo
*target_shminfo
;
3014 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3015 return -TARGET_EFAULT
;
3016 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3017 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3018 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3019 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3020 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3021 unlock_user_struct(target_shminfo
, target_addr
, 1);
3025 struct target_shm_info
{
3030 abi_ulong swap_attempts
;
3031 abi_ulong swap_successes
;
3034 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3035 struct shm_info
*host_shm_info
)
3037 struct target_shm_info
*target_shm_info
;
3038 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3039 return -TARGET_EFAULT
;
3040 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3041 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3042 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3043 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3044 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3045 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3046 unlock_user_struct(target_shm_info
, target_addr
, 1);
3050 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3052 struct shmid_ds dsarg
;
3053 struct shminfo shminfo
;
3054 struct shm_info shm_info
;
3055 abi_long ret
= -TARGET_EINVAL
;
3063 if (target_to_host_shmid_ds(&dsarg
, buf
))
3064 return -TARGET_EFAULT
;
3065 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3066 if (host_to_target_shmid_ds(buf
, &dsarg
))
3067 return -TARGET_EFAULT
;
3070 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3071 if (host_to_target_shminfo(buf
, &shminfo
))
3072 return -TARGET_EFAULT
;
3075 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3076 if (host_to_target_shm_info(buf
, &shm_info
))
3077 return -TARGET_EFAULT
;
3082 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        return get_errno((long)host_raddr);
    }
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    return raddr;
}

static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
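/* Note (added commentary): shm_regions[] is the bookkeeping that lets
 * do_shmdt() know how many guest pages to invalidate, since shmdt() itself
 * only takes an address; a zero start marks a free slot in the table. */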
3157 #ifdef TARGET_NR_ipc
3158 /* ??? This only works with linear mappings. */
3159 /* do_ipc() must return target values and target errnos. */
3160 static abi_long
do_ipc(unsigned int call
, int first
,
3161 int second
, int third
,
3162 abi_long ptr
, abi_long fifth
)
3167 version
= call
>> 16;
3172 ret
= do_semop(first
, ptr
, second
);
3176 ret
= get_errno(semget(first
, second
, third
));
3180 ret
= do_semctl(first
, second
, third
, (union target_semun
)(abi_ulong
) ptr
);
3184 ret
= get_errno(msgget(first
, second
));
3188 ret
= do_msgsnd(first
, ptr
, second
, third
);
3192 ret
= do_msgctl(first
, second
, ptr
);
3199 struct target_ipc_kludge
{
3204 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
3205 ret
= -TARGET_EFAULT
;
3209 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
3211 unlock_user_struct(tmp
, ptr
, 0);
3215 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
3224 raddr
= do_shmat(first
, ptr
, second
);
3225 if (is_error(raddr
))
3226 return get_errno(raddr
);
3227 if (put_user_ual(raddr
, third
))
3228 return -TARGET_EFAULT
;
3232 ret
= -TARGET_EINVAL
;
3237 ret
= do_shmdt(ptr
);
3241 /* IPC_* flag values are the same on all linux platforms */
3242 ret
= get_errno(shmget(first
, second
, third
));
3245 /* IPC_* and SHM_* command values are the same on all linux platforms */
3247 ret
= do_shmctl(first
, second
, third
);
3250 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
3251 ret
= -TARGET_ENOSYS
;
3258 /* kernel structure types definitions */
3260 #define STRUCT(name, ...) STRUCT_ ## name,
3261 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3263 #include "syscall_types.h"
3266 #undef STRUCT_SPECIAL
3268 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3269 #define STRUCT_SPECIAL(name)
3270 #include "syscall_types.h"
3272 #undef STRUCT_SPECIAL
3274 typedef struct IOCTLEntry IOCTLEntry
;
3276 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3277 int fd
, abi_long cmd
, abi_long arg
);
3280 unsigned int target_cmd
;
3281 unsigned int host_cmd
;
3284 do_ioctl_fn
*do_ioctl
;
3285 const argtype arg_type
[5];
3288 #define IOC_R 0x0001
3289 #define IOC_W 0x0002
3290 #define IOC_RW (IOC_R | IOC_W)
3292 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))
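/* Note (added commentary): with the bound above, the later computation
 *   outbufsz = sizeof(*fm) + sizeof(struct fiemap_extent) * fm->fm_extent_count
 * cannot wrap around a 32-bit size, so the guest cannot trick the allocation
 * into being smaller than the extent array it asks for. */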
3302 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3303 int fd
, abi_long cmd
, abi_long arg
)
3305 /* The parameter for this ioctl is a struct fiemap followed
3306 * by an array of struct fiemap_extent whose size is set
3307 * in fiemap->fm_extent_count. The array is filled in by the
3310 int target_size_in
, target_size_out
;
3312 const argtype
*arg_type
= ie
->arg_type
;
3313 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3316 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3320 assert(arg_type
[0] == TYPE_PTR
);
3321 assert(ie
->access
== IOC_RW
);
3323 target_size_in
= thunk_type_size(arg_type
, 0);
3324 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3326 return -TARGET_EFAULT
;
3328 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3329 unlock_user(argptr
, arg
, 0);
3330 fm
= (struct fiemap
*)buf_temp
;
3331 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3332 return -TARGET_EINVAL
;
3335 outbufsz
= sizeof (*fm
) +
3336 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3338 if (outbufsz
> MAX_STRUCT_SIZE
) {
3339 /* We can't fit all the extents into the fixed size buffer.
3340 * Allocate one that is large enough and use it instead.
3342 fm
= malloc(outbufsz
);
3344 return -TARGET_ENOMEM
;
3346 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3349 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3350 if (!is_error(ret
)) {
3351 target_size_out
= target_size_in
;
3352 /* An extent_count of 0 means we were only counting the extents
3353 * so there are no structs to copy
3355 if (fm
->fm_extent_count
!= 0) {
3356 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3358 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3360 ret
= -TARGET_EFAULT
;
3362 /* Convert the struct fiemap */
3363 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3364 if (fm
->fm_extent_count
!= 0) {
3365 p
= argptr
+ target_size_in
;
3366 /* ...and then all the struct fiemap_extents */
3367 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3368 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3373 unlock_user(argptr
, arg
, target_size_out
);
3383 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3384 int fd
, abi_long cmd
, abi_long arg
)
3386 const argtype
*arg_type
= ie
->arg_type
;
3390 struct ifconf
*host_ifconf
;
3392 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3393 int target_ifreq_size
;
3398 abi_long target_ifc_buf
;
3402 assert(arg_type
[0] == TYPE_PTR
);
3403 assert(ie
->access
== IOC_RW
);
3406 target_size
= thunk_type_size(arg_type
, 0);
3408 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3410 return -TARGET_EFAULT
;
3411 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3412 unlock_user(argptr
, arg
, 0);
3414 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3415 target_ifc_len
= host_ifconf
->ifc_len
;
3416 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3418 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3419 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3420 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3422 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3423 if (outbufsz
> MAX_STRUCT_SIZE
) {
3424 /* We can't fit all the extents into the fixed size buffer.
3425 * Allocate one that is large enough and use it instead.
3427 host_ifconf
= malloc(outbufsz
);
3429 return -TARGET_ENOMEM
;
3431 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3434 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3436 host_ifconf
->ifc_len
= host_ifc_len
;
3437 host_ifconf
->ifc_buf
= host_ifc_buf
;
3439 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3440 if (!is_error(ret
)) {
3441 /* convert host ifc_len to target ifc_len */
3443 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3444 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3445 host_ifconf
->ifc_len
= target_ifc_len
;
3447 /* restore target ifc_buf */
3449 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3451 /* copy struct ifconf to target user */
3453 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3455 return -TARGET_EFAULT
;
3456 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3457 unlock_user(argptr
, arg
, target_size
);
3459 /* copy ifreq[] to target user */
3461 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3462 for (i
= 0; i
< nb_ifreq
; i
++) {
3463 thunk_convert(argptr
+ i
* target_ifreq_size
,
3464 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3465 ifreq_arg_type
, THUNK_TARGET
);
3467 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3477 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3478 abi_long cmd
, abi_long arg
)
3481 struct dm_ioctl
*host_dm
;
3482 abi_long guest_data
;
3483 uint32_t guest_data_size
;
3485 const argtype
*arg_type
= ie
->arg_type
;
3487 void *big_buf
= NULL
;
3491 target_size
= thunk_type_size(arg_type
, 0);
3492 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3494 ret
= -TARGET_EFAULT
;
3497 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3498 unlock_user(argptr
, arg
, 0);
3500 /* buf_temp is too small, so fetch things into a bigger buffer */
3501 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3502 memcpy(big_buf
, buf_temp
, target_size
);
3506 guest_data
= arg
+ host_dm
->data_start
;
3507 if ((guest_data
- arg
) < 0) {
3511 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3512 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3514 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3515 switch (ie
->host_cmd
) {
3517 case DM_LIST_DEVICES
:
3520 case DM_DEV_SUSPEND
:
3523 case DM_TABLE_STATUS
:
3524 case DM_TABLE_CLEAR
:
3526 case DM_LIST_VERSIONS
:
3530 case DM_DEV_SET_GEOMETRY
:
3531 /* data contains only strings */
3532 memcpy(host_data
, argptr
, guest_data_size
);
3535 memcpy(host_data
, argptr
, guest_data_size
);
3536 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3540 void *gspec
= argptr
;
3541 void *cur_data
= host_data
;
3542 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3543 int spec_size
= thunk_type_size(arg_type
, 0);
3546 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3547 struct dm_target_spec
*spec
= cur_data
;
3551 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3552 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3554 spec
->next
= sizeof(*spec
) + slen
;
3555 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3557 cur_data
+= spec
->next
;
3562 ret
= -TARGET_EINVAL
;
3565 unlock_user(argptr
, guest_data
, 0);
3567 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3568 if (!is_error(ret
)) {
3569 guest_data
= arg
+ host_dm
->data_start
;
3570 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3571 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3572 switch (ie
->host_cmd
) {
3577 case DM_DEV_SUSPEND
:
3580 case DM_TABLE_CLEAR
:
3582 case DM_DEV_SET_GEOMETRY
:
3583 /* no return data */
3585 case DM_LIST_DEVICES
:
3587 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3588 uint32_t remaining_data
= guest_data_size
;
3589 void *cur_data
= argptr
;
3590 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3591 int nl_size
= 12; /* can't use thunk_size due to alignment */
3594 uint32_t next
= nl
->next
;
3596 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3598 if (remaining_data
< nl
->next
) {
3599 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3602 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3603 strcpy(cur_data
+ nl_size
, nl
->name
);
3604 cur_data
+= nl
->next
;
3605 remaining_data
-= nl
->next
;
3609 nl
= (void*)nl
+ next
;
3614 case DM_TABLE_STATUS
:
3616 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3617 void *cur_data
= argptr
;
3618 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3619 int spec_size
= thunk_type_size(arg_type
, 0);
3622 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3623 uint32_t next
= spec
->next
;
3624 int slen
= strlen((char*)&spec
[1]) + 1;
3625 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3626 if (guest_data_size
< spec
->next
) {
3627 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3630 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3631 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3632 cur_data
= argptr
+ spec
->next
;
3633 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3639 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3640 int count
= *(uint32_t*)hdata
;
3641 uint64_t *hdev
= hdata
+ 8;
3642 uint64_t *gdev
= argptr
+ 8;
3645 *(uint32_t*)argptr
= tswap32(count
);
3646 for (i
= 0; i
< count
; i
++) {
3647 *gdev
= tswap64(*hdev
);
3653 case DM_LIST_VERSIONS
:
3655 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3656 uint32_t remaining_data
= guest_data_size
;
3657 void *cur_data
= argptr
;
3658 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3659 int vers_size
= thunk_type_size(arg_type
, 0);
3662 uint32_t next
= vers
->next
;
3664 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3666 if (remaining_data
< vers
->next
) {
3667 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3670 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3671 strcpy(cur_data
+ vers_size
, vers
->name
);
3672 cur_data
+= vers
->next
;
3673 remaining_data
-= vers
->next
;
3677 vers
= (void*)vers
+ next
;
3682 ret
= -TARGET_EINVAL
;
3685 unlock_user(argptr
, guest_data
, guest_data_size
);
3687 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3689 ret
= -TARGET_EFAULT
;
3692 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3693 unlock_user(argptr
, arg
, target_size
);
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
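/* Note (added commentary): each IOCTL() line in the included table expands to
 * one IOCTLEntry initializer; as a hypothetical example,
 *   IOCTL(BLKROGET, IOC_R, MK_PTR(TYPE_INT))
 * would become
 *   { TARGET_BLKROGET, BLKROGET, "BLKROGET", IOC_R, 0, { MK_PTR(TYPE_INT) } },
 * so do_ioctl() can translate the command number and thunk the argument. */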
3709 /* ??? Implement proper locking for ioctls. */
3710 /* do_ioctl() Must return target values and target errnos. */
3711 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3713 const IOCTLEntry
*ie
;
3714 const argtype
*arg_type
;
3716 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3722 if (ie
->target_cmd
== 0) {
3723 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3724 return -TARGET_ENOSYS
;
3726 if (ie
->target_cmd
== cmd
)
3730 arg_type
= ie
->arg_type
;
3732 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3735 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3738 switch(arg_type
[0]) {
3741 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3746 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3750 target_size
= thunk_type_size(arg_type
, 0);
3751 switch(ie
->access
) {
3753 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3754 if (!is_error(ret
)) {
3755 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3757 return -TARGET_EFAULT
;
3758 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3759 unlock_user(argptr
, arg
, target_size
);
3763 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3765 return -TARGET_EFAULT
;
3766 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3767 unlock_user(argptr
, arg
, 0);
3768 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3772 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3774 return -TARGET_EFAULT
;
3775 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3776 unlock_user(argptr
, arg
, 0);
3777 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3778 if (!is_error(ret
)) {
3779 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3781 return -TARGET_EFAULT
;
3782 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3783 unlock_user(argptr
, arg
, target_size
);
3789 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3790 (long)cmd
, arg_type
[0]);
3791 ret
= -TARGET_ENOSYS
;
3797 static const bitmask_transtbl iflag_tbl
[] = {
3798 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3799 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3800 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3801 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3802 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3803 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3804 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3805 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3806 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3807 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3808 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3809 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3810 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3811 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3815 static const bitmask_transtbl oflag_tbl
[] = {
3816 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3817 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3818 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3819 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3820 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3821 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3822 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3823 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3824 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3825 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3826 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3827 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3828 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3829 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3830 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3831 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3832 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3833 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3834 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3835 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3836 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3837 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3838 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3839 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3843 static const bitmask_transtbl cflag_tbl
[] = {
3844 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3845 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3846 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3847 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3848 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3849 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3850 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3851 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3852 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3853 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3854 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3855 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3856 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3857 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3858 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3859 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3860 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3861 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3862 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3863 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3864 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3865 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3866 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3867 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3868 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3869 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3870 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3871 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3872 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3873 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3874 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3878 static const bitmask_transtbl lflag_tbl
[] = {
3879 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3880 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3881 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3882 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3883 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3884 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3885 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3886 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3887 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3888 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3889 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3890 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3891 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3892 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3893 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
};
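/* Note (added commentary): registering both converters in a StructEntry lets
 * the generic thunk code pick the conversion direction for the termios
 * ioctls (host_to_target when copying results to the guest, target_to_host
 * when reading the guest's argument), instead of each TCGETS/TCSETS handler
 * doing the byte-swapping by hand. */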
3973 static bitmask_transtbl mmap_flags_tbl
[] = {
3974 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3975 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3976 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3977 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3978 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3979 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3980 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3981 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3985 #if defined(TARGET_I386)
3987 /* NOTE: there is really one LDT for all the threads */
3988 static uint8_t *ldt_table
;
3990 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3997 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3998 if (size
> bytecount
)
4000 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
4002 return -TARGET_EFAULT
;
4003 /* ??? Should this by byteswapped? */
4004 memcpy(p
, ldt_table
, size
);
4005 unlock_user(p
, ptr
, size
);
4009 /* XXX: add locking support */
4010 static abi_long
write_ldt(CPUX86State
*env
,
4011 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
4013 struct target_modify_ldt_ldt_s ldt_info
;
4014 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4015 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4016 int seg_not_present
, useable
, lm
;
4017 uint32_t *lp
, entry_1
, entry_2
;
4019 if (bytecount
!= sizeof(ldt_info
))
4020 return -TARGET_EINVAL
;
4021 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
4022 return -TARGET_EFAULT
;
4023 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4024 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4025 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4026 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4027 unlock_user_struct(target_ldt_info
, ptr
, 0);
4029 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
4030 return -TARGET_EINVAL
;
4031 seg_32bit
= ldt_info
.flags
& 1;
4032 contents
= (ldt_info
.flags
>> 1) & 3;
4033 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4034 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4035 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4036 useable
= (ldt_info
.flags
>> 6) & 1;
4040 lm
= (ldt_info
.flags
>> 7) & 1;
4042 if (contents
== 3) {
4044 return -TARGET_EINVAL
;
4045 if (seg_not_present
== 0)
4046 return -TARGET_EINVAL
;
4048 /* allocate the LDT */
4050 env
->ldt
.base
= target_mmap(0,
4051 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
4052 PROT_READ
|PROT_WRITE
,
4053 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4054 if (env
->ldt
.base
== -1)
4055 return -TARGET_ENOMEM
;
4056 memset(g2h(env
->ldt
.base
), 0,
4057 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
4058 env
->ldt
.limit
= 0xffff;
4059 ldt_table
= g2h(env
->ldt
.base
);
4062 /* NOTE: same code as Linux kernel */
4063 /* Allow LDTs to be cleared by the user. */
4064 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4067 read_exec_only
== 1 &&
4069 limit_in_pages
== 0 &&
4070 seg_not_present
== 1 &&
4078 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4079 (ldt_info
.limit
& 0x0ffff);
4080 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4081 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4082 (ldt_info
.limit
& 0xf0000) |
4083 ((read_exec_only
^ 1) << 9) |
4085 ((seg_not_present
^ 1) << 15) |
4087 (limit_in_pages
<< 23) |
4091 entry_2
|= (useable
<< 20);
4093 /* Install the new entry ... */
4095 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4096 lp
[0] = tswap32(entry_1
);
4097 lp
[1] = tswap32(entry_2
);
4101 /* specific and weird i386 syscalls */
4102 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4103 unsigned long bytecount
)
4109 ret
= read_ldt(ptr
, bytecount
);
4112 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4115 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4118 ret
= -TARGET_ENOSYS
;
4124 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4125 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4127 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4128 struct target_modify_ldt_ldt_s ldt_info
;
4129 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4130 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4131 int seg_not_present
, useable
, lm
;
4132 uint32_t *lp
, entry_1
, entry_2
;
4135 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4136 if (!target_ldt_info
)
4137 return -TARGET_EFAULT
;
4138 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4139 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4140 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4141 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4142 if (ldt_info
.entry_number
== -1) {
4143 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4144 if (gdt_table
[i
] == 0) {
4145 ldt_info
.entry_number
= i
;
4146 target_ldt_info
->entry_number
= tswap32(i
);
4151 unlock_user_struct(target_ldt_info
, ptr
, 1);
4153 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4154 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4155 return -TARGET_EINVAL
;
4156 seg_32bit
= ldt_info
.flags
& 1;
4157 contents
= (ldt_info
.flags
>> 1) & 3;
4158 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4159 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4160 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4161 useable
= (ldt_info
.flags
>> 6) & 1;
4165 lm
= (ldt_info
.flags
>> 7) & 1;
4168 if (contents
== 3) {
4169 if (seg_not_present
== 0)
4170 return -TARGET_EINVAL
;
4173 /* NOTE: same code as Linux kernel */
4174 /* Allow LDTs to be cleared by the user. */
4175 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4176 if ((contents
== 0 &&
4177 read_exec_only
== 1 &&
4179 limit_in_pages
== 0 &&
4180 seg_not_present
== 1 &&
4188 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4189 (ldt_info
.limit
& 0x0ffff);
4190 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4191 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4192 (ldt_info
.limit
& 0xf0000) |
4193 ((read_exec_only
^ 1) << 9) |
4195 ((seg_not_present
^ 1) << 15) |
4197 (limit_in_pages
<< 23) |
4202 /* Install the new entry ... */
4204 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4205 lp
[0] = tswap32(entry_1
);
4206 lp
[1] = tswap32(entry_2
);
4210 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4212 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4213 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4214 uint32_t base_addr
, limit
, flags
;
4215 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4216 int seg_not_present
, useable
, lm
;
4217 uint32_t *lp
, entry_1
, entry_2
;
4219 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4220 if (!target_ldt_info
)
4221 return -TARGET_EFAULT
;
4222 idx
= tswap32(target_ldt_info
->entry_number
);
4223 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4224 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4225 unlock_user_struct(target_ldt_info
, ptr
, 1);
4226 return -TARGET_EINVAL
;
4228 lp
= (uint32_t *)(gdt_table
+ idx
);
4229 entry_1
= tswap32(lp
[0]);
4230 entry_2
= tswap32(lp
[1]);
4232 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4233 contents
= (entry_2
>> 10) & 3;
4234 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4235 seg_32bit
= (entry_2
>> 22) & 1;
4236 limit_in_pages
= (entry_2
>> 23) & 1;
4237 useable
= (entry_2
>> 20) & 1;
4241 lm
= (entry_2
>> 21) & 1;
4243 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4244 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4245 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4246 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4247 base_addr
= (entry_1
>> 16) |
4248 (entry_2
& 0xff000000) |
4249 ((entry_2
& 0xff) << 16);
4250 target_ldt_info
->base_addr
= tswapal(base_addr
);
4251 target_ldt_info
->limit
= tswap32(limit
);
4252 target_ldt_info
->flags
= tswap32(flags
);
4253 unlock_user_struct(target_ldt_info
, ptr
, 1);
4256 #endif /* TARGET_I386 && TARGET_ABI32 */
4258 #ifndef TARGET_ABI32
4259 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4266 case TARGET_ARCH_SET_GS
:
4267 case TARGET_ARCH_SET_FS
:
4268 if (code
== TARGET_ARCH_SET_GS
)
4272 cpu_x86_load_seg(env
, idx
, 0);
4273 env
->segs
[idx
].base
= addr
;
4275 case TARGET_ARCH_GET_GS
:
4276 case TARGET_ARCH_GET_FS
:
4277 if (code
== TARGET_ARCH_GET_GS
)
4281 val
= env
->segs
[idx
].base
;
4282 if (put_user(val
, addr
, abi_ulong
))
4283 ret
= -TARGET_EFAULT
;
4286 ret
= -TARGET_EINVAL
;
4293 #endif /* defined(TARGET_I386) */
4295 #define NEW_STACK_SIZE 0x40000
4297 #if defined(CONFIG_USE_NPTL)
4299 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4302 pthread_mutex_t mutex
;
4303 pthread_cond_t cond
;
4306 abi_ulong child_tidptr
;
4307 abi_ulong parent_tidptr
;
4311 static void *clone_func(void *arg
)
4313 new_thread_info
*info
= arg
;
4319 ts
= (TaskState
*)thread_env
->opaque
;
4320 info
->tid
= gettid();
4321 env
->host_tid
= info
->tid
;
4323 if (info
->child_tidptr
)
4324 put_user_u32(info
->tid
, info
->child_tidptr
);
4325 if (info
->parent_tidptr
)
4326 put_user_u32(info
->tid
, info
->parent_tidptr
);
4327 /* Enable signals. */
4328 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4329 /* Signal to the parent that we're ready. */
4330 pthread_mutex_lock(&info
->mutex
);
4331 pthread_cond_broadcast(&info
->cond
);
4332 pthread_mutex_unlock(&info
->mutex
);
4333 /* Wait until the parent has finshed initializing the tls state. */
4334 pthread_mutex_lock(&clone_lock
);
4335 pthread_mutex_unlock(&clone_lock
);
4342 static int clone_func(void *arg
)
4344 CPUArchState
*env
= arg
;
4351 /* do_fork() Must return host values and target errnos (unlike most
4352 do_*() functions). */
4353 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4354 abi_ulong parent_tidptr
, target_ulong newtls
,
4355 abi_ulong child_tidptr
)
4359 CPUArchState
*new_env
;
4360 #if defined(CONFIG_USE_NPTL)
4361 unsigned int nptl_flags
;
4367 /* Emulate vfork() with fork() */
4368 if (flags
& CLONE_VFORK
)
4369 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4371 if (flags
& CLONE_VM
) {
4372 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
4373 #if defined(CONFIG_USE_NPTL)
4374 new_thread_info info
;
4375 pthread_attr_t attr
;
4377 ts
= g_malloc0(sizeof(TaskState
));
4378 init_task_state(ts
);
4379 /* we create a new CPU instance. */
4380 new_env
= cpu_copy(env
);
4381 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4382 cpu_reset(ENV_GET_CPU(new_env
));
4384 /* Init regs that differ from the parent. */
4385 cpu_clone_regs(new_env
, newsp
);
4386 new_env
->opaque
= ts
;
4387 ts
->bprm
= parent_ts
->bprm
;
4388 ts
->info
= parent_ts
->info
;
4389 #if defined(CONFIG_USE_NPTL)
4391 flags
&= ~CLONE_NPTL_FLAGS2
;
4393 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4394 ts
->child_tidptr
= child_tidptr
;
4397 if (nptl_flags
& CLONE_SETTLS
)
4398 cpu_set_tls (new_env
, newtls
);
4400 /* Grab a mutex so that thread setup appears atomic. */
4401 pthread_mutex_lock(&clone_lock
);
4403 memset(&info
, 0, sizeof(info
));
4404 pthread_mutex_init(&info
.mutex
, NULL
);
4405 pthread_mutex_lock(&info
.mutex
);
4406 pthread_cond_init(&info
.cond
, NULL
);
4408 if (nptl_flags
& CLONE_CHILD_SETTID
)
4409 info
.child_tidptr
= child_tidptr
;
4410 if (nptl_flags
& CLONE_PARENT_SETTID
)
4411 info
.parent_tidptr
= parent_tidptr
;
4413 ret
= pthread_attr_init(&attr
);
4414 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4415 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4416 /* It is not safe to deliver signals until the child has finished
4417 initializing, so temporarily block all signals. */
4418 sigfillset(&sigmask
);
4419 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4421 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4422 /* TODO: Free new CPU state if thread creation failed. */
4424 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4425 pthread_attr_destroy(&attr
);
4427 /* Wait for the child to initialize. */
4428 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4430 if (flags
& CLONE_PARENT_SETTID
)
4431 put_user_u32(ret
, parent_tidptr
);
4435 pthread_mutex_unlock(&info
.mutex
);
4436 pthread_cond_destroy(&info
.cond
);
4437 pthread_mutex_destroy(&info
.mutex
);
4438 pthread_mutex_unlock(&clone_lock
);
        if (flags & CLONE_NPTL_FLAGS2)
        /* This is probably going to die very quickly, but do it anyway.  */
        new_stack = g_malloc0 (NEW_STACK_SIZE);
        ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
        ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);

        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
#if defined(CONFIG_USE_NPTL)
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)env->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_GETLK:
    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
    case TARGET_F_GETOWN:
    case TARGET_F_SETOWN:
    case TARGET_F_GETSIG:
    case TARGET_F_SETSIG:
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
    case TARGET_F_NOTIFY:
        return -TARGET_EINVAL;
    return -TARGET_EINVAL;

#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
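/* Added explanation: judging from the TRANSTBL_CONVERT() macro, each entry
   maps one target F_*LCK value onto the corresponding host value (the -1
   fields being the match masks), so target_to_host_bitmask() and
   host_to_target_bitmask() can translate the l_type member of struct flock
   in either direction below. */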
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)

    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
            return -TARGET_EFAULT;
        target_fl->l_type =
            host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
        target_fl->l_whence = tswap16(fl.l_whence);
        target_fl->l_start = tswapal(fl.l_start);
        target_fl->l_len = tswapal(fl.l_len);
        target_fl->l_pid = tswap32(fl.l_pid);
        unlock_user_struct(target_fl, arg, 1);

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
            return -TARGET_EFAULT;
        target_fl64->l_type =
            host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
        target_fl64->l_whence = tswap16(fl64.l_whence);
        target_fl64->l_start = tswap64(fl64.l_start);
        target_fl64->l_len = tswap64(fl64.l_len);
        target_fl64->l_pid = tswap32(fl64.l_pid);
        unlock_user_struct(target_fl64, arg, 1);

    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));

    case TARGET_F_GETFL:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        ret = host_to_target_bitmask(ret, fcntl_flags_tbl);

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        ret = get_errno(fcntl(fd, host_cmd, arg));

        ret = get_errno(fcntl(fd, cmd, arg));
static inline int high2lowuid(int uid)
static inline int high2lowgid(int gid)
static inline int low2highuid(int uid)
    if ((int16_t)uid == -1)
static inline int low2highgid(int gid)
    if ((int16_t)gid == -1)
static inline int tswapid(int id)
#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
static inline int high2lowgid(int gid)
static inline int low2highuid(int uid)
static inline int low2highgid(int gid)
static inline int tswapid(int id)
#endif /* USE_UID16 */
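/* Added explanation (the helper bodies are elided above): with USE_UID16 the
   target's legacy 16-bit uid/gid syscalls are emulated — the low2high helpers
   widen a 16-bit id, treating (int16_t)-1 as the "unchanged" value, while the
   high2low helpers squeeze a host id back into 16 bits.  Without USE_UID16
   they are presumably plain identity maps. */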
void syscall_init(void)
    const argtype *arg_type;

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
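    /* Added explanation: a table entry whose declared size field is all ones
       (TARGET_IOC_SIZEMASK) means "size not known when the table was built";
       for those entries the real size of the TYPE_PTR argument is computed
       with thunk_type_size() and patched into the command number here, so
       later argument copies use the correct length. */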
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
    return ((uint64_t)word1 << 32) | word0;
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
    if (regpairs_aligned(cpu_env)) {
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
    if (regpairs_aligned(cpu_env)) {
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
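/* Added explanation: on a 32-bit guest ABI a 64-bit file offset arrives as a
   register pair, e.g. a guest ftruncate64(fd, off) passes the low and high
   halves of off in two syscall arguments; target_offset64() reassembles them
   according to the target's endianness, and regpairs_aligned() accounts for
   ABIs that force such pairs onto even-numbered registers. */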
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapal(target_ts->tv_sec);
    host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapal(host_ts->tv_sec);
    target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);

#if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
        struct target_stat *target_st;
        struct target_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
    struct timespec ts, *pts;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
            target_to_host_timespec(pts, timeout);
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_CMP_REQUEUE:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                         (base_op == FUTEX_CMP_REQUEUE
        return -TARGET_ENOSYS;
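/* Added explanation: because the guest runs inside this process, g2h(uaddr)
   turns the guest futex address into an ordinary host pointer and the host
   futex syscall operates on the very same memory word.  Only the FUTEX_WAIT
   path byte-swaps val with tswap32(), since the kernel compares that value
   against the word in guest byte order. */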
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)

int get_osversion(void)
    static int osversion;
    struct new_utsname buf;

    if (qemu_uname_release && *qemu_uname_release) {
        s = qemu_uname_release;
        if (sys_uname(&buf))
    for (i = 0; i < 3; i++) {
        while (*s >= '0' && *s <= '9') {
        tmp = (tmp << 8) + n;
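    /* Added explanation: the loop above packs a "major.minor.micro" release
       string into a single integer, one byte per component, e.g. "2.6.32"
       becomes (2 << 16) | (6 << 8) | 32 — the same layout as the kernel's
       KERNEL_VERSION() macro. */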
static int open_self_maps(void *cpu_env, int fd)
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    TaskState *ts = ((CPUArchState *)cpu_env)->opaque;

    fp = fopen("/proc/self/maps", "r");
    while ((read = getline(&line, &len, fp)) != -1) {
        int fields, dev_maj, dev_min, inode;
        uint64_t min, max, offset;
        char flag_r, flag_w, flag_x, flag_p;
        char path[512] = "";
        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);

        if ((fields < 10) || (fields > 11)) {
        if (!strncmp(path, "[stack]", 7)) {
        if (h2g_valid(min) && h2g_valid(max)) {
            dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
                    h2g(min), h2g(max), flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? " " : "", path);

#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
            (unsigned long long)ts->info->stack_limit,
            (unsigned long long)(ts->info->start_stack +
                                 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
            (unsigned long long)0);
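    /* Added explanation: the synthesized /proc/self/maps only re-emits host
       mappings that fall inside the guest address space (h2g_valid()),
       rewriting the addresses with h2g() so the guest sees its own view of
       memory; on the targets listed above a fake [stack] line covering the
       guest stack is appended as well. */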
static int open_self_stat(void *cpu_env, int fd)
    TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
    abi_ulong start_stack = ts->info->start_stack;

    for (i = 0; i < 44; i++) {
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');

        if (write(fd, buf, len) != len) {

static int open_self_auxv(void *cpu_env, int fd)
    TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
        r = write(fd, ptr, len);
    lseek(fd, 0, SEEK_SET);
    unlock_user(ptr, auxv, len);
static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
    const char *filename;
    int (*fill)(void *cpu_env, int fd);
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "/proc/self/maps", open_self_maps },
        { "/proc/self/stat", open_self_stat },
        { "/proc/self/auxv", open_self_auxv },

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (!strncmp(pathname, fake_open->filename,
                     strlen(fake_open->filename))) {

    if (fake_open->filename) {
        char filename[PATH_MAX];

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
        lseek(fd, 0, SEEK_SET);

    return get_errno(open(path(pathname), flags, mode));
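/* Added explanation of the interception above: a guest open() of
   "/proc/self/maps", "/proc/self/stat" or "/proc/self/auxv" never reaches the
   host /proc.  Instead a temporary file is created under $TMPDIR, the matching
   fill() callback writes guest-adjusted contents into it, and that descriptor
   is returned; any other path falls through to the real host open(). */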
/* do_syscall() should always have a single exit point at the end so
   that actions, such as logging of syscall results, can be performed.
   All errnos that do_syscall() returns must be -TARGET_<errcode>. */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
        gemu_log("syscall %d", num);
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
    case TARGET_NR_exit:
#ifdef CONFIG_USE_NPTL
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */
        /* FIXME: This probably breaks if a signal arrives.  We should probably
           be disabling signals.  */
        if (first_cpu->next_cpu) {
            CPUArchState **lastp;

            while (p && p != (CPUArchState *)cpu_env) {
                lastp = &p->next_cpu;
            /* If we didn't find the CPU for this thread then something is
               horribly wrong.  */
            /* Remove the CPU from the list.  */
            *lastp = p->next_cpu;
            ts = ((CPUArchState *)cpu_env)->opaque;
            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
            object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
        gdb_exit(cpu_env, arg1);
        ret = 0; /* avoid warning */
:
5238 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5240 ret
= get_errno(read(arg1
, p
, arg3
));
5241 unlock_user(p
, arg2
, ret
);
5244 case TARGET_NR_write
:
5245 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5247 ret
= get_errno(write(arg1
, p
, arg3
));
5248 unlock_user(p
, arg2
, 0);
5250 case TARGET_NR_open
:
5251 if (!(p
= lock_user_string(arg1
)))
5253 ret
= get_errno(do_open(cpu_env
, p
,
5254 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5256 unlock_user(p
, arg1
, 0);
5258 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5259 case TARGET_NR_openat
:
5260 if (!(p
= lock_user_string(arg2
)))
5262 ret
= get_errno(sys_openat(arg1
,
5264 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5266 unlock_user(p
, arg2
, 0);
5269 case TARGET_NR_close
:
5270 ret
= get_errno(close(arg1
));
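        /* Added explanation: lock_user()/unlock_user() are the guest-memory
           access pattern used throughout this switch — they validate and map
           a guest buffer into host memory (VERIFY_READ/VERIFY_WRITE), and the
           length given to unlock_user() says how many bytes were actually
           written back to the guest (0 when nothing was modified). */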
    case TARGET_NR_fork:
        ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
            ret = get_errno(waitpid(arg1, &status, arg3));
            if (!is_error(ret) && arg2 && ret
                && put_user_s32(host_to_target_waitstatus(status), arg2))
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
            ret = get_errno(waitid(arg1, arg2, &info, arg4));
            if (!is_error(ret) && arg3 && info.si_pid != 0) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
                host_to_target_siginfo(p, &info);
                unlock_user(p, arg3, sizeof(target_siginfo_t));
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(creat(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_link:
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
                ret = -TARGET_EFAULT;
                ret = get_errno(link(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
    case TARGET_NR_linkat:
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
                ret = -TARGET_EFAULT;
                ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
            unlock_user(p, arg2, 0);
            unlock_user(p2, arg4, 0);
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_execve:
            char **argp, **envp;
            abi_ulong guest_argp;
            abi_ulong guest_envp;

            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))

            argp = alloca((argc + 1) * sizeof(void *));
            envp = alloca((envc + 1) * sizeof(void *));

            for (gp = guest_argp, q = argp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                if (!(*q = lock_user_string(addr)))
                total_size += strlen(*q) + 1;
            for (gp = guest_envp, q = envp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                if (!(*q = lock_user_string(addr)))
                total_size += strlen(*q) + 1;

            /* This case will not be caught by the host's execve() if its
               page size is bigger than the target's. */
            if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
                ret = -TARGET_E2BIG;
            if (!(p = lock_user_string(arg1)))
            ret = get_errno(execve(p, argp, envp));
            unlock_user(p, arg1, 0);

            ret = -TARGET_EFAULT;

            for (gp = guest_argp, q = argp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                unlock_user(*q, addr, 0);
            for (gp = guest_envp, q = envp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                unlock_user(*q, addr, 0);
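        /* Added explanation: the execve path above copies the guest argv/envp
           pointer arrays into host arrays of locked strings before calling the
           host execve(), and rejects argument lists larger than
           MAX_ARG_PAGES * TARGET_PAGE_SIZE so an over-long command line fails
           with E2BIG the way the guest kernel would make it fail. */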
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_time
    case TARGET_NR_time:
            ret = get_errno(time(&host_time));
                && put_user_sal(host_time, arg1))
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_break
    case TARGET_NR_break:
#ifdef TARGET_NR_oldstat
    case TARGET_NR_oldstat:
    case TARGET_NR_lseek:
        ret = get_errno(lseek(arg1, arg2, arg3));
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
        ret = get_errno(getpid());
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        ret = get_errno(getpid());
    case TARGET_NR_mount:
            /* need to look at the data field */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            p3 = lock_user_string(arg3);
            if (!p || !p2 || !p3)
                ret = -TARGET_EFAULT;
                /* FIXME - arg5 should be locked, but it isn't clear how to
                 * do that since it's not guaranteed to be a NULL-terminated
                 * string. */
                    ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
                    ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
            unlock_user(p, arg1, 0);
            unlock_user(p2, arg2, 0);
            unlock_user(p3, arg3, 0);
#ifdef TARGET_NR_umount
    case TARGET_NR_umount:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
            if (get_user_sal(host_time, arg1))
            ret = get_errno(stime(&host_time));
    case TARGET_NR_ptrace:
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
#ifdef TARGET_NR_oldfstat
    case TARGET_NR_oldfstat:
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        ret = get_errno(pause());
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
            if (!(p = lock_user_string(arg1)))
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
    case TARGET_NR_utimes:
            struct timeval *tvp, tv[2];
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
            if (!(p = lock_user_string(arg1)))
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
    case TARGET_NR_futimesat:
            struct timeval *tvp, tv[2];
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
            if (!(p = lock_user_string(arg2)))
            ret = get_errno(sys_futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
#ifdef TARGET_NR_stty
    case TARGET_NR_stty:
#ifdef TARGET_NR_gtty
    case TARGET_NR_gtty:
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_faccessat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        ret = get_errno(nice(arg1));
#ifdef TARGET_NR_ftime
    case TARGET_NR_ftime:
    case TARGET_NR_sync:
    case TARGET_NR_kill:
        ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
    case TARGET_NR_rename:
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
                ret = -TARGET_EFAULT;
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
    case TARGET_NR_renameat:
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
                ret = -TARGET_EFAULT;
                ret = get_errno(sys_renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        ret = get_errno(dup(arg1));
    case TARGET_NR_pipe:
        ret = do_pipe(cpu_env, arg1, 0, 0);
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        ret = do_pipe(cpu_env, arg1,
                      target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
    case TARGET_NR_times:
            struct target_tms *tmsp;
            ret = get_errno(times(&tms));
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
                ret = host_to_target_clock_t(ret);
#ifdef TARGET_NR_prof
    case TARGET_NR_prof:
#ifdef TARGET_NR_signal
    case TARGET_NR_signal:
    case TARGET_NR_acct:
            ret = get_errno(acct(NULL));
            if (!(p = lock_user_string(arg1)))
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
#ifdef TARGET_NR_umount2 /* not on alpha */
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_lock
    case TARGET_NR_lock:
    case TARGET_NR_ioctl:
        ret = do_ioctl(arg1, arg2, arg3);
    case TARGET_NR_fcntl:
        ret = do_fcntl(arg1, arg2, arg3);
#ifdef TARGET_NR_mpx
    case TARGET_NR_setpgid:
        ret = get_errno(setpgid(arg1, arg2));
#ifdef TARGET_NR_ulimit
    case TARGET_NR_ulimit:
#ifdef TARGET_NR_oldolduname
    case TARGET_NR_oldolduname:
    case TARGET_NR_umask:
        ret = get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
    case TARGET_NR_ustat:
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
        ret = get_errno(dup3(arg1, arg2, arg3));
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        ret = get_errno(getppid());
    case TARGET_NR_getpgrp:
        ret = get_errno(getpgrp());
    case TARGET_NR_setsid:
        ret = get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
#if defined(TARGET_ALPHA)
            struct target_sigaction act, oact, *pact = 0;
            struct target_old_sigaction *old_act;
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = 0;
                unlock_user_struct(old_act, arg2, 0);
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                unlock_user_struct(old_act, arg3, 1);
#elif defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = old_act->sa_restorer;
                unlock_user_struct(old_act, arg2, 0);
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_restorer = oact.sa_restorer;
                unlock_user_struct(old_act, arg3, 1);
    case TARGET_NR_rt_sigaction:
#if defined(TARGET_ALPHA)
            struct target_sigaction act, oact, *pact = 0;
            struct target_rt_sigaction *rt_act;
            /* ??? arg4 == sizeof(sigset_t).  */
                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
                act._sa_handler = rt_act->_sa_handler;
                act.sa_mask = rt_act->sa_mask;
                act.sa_flags = rt_act->sa_flags;
                act.sa_restorer = arg5;
                unlock_user_struct(rt_act, arg2, 0);
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
                rt_act->_sa_handler = oact._sa_handler;
                rt_act->sa_mask = oact.sa_mask;
                rt_act->sa_flags = oact.sa_flags;
                unlock_user_struct(rt_act, arg3, 1);
            struct target_sigaction *act;
            struct target_sigaction *oact;
                if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                    ret = -TARGET_EFAULT;
                    goto rt_sigaction_fail;
            ret = get_errno(do_sigaction(arg1, act, oact));
                unlock_user_struct(act, arg2, 0);
                unlock_user_struct(oact, arg3, 1);
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
            abi_ulong target_set;
            sigprocmask(0, NULL, &cur_set);
            host_to_target_old_sigset(&target_set, &cur_set);
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
            sigset_t set, oset, cur_set;
            abi_ulong target_set = arg1;
            sigprocmask(0, NULL, &cur_set);
            target_to_host_old_sigset(&set, &target_set);
            sigorset(&set, &set, &cur_set);
            sigprocmask(SIG_SETMASK, &set, &oset);
            host_to_target_old_sigset(&target_set, &oset);
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            case TARGET_SIG_BLOCK:
            case TARGET_SIG_UNBLOCK:
            case TARGET_SIG_SETMASK:
                ret = -TARGET_EINVAL;
            target_to_host_old_sigset(&set, &mask);
            ret = get_errno(sigprocmask(how, &set, &oldset));
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            sigset_t set, oldset, *set_ptr;
            case TARGET_SIG_BLOCK:
            case TARGET_SIG_UNBLOCK:
            case TARGET_SIG_SETMASK:
                ret = -TARGET_EINVAL;
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
            ret = get_errno(sigprocmask(how, set_ptr, &oldset));
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
    case TARGET_NR_rt_sigprocmask:
            sigset_t set, oldset, *set_ptr;
            case TARGET_SIG_BLOCK:
            case TARGET_SIG_UNBLOCK:
            case TARGET_SIG_SETMASK:
                ret = -TARGET_EINVAL;
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
            ret = get_errno(sigprocmask(how, set_ptr, &oldset));
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
    case TARGET_NR_rt_sigpending:
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&set, &mask);
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
            target_to_host_old_sigset(&set, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(sigsuspend(&set));
    case TARGET_NR_rt_sigsuspend:
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(sigsuspend(&set));
    case TARGET_NR_rt_sigtimedwait:
            struct timespec uts, *puts;
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
                target_to_host_timespec(puts, arg3);
            ret = get_errno(sigtimedwait(&set, &uinfo, puts));
            if (!is_error(ret) && arg2) {
                if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
                host_to_target_siginfo(p, &uinfo);
                unlock_user(p, arg2, sizeof(target_siginfo_t));
    case TARGET_NR_rt_sigqueueinfo:
            if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding is needed */
        ret = do_sigreturn(cpu_env);
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding is needed */
        ret = do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_setrlimit:
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            ret = get_errno(setrlimit(resource, &rlim));
    case TARGET_NR_getrlimit:
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
    case TARGET_NR_getrusage:
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                host_to_target_rusage(arg2, &rusage);
    case TARGET_NR_gettimeofday:
            ret = get_errno(gettimeofday(&tv, NULL));
            if (!is_error(ret)) {
                if (copy_to_user_timeval(arg1, &tv))
    case TARGET_NR_settimeofday:
            if (copy_from_user_timeval(&tv, arg1))
            ret = get_errno(settimeofday(&tv, NULL));
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_S390X) || defined(TARGET_ALPHA)
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
            struct target_sel_arg_struct *sel;
            abi_ulong inp, outp, exp, tvp;
            if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
            nsel = tswapal(sel->n);
            inp = tswapal(sel->inp);
            outp = tswapal(sel->outp);
            exp = tswapal(sel->exp);
            tvp = tswapal(sel->tvp);
            unlock_user_struct(sel, arg1, 0);
            ret = do_select(nsel, inp, outp, exp, tvp);
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
            abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
            fd_set rfds, wfds, efds;
            fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
            struct timespec ts, *ts_ptr;

            /*
             * The 6th arg is actually two args smashed together,
             * so we cannot use the C library.
             */
            abi_ulong arg_sigset, arg_sigsize, *arg7;
            target_sigset_t *target_sigset;

            ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
            ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
            ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);

            /*
             * This takes a timespec, and not a timeval, so we cannot
             * use the do_select() helper ...
             */
                if (target_to_host_timespec(&ts, ts_addr)) {

            /* Extract the two packed args for the sigset */
                sig.size = _NSIG / 8;
                arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
                arg_sigset = tswapal(arg7[0]);
                arg_sigsize = tswapal(arg7[1]);
                unlock_user(arg7, arg6, 0);

                    if (arg_sigsize != sizeof(*target_sigset)) {
                        /* Like the kernel, we enforce correct size sigsets */
                        ret = -TARGET_EINVAL;
                    target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                              sizeof(*target_sigset), 1);
                    if (!target_sigset) {
                    target_to_host_sigset(&set, target_sigset);
                    unlock_user(target_sigset, arg_sigset, 0);

            ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,

            if (!is_error(ret)) {
                if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
                if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
                if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
                if (ts_addr && host_to_target_timespec(ts_addr, &ts))
:
6377 p
= lock_user_string(arg1
);
6378 p2
= lock_user_string(arg2
);
6380 ret
= -TARGET_EFAULT
;
6382 ret
= get_errno(symlink(p
, p2
));
6383 unlock_user(p2
, arg2
, 0);
6384 unlock_user(p
, arg1
, 0);
6387 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6388 case TARGET_NR_symlinkat
:
6391 p
= lock_user_string(arg1
);
6392 p2
= lock_user_string(arg3
);
6394 ret
= -TARGET_EFAULT
;
6396 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
6397 unlock_user(p2
, arg3
, 0);
6398 unlock_user(p
, arg1
, 0);
6402 #ifdef TARGET_NR_oldlstat
6403 case TARGET_NR_oldlstat
:
6406 case TARGET_NR_readlink
:
6409 p
= lock_user_string(arg1
);
6410 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
6412 ret
= -TARGET_EFAULT
;
6414 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
6415 char real
[PATH_MAX
];
6416 temp
= realpath(exec_path
,real
);
6417 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
6418 snprintf((char *)p2
, arg3
, "%s", real
);
6421 ret
= get_errno(readlink(path(p
), p2
, arg3
));
6423 unlock_user(p2
, arg2
, ret
);
6424 unlock_user(p
, arg1
, 0);
6427 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6428 case TARGET_NR_readlinkat
:
6431 p
= lock_user_string(arg2
);
6432 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
6434 ret
= -TARGET_EFAULT
;
6436 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
6437 unlock_user(p2
, arg3
, ret
);
6438 unlock_user(p
, arg2
, 0);
6442 #ifdef TARGET_NR_uselib
6443 case TARGET_NR_uselib
:
6446 #ifdef TARGET_NR_swapon
6447 case TARGET_NR_swapon
:
6448 if (!(p
= lock_user_string(arg1
)))
6450 ret
= get_errno(swapon(p
, arg2
));
6451 unlock_user(p
, arg1
, 0);
6454 case TARGET_NR_reboot
:
6455 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
6456 /* arg4 must be ignored in all other cases */
6457 p
= lock_user_string(arg4
);
6461 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
6462 unlock_user(p
, arg4
, 0);
6464 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
6467 #ifdef TARGET_NR_readdir
6468 case TARGET_NR_readdir
:
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#define MMAP_SHIFT 12
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg6 << MMAP_SHIFT));
    case TARGET_NR_munmap:
        ret = get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
            TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
        ret = get_errno(target_mprotect(arg1, arg2, arg3));
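        /* Added explanation: the PROT_GROWSDOWN special case appears to mirror
           libc's trick of calling mprotect() on the lowest stack page to make
           the whole stack executable — since the guest stack here is a fixed
           mapping rather than a kernel grows-down VMA, the request is widened
           by hand to cover everything from stack_limit up to the original
           start address. */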
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
    /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        ret = get_errno(msync(g2h(arg1), arg2, arg3));
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        ret = get_errno(mlock(g2h(arg1), arg2));
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        ret = get_errno(munlock(g2h(arg1), arg2));
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        ret = get_errno(mlockall(arg1));
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        ret = get_errno(munlockall());
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_ftruncate:
        ret = get_errno(ftruncate(arg1, arg2));
    case TARGET_NR_fchmod:
        ret = get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_fchmodat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            ret = -host_to_target_errno(errno);
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
        /* Return value is a biased priority to avoid negative numbers.  */
    case TARGET_NR_setpriority:
        ret = get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_profil
    case TARGET_NR_profil:
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;
            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;
            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#ifdef TARGET_NR_ioperm
    case TARGET_NR_ioperm:
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        ret = do_socketcall(arg1, arg2);
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        ret = do_accept(arg1, arg2, arg3);
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        ret = do_bind(arg1, arg2, arg3);
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        ret = do_connect(arg1, arg2, arg3);
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        ret = do_getpeername(arg1, arg2, arg3);
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        ret = do_getsockname(arg1, arg2, arg3);
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        ret = get_errno(listen(arg1, arg2));
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        ret = get_errno(shutdown(arg1, arg2));
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        ret = do_socket(arg1, arg2, arg3);
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        ret = do_socketpair(arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
    case TARGET_NR_syslog:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_setitimer:
            struct itimerval value, ovalue, *pvalue;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
    case TARGET_NR_getitimer:
            struct itimerval value;
            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
    case TARGET_NR_fstat:
            ret = get_errno(fstat(arg1, &st));
            if (!is_error(ret)) {
                struct target_stat *target_st;
                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
                unlock_user_struct(target_st, arg2, 1);
#ifdef TARGET_NR_olduname
    case TARGET_NR_olduname:
#ifdef TARGET_NR_iopl
    case TARGET_NR_iopl:
    case TARGET_NR_vhangup:
        ret = get_errno(vhangup());
#ifdef TARGET_NR_idle
    case TARGET_NR_idle:
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                         arg6, arg7, arg8, 0);
    case TARGET_NR_wait4:
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
                rusage_ptr = &rusage;
            ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                    host_to_target_rusage(target_rusage, &rusage);
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
    case TARGET_NR_sysinfo:
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        ret = get_errno(semget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        ret = get_errno(do_semop(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
        break;
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        ret = do_msgctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        ret = get_errno(msgget(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        ret = do_msgsnd(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        ret = get_errno(shmget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        ret = do_shmctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        ret = do_shmat(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        ret = do_shmdt(arg1);
        break;
#endif
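    /* Targets that funnel all System V IPC through a single ipc(2)
     * syscall go through do_ipc() above, which dispatches on its call
     * number to the same semaphore, message-queue and shared-memory
     * helpers that back the direct per-call entries. */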
    case TARGET_NR_fsync:
        ret = get_errno(fsync(arg1));
        break;
    case TARGET_NR_clone:
#if defined(TARGET_SH4) || defined(TARGET_ALPHA)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#elif defined(TARGET_CRIS)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
#elif defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_S390X)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#endif
        break;
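    /* The #if ladder above exists because the guest ABIs disagree about
     * the order of the flags, new-stack pointer and the parent/child TID
     * and TLS arguments to clone(); each branch just reorders the raw
     * syscall arguments before handing them to do_fork(). */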
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        gdb_exit(cpu_env, arg1);
        ret = get_errno(exit_group(arg1));
        break;
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                goto efault;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release)
                    strcpy (buf->release, qemu_uname_release);
            }
            unlock_user_struct(buf, arg1, 1);
        }
        break;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
        break;
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86old:
        goto unimplemented;
    case TARGET_NR_vm86:
        ret = do_vm86(cpu_env, arg1, arg2);
        break;
#endif
#endif
    case TARGET_NR_adjtimex:
        goto unimplemented;
#ifdef TARGET_NR_create_module
    case TARGET_NR_create_module:
#endif
    case TARGET_NR_init_module:
    case TARGET_NR_delete_module:
#ifdef TARGET_NR_get_kernel_syms
    case TARGET_NR_get_kernel_syms:
#endif
        goto unimplemented;
    case TARGET_NR_quotactl:
        goto unimplemented;
    case TARGET_NR_getpgid:
        ret = get_errno(getpgid(arg1));
        break;
    case TARGET_NR_fchdir:
        ret = get_errno(fchdir(arg1));
        break;
#ifdef TARGET_NR_bdflush /* not on x86_64 */
    case TARGET_NR_bdflush:
        goto unimplemented;
#endif
#ifdef TARGET_NR_sysfs
    case TARGET_NR_sysfs:
        goto unimplemented;
#endif
    case TARGET_NR_personality:
        ret = get_errno(personality(arg1));
        break;
#ifdef TARGET_NR_afs_syscall
    case TARGET_NR_afs_syscall:
        goto unimplemented;
#endif
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                goto efault;
            }
        }
        break;
#endif
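    /* getdents needs special care when a 32-bit target runs on a 64-bit
     * host: the host linux_dirent carries 64-bit d_ino/d_off while the
     * target_dirent uses abi_long fields, so the records returned by the
     * kernel are repacked one by one with their d_reclen recomputed.  On
     * matching-width hosts the records are only byte-swapped in place. */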
    case TARGET_NR_getdents:
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = malloc(count);
            if (!dirp) {
                ret = -TARGET_ENOMEM;
                goto fail;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    goto efault;
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            free(dirp);
        }
#else
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto efault;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        break;
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto efault;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        break;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
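    /* poll/ppoll: the guest's target_pollfd array is converted into a
     * host pollfd array (fd and events swapped going in, revents swapped
     * back out afterwards).  For ppoll the timeout and signal mask are
     * converted as well, and the sigset size is passed explicitly as
     * _NSIG / 8, as the raw syscall expects. */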
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
# endif
        {
            struct target_pollfd *target_pfd;
            unsigned int nfds = arg2;
            int timeout = arg3;
            struct pollfd *pfd;
            unsigned int i;

            target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
            if (!target_pfd)
                goto efault;

            pfd = alloca(sizeof(struct pollfd) * nfds);
            for(i = 0; i < nfds; i++) {
                pfd[i].fd = tswap32(target_pfd[i].fd);
                pfd[i].events = tswap16(target_pfd[i].events);
            }

# ifdef TARGET_NR_ppoll
            if (num == TARGET_NR_ppoll) {
                struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
                target_sigset_t *target_set;
                sigset_t _set, *set = &_set;

                if (arg3) {
                    if (target_to_host_timespec(timeout_ts, arg3)) {
                        unlock_user(target_pfd, arg1, 0);
                        goto efault;
                    }
                } else {
                    timeout_ts = NULL;
                }

                if (arg4) {
                    target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                    if (!target_set) {
                        unlock_user(target_pfd, arg1, 0);
                        goto efault;
                    }
                    target_to_host_sigset(set, target_set);
                } else {
                    set = NULL;
                }

                ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));

                if (!is_error(ret) && arg3) {
                    host_to_target_timespec(arg3, timeout_ts);
                }
                if (arg4) {
                    unlock_user(target_set, arg4, 0);
                }
            } else
# endif
                ret = get_errno(poll(pfd, nfds, timeout));

            if (!is_error(ret)) {
                for(i = 0; i < nfds; i++) {
                    target_pfd[i].revents = tswap16(pfd[i].revents);
                }
            }
            unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
        }
        break;
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        ret = get_errno(flock(arg1, arg2));
        break;
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
    case TARGET_NR_getsid:
        ret = get_errno(getsid(arg1));
        break;
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        ret = get_errno(fdatasync(arg1));
        break;
#endif
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        ret = -TARGET_ENOTDIR;
        break;
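    /* sched_{get,set}affinity below operate on whole unsigned longs: the
     * length is first rejected if it is not a multiple of the target
     * ulong, then mask_size rounds it up to a multiple of the host ulong.
     * For example, a 32-bit guest passing 4 bytes on a 64-bit host passes
     * the check and gets an 8-byte host mask buffer. */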
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                ret = -TARGET_EINVAL;
                break;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (copy_to_user(arg3, mask, ret)) {
                    goto efault;
                }
            }
        }
        break;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                ret = -TARGET_EINVAL;
                break;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
                goto efault;
            }
            memcpy(mask, p, arg2);
            unlock_user_struct(p, arg2, 0);
            ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
        break;
7364 case TARGET_NR_sched_setparam
:
7366 struct sched_param
*target_schp
;
7367 struct sched_param schp
;
7369 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
7371 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7372 unlock_user_struct(target_schp
, arg2
, 0);
7373 ret
= get_errno(sched_setparam(arg1
, &schp
));
7376 case TARGET_NR_sched_getparam
:
7378 struct sched_param
*target_schp
;
7379 struct sched_param schp
;
7380 ret
= get_errno(sched_getparam(arg1
, &schp
));
7381 if (!is_error(ret
)) {
7382 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
7384 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
7385 unlock_user_struct(target_schp
, arg2
, 1);
7389 case TARGET_NR_sched_setscheduler
:
7391 struct sched_param
*target_schp
;
7392 struct sched_param schp
;
7393 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
7395 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7396 unlock_user_struct(target_schp
, arg3
, 0);
7397 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
7400 case TARGET_NR_sched_getscheduler
:
7401 ret
= get_errno(sched_getscheduler(arg1
));
7403 case TARGET_NR_sched_yield
:
7404 ret
= get_errno(sched_yield());
7406 case TARGET_NR_sched_get_priority_max
:
7407 ret
= get_errno(sched_get_priority_max(arg1
));
7409 case TARGET_NR_sched_get_priority_min
:
7410 ret
= get_errno(sched_get_priority_min(arg1
));
7412 case TARGET_NR_sched_rr_get_interval
:
7415 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
7416 if (!is_error(ret
)) {
7417 host_to_target_timespec(arg2
, &ts
);
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        break;
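    /* Note on nanosleep above: the remaining time is copied back to the
     * guest only when the call fails (typically with EINTR), matching the
     * kernel's behaviour of updating the rem argument only when the sleep
     * was interrupted. */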
7431 #ifdef TARGET_NR_query_module
7432 case TARGET_NR_query_module
:
7435 #ifdef TARGET_NR_nfsservctl
7436 case TARGET_NR_nfsservctl
:
7439 case TARGET_NR_prctl
:
7441 case PR_GET_PDEATHSIG
:
7444 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
7445 if (!is_error(ret
) && arg2
7446 && put_user_ual(deathsig
, arg2
)) {
7454 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
7458 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7460 unlock_user(name
, arg2
, 16);
7465 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
7469 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7471 unlock_user(name
, arg2
, 0);
7476 /* Most prctl options have no pointer arguments */
7477 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
7481 #ifdef TARGET_NR_arch_prctl
7482 case TARGET_NR_arch_prctl
:
7483 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7484 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
7490 #ifdef TARGET_NR_pread64
7491 case TARGET_NR_pread64
:
7492 if (regpairs_aligned(cpu_env
)) {
7496 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7498 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7499 unlock_user(p
, arg2
, ret
);
7501 case TARGET_NR_pwrite64
:
7502 if (regpairs_aligned(cpu_env
)) {
7506 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7508 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7509 unlock_user(p
, arg2
, 0);
7512 case TARGET_NR_getcwd
:
7513 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
7515 ret
= get_errno(sys_getcwd1(p
, arg2
));
7516 unlock_user(p
, arg1
, ret
);
7518 case TARGET_NR_capget
:
7520 case TARGET_NR_capset
:
7522 case TARGET_NR_sigaltstack
:
7523 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7524 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7525 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7526 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
7531 case TARGET_NR_sendfile
:
7533 #ifdef TARGET_NR_getpmsg
7534 case TARGET_NR_getpmsg
:
7537 #ifdef TARGET_NR_putpmsg
7538 case TARGET_NR_putpmsg
:
7541 #ifdef TARGET_NR_vfork
7542 case TARGET_NR_vfork
:
7543 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
7547 #ifdef TARGET_NR_ugetrlimit
7548 case TARGET_NR_ugetrlimit
:
7551 int resource
= target_to_host_resource(arg1
);
7552 ret
= get_errno(getrlimit(resource
, &rlim
));
7553 if (!is_error(ret
)) {
7554 struct target_rlimit
*target_rlim
;
7555 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7557 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7558 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7559 unlock_user_struct(target_rlim
, arg2
, 1);
7564 #ifdef TARGET_NR_truncate64
7565 case TARGET_NR_truncate64
:
7566 if (!(p
= lock_user_string(arg1
)))
7568 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
7569 unlock_user(p
, arg1
, 0);
7572 #ifdef TARGET_NR_ftruncate64
7573 case TARGET_NR_ftruncate64
:
7574 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
7577 #ifdef TARGET_NR_stat64
7578 case TARGET_NR_stat64
:
7579 if (!(p
= lock_user_string(arg1
)))
7581 ret
= get_errno(stat(path(p
), &st
));
7582 unlock_user(p
, arg1
, 0);
7584 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7587 #ifdef TARGET_NR_lstat64
7588 case TARGET_NR_lstat64
:
7589 if (!(p
= lock_user_string(arg1
)))
7591 ret
= get_errno(lstat(path(p
), &st
));
7592 unlock_user(p
, arg1
, 0);
7594 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7597 #ifdef TARGET_NR_fstat64
7598 case TARGET_NR_fstat64
:
7599 ret
= get_errno(fstat(arg1
, &st
));
7601 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7604 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7605 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7606 #ifdef TARGET_NR_fstatat64
7607 case TARGET_NR_fstatat64
:
7609 #ifdef TARGET_NR_newfstatat
7610 case TARGET_NR_newfstatat
:
7612 if (!(p
= lock_user_string(arg2
)))
7614 #ifdef __NR_fstatat64
7615 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
7617 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
7620 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
7623 case TARGET_NR_lchown
:
7624 if (!(p
= lock_user_string(arg1
)))
7626 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7627 unlock_user(p
, arg1
, 0);
7629 #ifdef TARGET_NR_getuid
7630 case TARGET_NR_getuid
:
7631 ret
= get_errno(high2lowuid(getuid()));
7634 #ifdef TARGET_NR_getgid
7635 case TARGET_NR_getgid
:
7636 ret
= get_errno(high2lowgid(getgid()));
7639 #ifdef TARGET_NR_geteuid
7640 case TARGET_NR_geteuid
:
7641 ret
= get_errno(high2lowuid(geteuid()));
7644 #ifdef TARGET_NR_getegid
7645 case TARGET_NR_getegid
:
7646 ret
= get_errno(high2lowgid(getegid()));
7649 case TARGET_NR_setreuid
:
7650 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
7652 case TARGET_NR_setregid
:
7653 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
7655 case TARGET_NR_getgroups
:
7657 int gidsetsize
= arg1
;
7658 target_id
*target_grouplist
;
7662 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7663 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7664 if (gidsetsize
== 0)
7666 if (!is_error(ret
)) {
7667 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
7668 if (!target_grouplist
)
7670 for(i
= 0;i
< ret
; i
++)
7671 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
7672 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
7676 case TARGET_NR_setgroups
:
7678 int gidsetsize
= arg1
;
7679 target_id
*target_grouplist
;
7683 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7684 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
7685 if (!target_grouplist
) {
7686 ret
= -TARGET_EFAULT
;
7689 for(i
= 0;i
< gidsetsize
; i
++)
7690 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
7691 unlock_user(target_grouplist
, arg2
, 0);
7692 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7695 case TARGET_NR_fchown
:
7696 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
7698 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7699 case TARGET_NR_fchownat
:
7700 if (!(p
= lock_user_string(arg2
)))
7702 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
7703 unlock_user(p
, arg2
, 0);
7706 #ifdef TARGET_NR_setresuid
7707 case TARGET_NR_setresuid
:
7708 ret
= get_errno(setresuid(low2highuid(arg1
),
7710 low2highuid(arg3
)));
7713 #ifdef TARGET_NR_getresuid
7714 case TARGET_NR_getresuid
:
7716 uid_t ruid
, euid
, suid
;
7717 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7718 if (!is_error(ret
)) {
7719 if (put_user_u16(high2lowuid(ruid
), arg1
)
7720 || put_user_u16(high2lowuid(euid
), arg2
)
7721 || put_user_u16(high2lowuid(suid
), arg3
))
7727 #ifdef TARGET_NR_getresgid
7728 case TARGET_NR_setresgid
:
7729 ret
= get_errno(setresgid(low2highgid(arg1
),
7731 low2highgid(arg3
)));
7734 #ifdef TARGET_NR_getresgid
7735 case TARGET_NR_getresgid
:
7737 gid_t rgid
, egid
, sgid
;
7738 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7739 if (!is_error(ret
)) {
7740 if (put_user_u16(high2lowgid(rgid
), arg1
)
7741 || put_user_u16(high2lowgid(egid
), arg2
)
7742 || put_user_u16(high2lowgid(sgid
), arg3
))
7748 case TARGET_NR_chown
:
7749 if (!(p
= lock_user_string(arg1
)))
7751 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7752 unlock_user(p
, arg1
, 0);
7754 case TARGET_NR_setuid
:
7755 ret
= get_errno(setuid(low2highuid(arg1
)));
7757 case TARGET_NR_setgid
:
7758 ret
= get_errno(setgid(low2highgid(arg1
)));
7760 case TARGET_NR_setfsuid
:
7761 ret
= get_errno(setfsuid(arg1
));
7763 case TARGET_NR_setfsgid
:
7764 ret
= get_errno(setfsgid(arg1
));
7767 #ifdef TARGET_NR_lchown32
7768 case TARGET_NR_lchown32
:
7769 if (!(p
= lock_user_string(arg1
)))
7771 ret
= get_errno(lchown(p
, arg2
, arg3
));
7772 unlock_user(p
, arg1
, 0);
7775 #ifdef TARGET_NR_getuid32
7776 case TARGET_NR_getuid32
:
7777 ret
= get_errno(getuid());
7781 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7782 /* Alpha specific */
7783 case TARGET_NR_getxuid
:
7787 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
7789 ret
= get_errno(getuid());
7792 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7793 /* Alpha specific */
7794 case TARGET_NR_getxgid
:
7798 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
7800 ret
= get_errno(getgid());
7803 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7804 /* Alpha specific */
7805 case TARGET_NR_osf_getsysinfo
:
7806 ret
= -TARGET_EOPNOTSUPP
;
7808 case TARGET_GSI_IEEE_FP_CONTROL
:
7810 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
7812 /* Copied from linux ieee_fpcr_to_swcr. */
7813 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
7814 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
7815 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
7816 | SWCR_TRAP_ENABLE_DZE
7817 | SWCR_TRAP_ENABLE_OVF
);
7818 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
7819 | SWCR_TRAP_ENABLE_INE
);
7820 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
7821 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
7823 if (put_user_u64 (swcr
, arg2
))
7829 /* case GSI_IEEE_STATE_AT_SIGNAL:
7830 -- Not implemented in linux kernel.
7832 -- Retrieves current unaligned access state; not much used.
7834 -- Retrieves implver information; surely not used.
7836 -- Grabs a copy of the HWRPB; surely not used.
7841 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7842 /* Alpha specific */
7843 case TARGET_NR_osf_setsysinfo
:
7844 ret
= -TARGET_EOPNOTSUPP
;
7846 case TARGET_SSI_IEEE_FP_CONTROL
:
7848 uint64_t swcr
, fpcr
, orig_fpcr
;
7850 if (get_user_u64 (swcr
, arg2
)) {
7853 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
7854 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
7856 /* Copied from linux ieee_swcr_to_fpcr. */
7857 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
7858 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
7859 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
7860 | SWCR_TRAP_ENABLE_DZE
7861 | SWCR_TRAP_ENABLE_OVF
)) << 48;
7862 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
7863 | SWCR_TRAP_ENABLE_INE
)) << 57;
7864 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
7865 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
7867 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
7872 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
7874 uint64_t exc
, fpcr
, orig_fpcr
;
7877 if (get_user_u64(exc
, arg2
)) {
7881 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
7883 /* We only add to the exception status here. */
7884 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
7886 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
7889 /* Old exceptions are not signaled. */
7890 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
7892 /* If any exceptions set by this call,
7893 and are unmasked, send a signal. */
7895 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
7896 si_code
= TARGET_FPE_FLTRES
;
7898 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
7899 si_code
= TARGET_FPE_FLTUND
;
7901 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
7902 si_code
= TARGET_FPE_FLTOVF
;
7904 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
7905 si_code
= TARGET_FPE_FLTDIV
;
7907 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
7908 si_code
= TARGET_FPE_FLTINV
;
7911 target_siginfo_t info
;
7912 info
.si_signo
= SIGFPE
;
7914 info
.si_code
= si_code
;
7915 info
._sifields
._sigfault
._addr
7916 = ((CPUArchState
*)cpu_env
)->pc
;
7917 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
7922 /* case SSI_NVPAIRS:
7923 -- Used with SSIN_UACPROC to enable unaligned accesses.
7924 case SSI_IEEE_STATE_AT_SIGNAL:
7925 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7926 -- Not implemented in linux kernel
7931 #ifdef TARGET_NR_osf_sigprocmask
7932 /* Alpha specific. */
7933 case TARGET_NR_osf_sigprocmask
:
7937 sigset_t set
, oldset
;
7940 case TARGET_SIG_BLOCK
:
7943 case TARGET_SIG_UNBLOCK
:
7946 case TARGET_SIG_SETMASK
:
7950 ret
= -TARGET_EINVAL
;
7954 target_to_host_old_sigset(&set
, &mask
);
7955 sigprocmask(how
, &set
, &oldset
);
7956 host_to_target_old_sigset(&mask
, &oldset
);
7962 #ifdef TARGET_NR_getgid32
7963 case TARGET_NR_getgid32
:
7964 ret
= get_errno(getgid());
7967 #ifdef TARGET_NR_geteuid32
7968 case TARGET_NR_geteuid32
:
7969 ret
= get_errno(geteuid());
7972 #ifdef TARGET_NR_getegid32
7973 case TARGET_NR_getegid32
:
7974 ret
= get_errno(getegid());
7977 #ifdef TARGET_NR_setreuid32
7978 case TARGET_NR_setreuid32
:
7979 ret
= get_errno(setreuid(arg1
, arg2
));
7982 #ifdef TARGET_NR_setregid32
7983 case TARGET_NR_setregid32
:
7984 ret
= get_errno(setregid(arg1
, arg2
));
7987 #ifdef TARGET_NR_getgroups32
7988 case TARGET_NR_getgroups32
:
7990 int gidsetsize
= arg1
;
7991 uint32_t *target_grouplist
;
7995 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7996 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7997 if (gidsetsize
== 0)
7999 if (!is_error(ret
)) {
8000 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
8001 if (!target_grouplist
) {
8002 ret
= -TARGET_EFAULT
;
8005 for(i
= 0;i
< ret
; i
++)
8006 target_grouplist
[i
] = tswap32(grouplist
[i
]);
8007 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
8012 #ifdef TARGET_NR_setgroups32
8013 case TARGET_NR_setgroups32
:
8015 int gidsetsize
= arg1
;
8016 uint32_t *target_grouplist
;
8020 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8021 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
8022 if (!target_grouplist
) {
8023 ret
= -TARGET_EFAULT
;
8026 for(i
= 0;i
< gidsetsize
; i
++)
8027 grouplist
[i
] = tswap32(target_grouplist
[i
]);
8028 unlock_user(target_grouplist
, arg2
, 0);
8029 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8033 #ifdef TARGET_NR_fchown32
8034 case TARGET_NR_fchown32
:
8035 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
8038 #ifdef TARGET_NR_setresuid32
8039 case TARGET_NR_setresuid32
:
8040 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
8043 #ifdef TARGET_NR_getresuid32
8044 case TARGET_NR_getresuid32
:
8046 uid_t ruid
, euid
, suid
;
8047 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8048 if (!is_error(ret
)) {
8049 if (put_user_u32(ruid
, arg1
)
8050 || put_user_u32(euid
, arg2
)
8051 || put_user_u32(suid
, arg3
))
8057 #ifdef TARGET_NR_setresgid32
8058 case TARGET_NR_setresgid32
:
8059 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
8062 #ifdef TARGET_NR_getresgid32
8063 case TARGET_NR_getresgid32
:
8065 gid_t rgid
, egid
, sgid
;
8066 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8067 if (!is_error(ret
)) {
8068 if (put_user_u32(rgid
, arg1
)
8069 || put_user_u32(egid
, arg2
)
8070 || put_user_u32(sgid
, arg3
))
8076 #ifdef TARGET_NR_chown32
8077 case TARGET_NR_chown32
:
8078 if (!(p
= lock_user_string(arg1
)))
8080 ret
= get_errno(chown(p
, arg2
, arg3
));
8081 unlock_user(p
, arg1
, 0);
8084 #ifdef TARGET_NR_setuid32
8085 case TARGET_NR_setuid32
:
8086 ret
= get_errno(setuid(arg1
));
8089 #ifdef TARGET_NR_setgid32
8090 case TARGET_NR_setgid32
:
8091 ret
= get_errno(setgid(arg1
));
8094 #ifdef TARGET_NR_setfsuid32
8095 case TARGET_NR_setfsuid32
:
8096 ret
= get_errno(setfsuid(arg1
));
8099 #ifdef TARGET_NR_setfsgid32
8100 case TARGET_NR_setfsgid32
:
8101 ret
= get_errno(setfsgid(arg1
));
8105 case TARGET_NR_pivot_root
:
8107 #ifdef TARGET_NR_mincore
8108 case TARGET_NR_mincore
:
8111 ret
= -TARGET_EFAULT
;
8112 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
8114 if (!(p
= lock_user_string(arg3
)))
8116 ret
= get_errno(mincore(a
, arg2
, p
));
8117 unlock_user(p
, arg3
, ret
);
8119 unlock_user(a
, arg1
, 0);
8123 #ifdef TARGET_NR_arm_fadvise64_64
8124 case TARGET_NR_arm_fadvise64_64
:
8127 * arm_fadvise64_64 looks like fadvise64_64 but
8128 * with different argument order
8136 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8137 #ifdef TARGET_NR_fadvise64_64
8138 case TARGET_NR_fadvise64_64
:
8140 #ifdef TARGET_NR_fadvise64
8141 case TARGET_NR_fadvise64
:
8145 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
8146 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
8147 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
8148 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
8152 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        ret = get_errno(0);
        break;
#endif
8164 #if TARGET_ABI_BITS == 32
8165 case TARGET_NR_fcntl64
:
8169 struct target_flock64
*target_fl
;
8171 struct target_eabi_flock64
*target_efl
;
8174 cmd
= target_to_host_fcntl_cmd(arg2
);
8175 if (cmd
== -TARGET_EINVAL
) {
8181 case TARGET_F_GETLK64
:
8183 if (((CPUARMState
*)cpu_env
)->eabi
) {
8184 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8186 fl
.l_type
= tswap16(target_efl
->l_type
);
8187 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8188 fl
.l_start
= tswap64(target_efl
->l_start
);
8189 fl
.l_len
= tswap64(target_efl
->l_len
);
8190 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8191 unlock_user_struct(target_efl
, arg3
, 0);
8195 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8197 fl
.l_type
= tswap16(target_fl
->l_type
);
8198 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8199 fl
.l_start
= tswap64(target_fl
->l_start
);
8200 fl
.l_len
= tswap64(target_fl
->l_len
);
8201 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8202 unlock_user_struct(target_fl
, arg3
, 0);
8204 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8207 if (((CPUARMState
*)cpu_env
)->eabi
) {
8208 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
8210 target_efl
->l_type
= tswap16(fl
.l_type
);
8211 target_efl
->l_whence
= tswap16(fl
.l_whence
);
8212 target_efl
->l_start
= tswap64(fl
.l_start
);
8213 target_efl
->l_len
= tswap64(fl
.l_len
);
8214 target_efl
->l_pid
= tswap32(fl
.l_pid
);
8215 unlock_user_struct(target_efl
, arg3
, 1);
8219 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
8221 target_fl
->l_type
= tswap16(fl
.l_type
);
8222 target_fl
->l_whence
= tswap16(fl
.l_whence
);
8223 target_fl
->l_start
= tswap64(fl
.l_start
);
8224 target_fl
->l_len
= tswap64(fl
.l_len
);
8225 target_fl
->l_pid
= tswap32(fl
.l_pid
);
8226 unlock_user_struct(target_fl
, arg3
, 1);
8231 case TARGET_F_SETLK64
:
8232 case TARGET_F_SETLKW64
:
8234 if (((CPUARMState
*)cpu_env
)->eabi
) {
8235 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8237 fl
.l_type
= tswap16(target_efl
->l_type
);
8238 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8239 fl
.l_start
= tswap64(target_efl
->l_start
);
8240 fl
.l_len
= tswap64(target_efl
->l_len
);
8241 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8242 unlock_user_struct(target_efl
, arg3
, 0);
8246 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8248 fl
.l_type
= tswap16(target_fl
->l_type
);
8249 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8250 fl
.l_start
= tswap64(target_fl
->l_start
);
8251 fl
.l_len
= tswap64(target_fl
->l_len
);
8252 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8253 unlock_user_struct(target_fl
, arg3
, 0);
8255 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8258 ret
= do_fcntl(arg1
, arg2
, arg3
);
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
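    /* regpairs_aligned() (used by readahead above and by pread64/pwrite64
     * earlier) reports whether the target ABI wants 64-bit syscall
     * arguments to start in an even register pair; when it does, the two
     * halves of the offset arrive one slot later and the arguments are
     * shifted before the host call is made. */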
8297 #ifdef TARGET_NR_setxattr
8298 case TARGET_NR_listxattr
:
8299 case TARGET_NR_llistxattr
:
8303 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8305 ret
= -TARGET_EFAULT
;
8309 p
= lock_user_string(arg1
);
8311 if (num
== TARGET_NR_listxattr
) {
8312 ret
= get_errno(listxattr(p
, b
, arg3
));
8314 ret
= get_errno(llistxattr(p
, b
, arg3
));
8317 ret
= -TARGET_EFAULT
;
8319 unlock_user(p
, arg1
, 0);
8320 unlock_user(b
, arg2
, arg3
);
8323 case TARGET_NR_flistxattr
:
8327 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8329 ret
= -TARGET_EFAULT
;
8333 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
8334 unlock_user(b
, arg2
, arg3
);
8337 case TARGET_NR_setxattr
:
8338 case TARGET_NR_lsetxattr
:
8340 void *p
, *n
, *v
= 0;
8342 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8344 ret
= -TARGET_EFAULT
;
8348 p
= lock_user_string(arg1
);
8349 n
= lock_user_string(arg2
);
8351 if (num
== TARGET_NR_setxattr
) {
8352 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
8354 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
8357 ret
= -TARGET_EFAULT
;
8359 unlock_user(p
, arg1
, 0);
8360 unlock_user(n
, arg2
, 0);
8361 unlock_user(v
, arg3
, 0);
8364 case TARGET_NR_fsetxattr
:
8368 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8370 ret
= -TARGET_EFAULT
;
8374 n
= lock_user_string(arg2
);
8376 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
8378 ret
= -TARGET_EFAULT
;
8380 unlock_user(n
, arg2
, 0);
8381 unlock_user(v
, arg3
, 0);
8384 case TARGET_NR_getxattr
:
8385 case TARGET_NR_lgetxattr
:
8387 void *p
, *n
, *v
= 0;
8389 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8391 ret
= -TARGET_EFAULT
;
8395 p
= lock_user_string(arg1
);
8396 n
= lock_user_string(arg2
);
8398 if (num
== TARGET_NR_getxattr
) {
8399 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
8401 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
8404 ret
= -TARGET_EFAULT
;
8406 unlock_user(p
, arg1
, 0);
8407 unlock_user(n
, arg2
, 0);
8408 unlock_user(v
, arg3
, arg4
);
8411 case TARGET_NR_fgetxattr
:
8415 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8417 ret
= -TARGET_EFAULT
;
8421 n
= lock_user_string(arg2
);
8423 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
8425 ret
= -TARGET_EFAULT
;
8427 unlock_user(n
, arg2
, 0);
8428 unlock_user(v
, arg3
, arg4
);
8431 case TARGET_NR_removexattr
:
8432 case TARGET_NR_lremovexattr
:
8435 p
= lock_user_string(arg1
);
8436 n
= lock_user_string(arg2
);
8438 if (num
== TARGET_NR_removexattr
) {
8439 ret
= get_errno(removexattr(p
, n
));
8441 ret
= get_errno(lremovexattr(p
, n
));
8444 ret
= -TARGET_EFAULT
;
8446 unlock_user(p
, arg1
, 0);
8447 unlock_user(n
, arg2
, 0);
8450 case TARGET_NR_fremovexattr
:
8453 n
= lock_user_string(arg2
);
8455 ret
= get_errno(fremovexattr(arg1
, n
));
8457 ret
= -TARGET_EFAULT
;
8459 unlock_user(n
, arg2
, 0);
8463 #endif /* CONFIG_ATTR */
8464 #ifdef TARGET_NR_set_thread_area
8465 case TARGET_NR_set_thread_area
:
8466 #if defined(TARGET_MIPS)
8467 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
8470 #elif defined(TARGET_CRIS)
8472 ret
= -TARGET_EINVAL
;
8474 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
8478 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8479 ret
= do_set_thread_area(cpu_env
, arg1
);
8482 goto unimplemented_nowarn
;
8485 #ifdef TARGET_NR_get_thread_area
8486 case TARGET_NR_get_thread_area
:
8487 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8488 ret
= do_get_thread_area(cpu_env
, arg1
);
8490 goto unimplemented_nowarn
;
8493 #ifdef TARGET_NR_getdomainname
8494 case TARGET_NR_getdomainname
:
8495 goto unimplemented_nowarn
;
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif
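    /* The clock_* cases above rely on target_to_host_timespec() and
     * host_to_target_timespec() to copy tv_sec/tv_nsec between the
     * guest's struct target_timespec and the host's struct timespec,
     * byte-swapping the fields as needed. */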
8532 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8533 case TARGET_NR_set_tid_address
:
8534 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
8538 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8539 case TARGET_NR_tkill
:
8540 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
8544 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8545 case TARGET_NR_tgkill
:
8546 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
8547 target_to_host_signal(arg3
)));
8551 #ifdef TARGET_NR_set_robust_list
8552 case TARGET_NR_set_robust_list
:
8553 goto unimplemented_nowarn
;
8556 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8557 case TARGET_NR_utimensat
:
8559 struct timespec
*tsp
, ts
[2];
8563 target_to_host_timespec(ts
, arg3
);
8564 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
8568 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
8570 if (!(p
= lock_user_string(arg2
))) {
8571 ret
= -TARGET_EFAULT
;
8574 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
8575 unlock_user(p
, arg2
, 0);
8580 #if defined(CONFIG_USE_NPTL)
8581 case TARGET_NR_futex
:
8582 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8585 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8586 case TARGET_NR_inotify_init
:
8587 ret
= get_errno(sys_inotify_init());
8590 #ifdef CONFIG_INOTIFY1
8591 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8592 case TARGET_NR_inotify_init1
:
8593 ret
= get_errno(sys_inotify_init1(arg1
));
8597 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8598 case TARGET_NR_inotify_add_watch
:
8599 p
= lock_user_string(arg2
);
8600 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
8601 unlock_user(p
, arg2
, 0);
8604 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8605 case TARGET_NR_inotify_rm_watch
:
8606 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
8610 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8611 case TARGET_NR_mq_open
:
8613 struct mq_attr posix_mq_attr
;
8615 p
= lock_user_string(arg1
- 1);
8617 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
8618 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
8619 unlock_user (p
, arg1
, 0);
8623 case TARGET_NR_mq_unlink
:
8624 p
= lock_user_string(arg1
- 1);
8625 ret
= get_errno(mq_unlink(p
));
8626 unlock_user (p
, arg1
, 0);
8629 case TARGET_NR_mq_timedsend
:
8633 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8635 target_to_host_timespec(&ts
, arg5
);
8636 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
8637 host_to_target_timespec(arg5
, &ts
);
8640 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
8641 unlock_user (p
, arg2
, arg3
);
8645 case TARGET_NR_mq_timedreceive
:
8650 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8652 target_to_host_timespec(&ts
, arg5
);
8653 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
8654 host_to_target_timespec(arg5
, &ts
);
8657 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
8658 unlock_user (p
, arg2
, arg3
);
8660 put_user_u32(prio
, arg4
);
8664 /* Not implemented for now... */
8665 /* case TARGET_NR_mq_notify: */
8668 case TARGET_NR_mq_getsetattr
:
8670 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
8673 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
8674 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
8677 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
8678 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
8685 #ifdef CONFIG_SPLICE
8686 #ifdef TARGET_NR_tee
8689 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
8693 #ifdef TARGET_NR_splice
8694 case TARGET_NR_splice
:
8696 loff_t loff_in
, loff_out
;
8697 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
8699 get_user_u64(loff_in
, arg2
);
8700 ploff_in
= &loff_in
;
8703 get_user_u64(loff_out
, arg2
);
8704 ploff_out
= &loff_out
;
8706 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
8710 #ifdef TARGET_NR_vmsplice
8711 case TARGET_NR_vmsplice
:
8713 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
8715 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
8716 unlock_iovec(vec
, arg2
, arg3
, 0);
8718 ret
= -host_to_target_errno(errno
);
8723 #endif /* CONFIG_SPLICE */
8724 #ifdef CONFIG_EVENTFD
8725 #if defined(TARGET_NR_eventfd)
8726 case TARGET_NR_eventfd
:
8727 ret
= get_errno(eventfd(arg1
, 0));
8730 #if defined(TARGET_NR_eventfd2)
8731 case TARGET_NR_eventfd2
:
8732 ret
= get_errno(eventfd(arg1
, arg2
));
8735 #endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
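    /* On 32-bit ABIs the 64-bit offsets for fallocate and sync_file_range
     * above arrive split across two registers and are reassembled with
     * target_offset64(); MIPS starts the pair one register later, which
     * is why its branch reads arg3..arg7 instead of arg2..arg6. */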
8774 #if defined(CONFIG_EPOLL)
8775 #if defined(TARGET_NR_epoll_create)
8776 case TARGET_NR_epoll_create
:
8777 ret
= get_errno(epoll_create(arg1
));
8780 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8781 case TARGET_NR_epoll_create1
:
8782 ret
= get_errno(epoll_create1(arg1
));
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif
8809 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8810 #define IMPLEMENT_EPOLL_PWAIT
8812 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8813 #if defined(TARGET_NR_epoll_wait)
8814 case TARGET_NR_epoll_wait
:
8816 #if defined(IMPLEMENT_EPOLL_PWAIT)
8817 case TARGET_NR_epoll_pwait
:
8820 struct target_epoll_event
*target_ep
;
8821 struct epoll_event
*ep
;
8823 int maxevents
= arg3
;
8826 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
8827 maxevents
* sizeof(struct target_epoll_event
), 1);
8832 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
8835 #if defined(IMPLEMENT_EPOLL_PWAIT)
8836 case TARGET_NR_epoll_pwait
:
8838 target_sigset_t
*target_set
;
8839 sigset_t _set
, *set
= &_set
;
8842 target_set
= lock_user(VERIFY_READ
, arg5
,
8843 sizeof(target_sigset_t
), 1);
8845 unlock_user(target_ep
, arg2
, 0);
8848 target_to_host_sigset(set
, target_set
);
8849 unlock_user(target_set
, arg5
, 0);
8854 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
8858 #if defined(TARGET_NR_epoll_wait)
8859 case TARGET_NR_epoll_wait
:
8860 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
8864 ret
= -TARGET_ENOSYS
;
8866 if (!is_error(ret
)) {
8868 for (i
= 0; i
< ret
; i
++) {
8869 target_ep
[i
].events
= tswap32(ep
[i
].events
);
8870 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
8873 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
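    /* prlimit64 below uses an explicitly 64-bit rlimit layout on both the
     * guest and host side regardless of the ABI width, so the conversion
     * is a plain tswap64 of rlim_cur/rlim_max in each direction. */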
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if(do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;