4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
31 #include <sys/types.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
45 int __clone2(int (*fn
)(void *), void *child_stack_base
,
46 size_t stack_size
, int flags
, void *arg
, ...);
48 #include <sys/socket.h>
52 #include <sys/times.h>
55 #include <sys/statfs.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include <qemu-common.h>
68 #include <sys/eventfd.h>
71 #include <sys/epoll.h>
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/utsname.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
88 #include <linux/mtio.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
95 #include "linux_loop.h"
96 #include "cpu-uname.h"
99 #include "qemu-common.h"
101 #if defined(CONFIG_USE_NPTL)
102 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
103 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
105 /* XXX: Hardcode the above values. */
106 #define CLONE_NPTL_FLAGS2 0
111 //#include <linux/msdos_fs.h>
112 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
113 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
124 #define _syscall0(type,name) \
125 static type name (void) \
127 return syscall(__NR_##name); \
130 #define _syscall1(type,name,type1,arg1) \
131 static type name (type1 arg1) \
133 return syscall(__NR_##name, arg1); \
136 #define _syscall2(type,name,type1,arg1,type2,arg2) \
137 static type name (type1 arg1,type2 arg2) \
139 return syscall(__NR_##name, arg1, arg2); \
142 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
143 static type name (type1 arg1,type2 arg2,type3 arg3) \
145 return syscall(__NR_##name, arg1, arg2, arg3); \
148 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
149 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
151 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
154 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
156 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
158 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
162 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5,type6,arg6) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
171 #define __NR_sys_uname __NR_uname
172 #define __NR_sys_faccessat __NR_faccessat
173 #define __NR_sys_fchmodat __NR_fchmodat
174 #define __NR_sys_fchownat __NR_fchownat
175 #define __NR_sys_fstatat64 __NR_fstatat64
176 #define __NR_sys_futimesat __NR_futimesat
177 #define __NR_sys_getcwd1 __NR_getcwd
178 #define __NR_sys_getdents __NR_getdents
179 #define __NR_sys_getdents64 __NR_getdents64
180 #define __NR_sys_getpriority __NR_getpriority
181 #define __NR_sys_linkat __NR_linkat
182 #define __NR_sys_mkdirat __NR_mkdirat
183 #define __NR_sys_mknodat __NR_mknodat
184 #define __NR_sys_newfstatat __NR_newfstatat
185 #define __NR_sys_openat __NR_openat
186 #define __NR_sys_readlinkat __NR_readlinkat
187 #define __NR_sys_renameat __NR_renameat
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_symlinkat __NR_symlinkat
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_unlinkat __NR_unlinkat
194 #define __NR_sys_utimensat __NR_utimensat
195 #define __NR_sys_futex __NR_futex
196 #define __NR_sys_inotify_init __NR_inotify_init
197 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
198 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
200 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
202 #define __NR__llseek __NR_lseek
206 _syscall0(int, gettid
)
208 /* This is a replacement for the host gettid() and must return a host
210 static int gettid(void) {
214 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
215 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
216 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
218 _syscall2(int, sys_getpriority
, int, which
, int, who
);
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
221 loff_t
*, res
, uint
, wh
);
223 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
224 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
225 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
226 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
228 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
229 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group
,int,error_code
)
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address
,int *,tidptr
)
237 #if defined(CONFIG_USE_NPTL)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
240 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
245 unsigned long *, user_mask_ptr
);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
248 unsigned long *, user_mask_ptr
);
250 static bitmask_transtbl fcntl_flags_tbl
[] = {
251 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
252 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
253 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
254 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
255 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
256 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
257 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
258 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
259 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
260 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
261 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
262 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
263 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
264 #if defined(O_DIRECT)
265 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
270 #define COPY_UTSNAME_FIELD(dest, src) \
272 /* __NEW_UTS_LEN doesn't include terminating null */ \
273 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
274 (dest)[__NEW_UTS_LEN] = '\0'; \
277 static int sys_uname(struct new_utsname
*buf
)
279 struct utsname uts_buf
;
281 if (uname(&uts_buf
) < 0)
285 * Just in case these have some differences, we
286 * translate utsname to new_utsname (which is the
287 * struct linux kernel uses).
290 memset(buf
, 0, sizeof(*buf
));
291 COPY_UTSNAME_FIELD(buf
->sysname
, uts_buf
.sysname
);
292 COPY_UTSNAME_FIELD(buf
->nodename
, uts_buf
.nodename
);
293 COPY_UTSNAME_FIELD(buf
->release
, uts_buf
.release
);
294 COPY_UTSNAME_FIELD(buf
->version
, uts_buf
.version
);
295 COPY_UTSNAME_FIELD(buf
->machine
, uts_buf
.machine
);
297 COPY_UTSNAME_FIELD(buf
->domainname
, uts_buf
.domainname
);
301 #undef COPY_UTSNAME_FIELD
/*
 * Emulate the getcwd syscall return convention on top of libc getcwd():
 * on success, return the length of the path string including the
 * terminating NUL; on failure, return -1 (errno is set by getcwd()).
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
315 * Host system seems to have atfile syscall stubs available. We
316 * now enable them one by one as specified by target syscall_nr.h.
319 #ifdef TARGET_NR_faccessat
/* Thin host wrapper around faccessat(2); the 'flags' argument is
 * always 0 (no AT_EACCESS / AT_SYMLINK_NOFOLLOW behavior). */
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
    return faccessat(dirfd, pathname, mode, 0);
}
325 #ifdef TARGET_NR_fchmodat
/* Thin host wrapper around fchmodat(2); 'flags' is always 0. */
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
    return fchmodat(dirfd, pathname, mode, 0);
}
331 #if defined(TARGET_NR_fchownat)
/* Thin host wrapper around fchownat(2); all five arguments are
 * forwarded unchanged. */
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
    return fchownat(dirfd, pathname, owner, group, flags);
}
338 #ifdef __NR_fstatat64
/* Thin host wrapper: implement the fstatat64 stub on top of the libc
 * fstatat() entry point. */
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
    return fstatat(dirfd, pathname, buf, flags);
}
345 #ifdef __NR_newfstatat
/* Thin host wrapper: implement the newfstatat stub on top of the libc
 * fstatat() entry point. */
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
    return fstatat(dirfd, pathname, buf, flags);
}
352 #ifdef TARGET_NR_futimesat
/* Thin host wrapper around futimesat(2). A NULL 'times' means
 * "set to current time", exactly as the libc call defines it. */
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
    return futimesat(dirfd, pathname, times);
}
359 #ifdef TARGET_NR_linkat
/* Thin host wrapper around linkat(2); all arguments are forwarded
 * unchanged. */
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
    return linkat(olddirfd, oldpath, newdirfd, newpath, flags);
}
366 #ifdef TARGET_NR_mkdirat
/* Thin host wrapper around mkdirat(2). */
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
    return mkdirat(dirfd, pathname, mode);
}
372 #ifdef TARGET_NR_mknodat
/* Thin host wrapper around mknodat(2); all arguments are forwarded
 * unchanged. */
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
    return mknodat(dirfd, pathname, mode, dev);
}
379 #ifdef TARGET_NR_openat
380 static int sys_openat(int dirfd
, const char *pathname
, int flags
, ...)
383 * open(2) has extra parameter 'mode' when called with
386 if ((flags
& O_CREAT
) != 0) {
391 * Get the 'mode' parameter and translate it to
395 mode
= va_arg(ap
, mode_t
);
396 mode
= target_to_host_bitmask(mode
, fcntl_flags_tbl
);
399 return (openat(dirfd
, pathname
, flags
, mode
));
401 return (openat(dirfd
, pathname
, flags
));
404 #ifdef TARGET_NR_readlinkat
/* Thin host wrapper around readlinkat(2); returns the byte count read
 * (narrowed to int) or -1 on error. */
static int sys_readlinkat(int dirfd, const char *pathname, char *buf,
                          size_t bufsiz)
{
    return readlinkat(dirfd, pathname, buf, bufsiz);
}
410 #ifdef TARGET_NR_renameat
/* Thin host wrapper around renameat(2); all arguments are forwarded
 * unchanged. */
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
    return renameat(olddirfd, oldpath, newdirfd, newpath);
}
417 #ifdef TARGET_NR_symlinkat
/* Thin host wrapper around symlinkat(2). */
static int sys_symlinkat(const char *oldpath, int newdirfd,
                         const char *newpath)
{
    return symlinkat(oldpath, newdirfd, newpath);
}
423 #ifdef TARGET_NR_unlinkat
/* Thin host wrapper around unlinkat(2); 'flags' may carry
 * AT_REMOVEDIR and is forwarded unchanged. */
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
    return unlinkat(dirfd, pathname, flags);
}
429 #else /* !CONFIG_ATFILE */
432 * Try direct syscalls instead
434 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
435 _syscall3(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
)
437 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
438 _syscall3(int,sys_fchmodat
,int,dirfd
,const char *,pathname
, mode_t
,mode
)
440 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
441 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
442 uid_t
,owner
,gid_t
,group
,int,flags
)
444 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
445 defined(__NR_fstatat64)
446 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
447 struct stat
*,buf
,int,flags
)
449 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
450 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
451 const struct timeval
*,times
)
453 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
454 defined(__NR_newfstatat)
455 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
456 struct stat
*,buf
,int,flags
)
458 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
459 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
460 int,newdirfd
,const char *,newpath
,int,flags
)
462 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
463 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
465 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
466 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
467 mode_t
,mode
,dev_t
,dev
)
469 #if defined(TARGET_NR_openat) && defined(__NR_openat)
470 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
472 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
473 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
474 char *,buf
,size_t,bufsize
)
476 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
477 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
478 int,newdirfd
,const char *,newpath
)
480 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
481 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
482 int,newdirfd
,const char *,newpath
)
484 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
485 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
488 #endif /* CONFIG_ATFILE */
490 #ifdef CONFIG_UTIMENSAT
/*
 * Host wrapper around utimensat(2).  A NULL pathname selects the
 * futimens() form, which operates directly on 'dirfd' (the kernel
 * syscall accepts a NULL path; libc splits it into two entry points).
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    }
    return utimensat(dirfd, pathname, times, flags);
}
500 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
501 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
502 const struct timespec
*,tsp
,int,flags
)
504 #endif /* CONFIG_UTIMENSAT */
506 #ifdef CONFIG_INOTIFY
507 #include <sys/inotify.h>
509 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin host wrapper around inotify_init(2). */
static int sys_inotify_init(void)
{
    return inotify_init();
}
515 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin host wrapper around inotify_add_watch(2). */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
521 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin host wrapper around inotify_rm_watch(2). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
527 #ifdef CONFIG_INOTIFY1
528 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin host wrapper around inotify_init1(2); 'flags' may carry
 * IN_NONBLOCK / IN_CLOEXEC and is forwarded unchanged. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
536 /* Userspace can usually survive runtime without inotify */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY */
543 #if defined(TARGET_NR_ppoll)
545 # define __NR_ppoll -1
547 #define __NR_sys_ppoll __NR_ppoll
548 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
549 struct timespec
*, timeout
, const __sigset_t
*, sigmask
,
553 #if defined(TARGET_NR_pselect6)
554 #ifndef __NR_pselect6
555 # define __NR_pselect6 -1
557 #define __NR_sys_pselect6 __NR_pselect6
558 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
559 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
562 #if defined(TARGET_NR_prlimit64)
563 #ifndef __NR_prlimit64
564 # define __NR_prlimit64 -1
566 #define __NR_sys_prlimit64 __NR_prlimit64
567 /* The glibc rlimit structure may not be that used by the underlying syscall */
568 struct host_rlimit64
{
572 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
573 const struct host_rlimit64
*, new_limit
,
574 struct host_rlimit64
*, old_limit
)
577 extern int personality(int);
578 extern int flock(int, int);
579 extern int setfsuid(int);
580 extern int setfsgid(int);
581 extern int setgroups(int, gid_t
*);
583 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
585 static inline int regpairs_aligned(void *cpu_env
) {
586 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
588 #elif defined(TARGET_MIPS)
589 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
591 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
594 #define ERRNO_TABLE_SIZE 1200
596 /* target_to_host_errno_table[] is initialized from
597 * host_to_target_errno_table[] in syscall_init(). */
598 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
602 * This list is the union of errno values overridden in asm-<arch>/errno.h
603 * minus the errnos that are not actually generic to all archs.
605 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
606 [EIDRM
] = TARGET_EIDRM
,
607 [ECHRNG
] = TARGET_ECHRNG
,
608 [EL2NSYNC
] = TARGET_EL2NSYNC
,
609 [EL3HLT
] = TARGET_EL3HLT
,
610 [EL3RST
] = TARGET_EL3RST
,
611 [ELNRNG
] = TARGET_ELNRNG
,
612 [EUNATCH
] = TARGET_EUNATCH
,
613 [ENOCSI
] = TARGET_ENOCSI
,
614 [EL2HLT
] = TARGET_EL2HLT
,
615 [EDEADLK
] = TARGET_EDEADLK
,
616 [ENOLCK
] = TARGET_ENOLCK
,
617 [EBADE
] = TARGET_EBADE
,
618 [EBADR
] = TARGET_EBADR
,
619 [EXFULL
] = TARGET_EXFULL
,
620 [ENOANO
] = TARGET_ENOANO
,
621 [EBADRQC
] = TARGET_EBADRQC
,
622 [EBADSLT
] = TARGET_EBADSLT
,
623 [EBFONT
] = TARGET_EBFONT
,
624 [ENOSTR
] = TARGET_ENOSTR
,
625 [ENODATA
] = TARGET_ENODATA
,
626 [ETIME
] = TARGET_ETIME
,
627 [ENOSR
] = TARGET_ENOSR
,
628 [ENONET
] = TARGET_ENONET
,
629 [ENOPKG
] = TARGET_ENOPKG
,
630 [EREMOTE
] = TARGET_EREMOTE
,
631 [ENOLINK
] = TARGET_ENOLINK
,
632 [EADV
] = TARGET_EADV
,
633 [ESRMNT
] = TARGET_ESRMNT
,
634 [ECOMM
] = TARGET_ECOMM
,
635 [EPROTO
] = TARGET_EPROTO
,
636 [EDOTDOT
] = TARGET_EDOTDOT
,
637 [EMULTIHOP
] = TARGET_EMULTIHOP
,
638 [EBADMSG
] = TARGET_EBADMSG
,
639 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
640 [EOVERFLOW
] = TARGET_EOVERFLOW
,
641 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
642 [EBADFD
] = TARGET_EBADFD
,
643 [EREMCHG
] = TARGET_EREMCHG
,
644 [ELIBACC
] = TARGET_ELIBACC
,
645 [ELIBBAD
] = TARGET_ELIBBAD
,
646 [ELIBSCN
] = TARGET_ELIBSCN
,
647 [ELIBMAX
] = TARGET_ELIBMAX
,
648 [ELIBEXEC
] = TARGET_ELIBEXEC
,
649 [EILSEQ
] = TARGET_EILSEQ
,
650 [ENOSYS
] = TARGET_ENOSYS
,
651 [ELOOP
] = TARGET_ELOOP
,
652 [ERESTART
] = TARGET_ERESTART
,
653 [ESTRPIPE
] = TARGET_ESTRPIPE
,
654 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
655 [EUSERS
] = TARGET_EUSERS
,
656 [ENOTSOCK
] = TARGET_ENOTSOCK
,
657 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
658 [EMSGSIZE
] = TARGET_EMSGSIZE
,
659 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
660 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
661 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
662 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
663 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
664 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
665 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
666 [EADDRINUSE
] = TARGET_EADDRINUSE
,
667 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
668 [ENETDOWN
] = TARGET_ENETDOWN
,
669 [ENETUNREACH
] = TARGET_ENETUNREACH
,
670 [ENETRESET
] = TARGET_ENETRESET
,
671 [ECONNABORTED
] = TARGET_ECONNABORTED
,
672 [ECONNRESET
] = TARGET_ECONNRESET
,
673 [ENOBUFS
] = TARGET_ENOBUFS
,
674 [EISCONN
] = TARGET_EISCONN
,
675 [ENOTCONN
] = TARGET_ENOTCONN
,
676 [EUCLEAN
] = TARGET_EUCLEAN
,
677 [ENOTNAM
] = TARGET_ENOTNAM
,
678 [ENAVAIL
] = TARGET_ENAVAIL
,
679 [EISNAM
] = TARGET_EISNAM
,
680 [EREMOTEIO
] = TARGET_EREMOTEIO
,
681 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
682 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
683 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
684 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
685 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
686 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
687 [EALREADY
] = TARGET_EALREADY
,
688 [EINPROGRESS
] = TARGET_EINPROGRESS
,
689 [ESTALE
] = TARGET_ESTALE
,
690 [ECANCELED
] = TARGET_ECANCELED
,
691 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
692 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
694 [ENOKEY
] = TARGET_ENOKEY
,
697 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
700 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
703 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
706 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
708 #ifdef ENOTRECOVERABLE
709 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
713 static inline int host_to_target_errno(int err
)
715 if(host_to_target_errno_table
[err
])
716 return host_to_target_errno_table
[err
];
720 static inline int target_to_host_errno(int err
)
722 if (target_to_host_errno_table
[err
])
723 return target_to_host_errno_table
[err
];
/*
 * get_errno(): convert a host syscall result to the target convention.
 * The visible line negates the translated host errno, i.e. failures
 * are reported to the target as -TARGET_<errno>.
 * NOTE(review): this extract is missing interior lines of the function
 * (presumably the `ret == -1` test and a success path that returns
 * `ret` unchanged) — confirm against the full source before editing.
 */
727 static inline abi_long
get_errno(abi_long ret
)
730 return -host_to_target_errno(errno
);
735 static inline int is_error(abi_long ret
)
737 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
/*
 * Return the host strerror() message for a target errno value.
 * The target errno is first mapped back to the host numbering so the
 * text matches what the value means on the target.
 */
char *target_strerror(int err)
{
    int host_err = target_to_host_errno(err);
    return strerror(host_err);
}
745 static abi_ulong target_brk
;
746 static abi_ulong target_original_brk
;
747 static abi_ulong brk_page
;
749 void target_set_brk(abi_ulong new_brk
)
751 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
752 brk_page
= HOST_PAGE_ALIGN(target_brk
);
755 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
756 #define DEBUGF_BRK(message, args...)
758 /* do_brk() must return target values and target errnos. */
759 abi_long
do_brk(abi_ulong new_brk
)
761 abi_long mapped_addr
;
764 DEBUGF_BRK("do_brk(%#010x) -> ", new_brk
);
767 DEBUGF_BRK("%#010x (!new_brk)\n", target_brk
);
770 if (new_brk
< target_original_brk
) {
771 DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk
);
775 /* If the new brk is less than the highest page reserved to the
776 * target heap allocation, set it and we're almost done... */
777 if (new_brk
<= brk_page
) {
778 /* Heap contents are initialized to zero, as for anonymous
780 if (new_brk
> target_brk
) {
781 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
783 target_brk
= new_brk
;
784 DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk
);
788 /* We need to allocate more memory after the brk... Note that
789 * we don't use MAP_FIXED because that will map over the top of
790 * any existing mapping (like the one with the host libc or qemu
791 * itself); instead we treat "mapped but at wrong address" as
792 * a failure and unmap again.
794 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
795 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
796 PROT_READ
|PROT_WRITE
,
797 MAP_ANON
|MAP_PRIVATE
, 0, 0));
799 if (mapped_addr
== brk_page
) {
800 target_brk
= new_brk
;
801 brk_page
= HOST_PAGE_ALIGN(target_brk
);
802 DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk
);
804 } else if (mapped_addr
!= -1) {
805 /* Mapped but at wrong address, meaning there wasn't actually
806 * enough space for this brk.
808 target_munmap(mapped_addr
, new_alloc_size
);
810 DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk
);
813 DEBUGF_BRK("%#010x (otherwise)\n", target_brk
);
816 #if defined(TARGET_ALPHA)
817 /* We (partially) emulate OSF/1 on Alpha, which requires we
818 return a proper errno, not an unchanged brk value. */
819 return -TARGET_ENOMEM
;
821 /* For everything else, return the previous break. */
825 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
826 abi_ulong target_fds_addr
,
830 abi_ulong b
, *target_fds
;
832 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
833 if (!(target_fds
= lock_user(VERIFY_READ
,
835 sizeof(abi_ulong
) * nw
,
837 return -TARGET_EFAULT
;
841 for (i
= 0; i
< nw
; i
++) {
842 /* grab the abi_ulong */
843 __get_user(b
, &target_fds
[i
]);
844 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
845 /* check the bit inside the abi_ulong */
852 unlock_user(target_fds
, target_fds_addr
, 0);
857 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
858 abi_ulong target_fds_addr
,
861 if (target_fds_addr
) {
862 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
863 return -TARGET_EFAULT
;
871 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
877 abi_ulong
*target_fds
;
879 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
880 if (!(target_fds
= lock_user(VERIFY_WRITE
,
882 sizeof(abi_ulong
) * nw
,
884 return -TARGET_EFAULT
;
887 for (i
= 0; i
< nw
; i
++) {
889 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
890 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
893 __put_user(v
, &target_fds
[i
]);
896 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
901 #if defined(__alpha__)
/*
 * host_to_target_clock_t(): rescale a clock_t tick count from the
 * host's HZ to the target's.  The visible branch rescales in 64-bit
 * arithmetic to avoid intermediate overflow.
 * NOTE(review): the HOST_HZ == TARGET_HZ branch body (presumably
 * `return ticks;`) and the closing of the #if are missing from this
 * extract — confirm against the full source.
 */
907 static inline abi_long
host_to_target_clock_t(long ticks
)
909 #if HOST_HZ == TARGET_HZ
912 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
916 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
917 const struct rusage
*rusage
)
919 struct target_rusage
*target_rusage
;
921 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
922 return -TARGET_EFAULT
;
923 target_rusage
->ru_utime
.tv_sec
= tswapl(rusage
->ru_utime
.tv_sec
);
924 target_rusage
->ru_utime
.tv_usec
= tswapl(rusage
->ru_utime
.tv_usec
);
925 target_rusage
->ru_stime
.tv_sec
= tswapl(rusage
->ru_stime
.tv_sec
);
926 target_rusage
->ru_stime
.tv_usec
= tswapl(rusage
->ru_stime
.tv_usec
);
927 target_rusage
->ru_maxrss
= tswapl(rusage
->ru_maxrss
);
928 target_rusage
->ru_ixrss
= tswapl(rusage
->ru_ixrss
);
929 target_rusage
->ru_idrss
= tswapl(rusage
->ru_idrss
);
930 target_rusage
->ru_isrss
= tswapl(rusage
->ru_isrss
);
931 target_rusage
->ru_minflt
= tswapl(rusage
->ru_minflt
);
932 target_rusage
->ru_majflt
= tswapl(rusage
->ru_majflt
);
933 target_rusage
->ru_nswap
= tswapl(rusage
->ru_nswap
);
934 target_rusage
->ru_inblock
= tswapl(rusage
->ru_inblock
);
935 target_rusage
->ru_oublock
= tswapl(rusage
->ru_oublock
);
936 target_rusage
->ru_msgsnd
= tswapl(rusage
->ru_msgsnd
);
937 target_rusage
->ru_msgrcv
= tswapl(rusage
->ru_msgrcv
);
938 target_rusage
->ru_nsignals
= tswapl(rusage
->ru_nsignals
);
939 target_rusage
->ru_nvcsw
= tswapl(rusage
->ru_nvcsw
);
940 target_rusage
->ru_nivcsw
= tswapl(rusage
->ru_nivcsw
);
941 unlock_user_struct(target_rusage
, target_addr
, 1);
946 static inline rlim_t
target_to_host_rlim(target_ulong target_rlim
)
948 target_ulong target_rlim_swap
;
951 target_rlim_swap
= tswapl(target_rlim
);
952 if (target_rlim_swap
== TARGET_RLIM_INFINITY
|| target_rlim_swap
!= (rlim_t
)target_rlim_swap
)
953 result
= RLIM_INFINITY
;
955 result
= target_rlim_swap
;
960 static inline target_ulong
host_to_target_rlim(rlim_t rlim
)
962 target_ulong target_rlim_swap
;
965 if (rlim
== RLIM_INFINITY
|| rlim
!= (target_long
)rlim
)
966 target_rlim_swap
= TARGET_RLIM_INFINITY
;
968 target_rlim_swap
= rlim
;
969 result
= tswapl(target_rlim_swap
);
974 static inline int target_to_host_resource(int code
)
977 case TARGET_RLIMIT_AS
:
979 case TARGET_RLIMIT_CORE
:
981 case TARGET_RLIMIT_CPU
:
983 case TARGET_RLIMIT_DATA
:
985 case TARGET_RLIMIT_FSIZE
:
987 case TARGET_RLIMIT_LOCKS
:
989 case TARGET_RLIMIT_MEMLOCK
:
990 return RLIMIT_MEMLOCK
;
991 case TARGET_RLIMIT_MSGQUEUE
:
992 return RLIMIT_MSGQUEUE
;
993 case TARGET_RLIMIT_NICE
:
995 case TARGET_RLIMIT_NOFILE
:
996 return RLIMIT_NOFILE
;
997 case TARGET_RLIMIT_NPROC
:
999 case TARGET_RLIMIT_RSS
:
1001 case TARGET_RLIMIT_RTPRIO
:
1002 return RLIMIT_RTPRIO
;
1003 case TARGET_RLIMIT_SIGPENDING
:
1004 return RLIMIT_SIGPENDING
;
1005 case TARGET_RLIMIT_STACK
:
1006 return RLIMIT_STACK
;
1012 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1013 abi_ulong target_tv_addr
)
1015 struct target_timeval
*target_tv
;
1017 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1018 return -TARGET_EFAULT
;
1020 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1021 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1023 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1028 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1029 const struct timeval
*tv
)
1031 struct target_timeval
*target_tv
;
1033 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1034 return -TARGET_EFAULT
;
1036 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1037 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1039 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1044 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1047 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1048 abi_ulong target_mq_attr_addr
)
1050 struct target_mq_attr
*target_mq_attr
;
1052 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1053 target_mq_attr_addr
, 1))
1054 return -TARGET_EFAULT
;
1056 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1057 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1058 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1059 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1061 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1066 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1067 const struct mq_attr
*attr
)
1069 struct target_mq_attr
*target_mq_attr
;
1071 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1072 target_mq_attr_addr
, 0))
1073 return -TARGET_EFAULT
;
1075 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1076 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1077 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1078 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1080 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1086 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1087 /* do_select() must return target values and target errnos. */
1088 static abi_long
do_select(int n
,
1089 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1090 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1092 fd_set rfds
, wfds
, efds
;
1093 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1094 struct timeval tv
, *tv_ptr
;
1097 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1101 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1105 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1110 if (target_tv_addr
) {
1111 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1112 return -TARGET_EFAULT
;
1118 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1120 if (!is_error(ret
)) {
1121 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1122 return -TARGET_EFAULT
;
1123 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1124 return -TARGET_EFAULT
;
1125 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1126 return -TARGET_EFAULT
;
1128 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1129 return -TARGET_EFAULT
;
1136 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1139 return pipe2(host_pipe
, flags
);
1145 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1146 int flags
, int is_pipe2
)
1150 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1153 return get_errno(ret
);
1155 /* Several targets have special calling conventions for the original
1156 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1158 #if defined(TARGET_ALPHA)
1159 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1160 return host_pipe
[0];
1161 #elif defined(TARGET_MIPS)
1162 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1163 return host_pipe
[0];
1164 #elif defined(TARGET_SH4)
1165 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1166 return host_pipe
[0];
1170 if (put_user_s32(host_pipe
[0], pipedes
)
1171 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1172 return -TARGET_EFAULT
;
1173 return get_errno(ret
);
1176 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1177 abi_ulong target_addr
,
1180 struct target_ip_mreqn
*target_smreqn
;
1182 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1184 return -TARGET_EFAULT
;
1185 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1186 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1187 if (len
== sizeof(struct target_ip_mreqn
))
1188 mreqn
->imr_ifindex
= tswapl(target_smreqn
->imr_ifindex
);
1189 unlock_user(target_smreqn
, target_addr
, 0);
1194 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1195 abi_ulong target_addr
,
1198 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1199 sa_family_t sa_family
;
1200 struct target_sockaddr
*target_saddr
;
1202 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1204 return -TARGET_EFAULT
;
1206 sa_family
= tswap16(target_saddr
->sa_family
);
1208 /* Oops. The caller might send a incomplete sun_path; sun_path
1209 * must be terminated by \0 (see the manual page), but
1210 * unfortunately it is quite common to specify sockaddr_un
1211 * length as "strlen(x->sun_path)" while it should be
1212 * "strlen(...) + 1". We'll fix that here if needed.
1213 * Linux kernel has a similar feature.
1216 if (sa_family
== AF_UNIX
) {
1217 if (len
< unix_maxlen
&& len
> 0) {
1218 char *cp
= (char*)target_saddr
;
1220 if ( cp
[len
-1] && !cp
[len
] )
1223 if (len
> unix_maxlen
)
1227 memcpy(addr
, target_saddr
, len
);
1228 addr
->sa_family
= sa_family
;
1229 unlock_user(target_saddr
, target_addr
, 0);
1234 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1235 struct sockaddr
*addr
,
1238 struct target_sockaddr
*target_saddr
;
1240 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1242 return -TARGET_EFAULT
;
1243 memcpy(target_saddr
, addr
, len
);
1244 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1245 unlock_user(target_saddr
, target_addr
, len
);
1250 /* ??? Should this also swap msgh->name? */
1251 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1252 struct target_msghdr
*target_msgh
)
1254 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1255 abi_long msg_controllen
;
1256 abi_ulong target_cmsg_addr
;
1257 struct target_cmsghdr
*target_cmsg
;
1258 socklen_t space
= 0;
1260 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1261 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1263 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1264 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1266 return -TARGET_EFAULT
;
1268 while (cmsg
&& target_cmsg
) {
1269 void *data
= CMSG_DATA(cmsg
);
1270 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1272 int len
= tswapl(target_cmsg
->cmsg_len
)
1273 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1275 space
+= CMSG_SPACE(len
);
1276 if (space
> msgh
->msg_controllen
) {
1277 space
-= CMSG_SPACE(len
);
1278 gemu_log("Host cmsg overflow\n");
1282 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1283 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1284 cmsg
->cmsg_len
= CMSG_LEN(len
);
1286 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1287 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1288 memcpy(data
, target_data
, len
);
1290 int *fd
= (int *)data
;
1291 int *target_fd
= (int *)target_data
;
1292 int i
, numfds
= len
/ sizeof(int);
1294 for (i
= 0; i
< numfds
; i
++)
1295 fd
[i
] = tswap32(target_fd
[i
]);
1298 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1299 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1301 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1303 msgh
->msg_controllen
= space
;
1307 /* ??? Should this also swap msgh->name? */
1308 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1309 struct msghdr
*msgh
)
1311 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1312 abi_long msg_controllen
;
1313 abi_ulong target_cmsg_addr
;
1314 struct target_cmsghdr
*target_cmsg
;
1315 socklen_t space
= 0;
1317 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1318 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1320 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1321 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1323 return -TARGET_EFAULT
;
1325 while (cmsg
&& target_cmsg
) {
1326 void *data
= CMSG_DATA(cmsg
);
1327 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1329 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1331 space
+= TARGET_CMSG_SPACE(len
);
1332 if (space
> msg_controllen
) {
1333 space
-= TARGET_CMSG_SPACE(len
);
1334 gemu_log("Target cmsg overflow\n");
1338 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1339 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1340 target_cmsg
->cmsg_len
= tswapl(TARGET_CMSG_LEN(len
));
1342 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1343 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1344 memcpy(target_data
, data
, len
);
1346 int *fd
= (int *)data
;
1347 int *target_fd
= (int *)target_data
;
1348 int i
, numfds
= len
/ sizeof(int);
1350 for (i
= 0; i
< numfds
; i
++)
1351 target_fd
[i
] = tswap32(fd
[i
]);
1354 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1355 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1357 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1359 target_msgh
->msg_controllen
= tswapl(space
);
1363 /* do_setsockopt() Must return target values and target errnos. */
1364 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1365 abi_ulong optval_addr
, socklen_t optlen
)
1369 struct ip_mreqn
*ip_mreq
;
1370 struct ip_mreq_source
*ip_mreq_source
;
1374 /* TCP options all take an 'int' value. */
1375 if (optlen
< sizeof(uint32_t))
1376 return -TARGET_EINVAL
;
1378 if (get_user_u32(val
, optval_addr
))
1379 return -TARGET_EFAULT
;
1380 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1387 case IP_ROUTER_ALERT
:
1391 case IP_MTU_DISCOVER
:
1397 case IP_MULTICAST_TTL
:
1398 case IP_MULTICAST_LOOP
:
1400 if (optlen
>= sizeof(uint32_t)) {
1401 if (get_user_u32(val
, optval_addr
))
1402 return -TARGET_EFAULT
;
1403 } else if (optlen
>= 1) {
1404 if (get_user_u8(val
, optval_addr
))
1405 return -TARGET_EFAULT
;
1407 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1409 case IP_ADD_MEMBERSHIP
:
1410 case IP_DROP_MEMBERSHIP
:
1411 if (optlen
< sizeof (struct target_ip_mreq
) ||
1412 optlen
> sizeof (struct target_ip_mreqn
))
1413 return -TARGET_EINVAL
;
1415 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1416 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1417 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1420 case IP_BLOCK_SOURCE
:
1421 case IP_UNBLOCK_SOURCE
:
1422 case IP_ADD_SOURCE_MEMBERSHIP
:
1423 case IP_DROP_SOURCE_MEMBERSHIP
:
1424 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1425 return -TARGET_EINVAL
;
1427 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1428 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1429 unlock_user (ip_mreq_source
, optval_addr
, 0);
1436 case TARGET_SOL_SOCKET
:
1438 /* Options with 'int' argument. */
1439 case TARGET_SO_DEBUG
:
1442 case TARGET_SO_REUSEADDR
:
1443 optname
= SO_REUSEADDR
;
1445 case TARGET_SO_TYPE
:
1448 case TARGET_SO_ERROR
:
1451 case TARGET_SO_DONTROUTE
:
1452 optname
= SO_DONTROUTE
;
1454 case TARGET_SO_BROADCAST
:
1455 optname
= SO_BROADCAST
;
1457 case TARGET_SO_SNDBUF
:
1458 optname
= SO_SNDBUF
;
1460 case TARGET_SO_RCVBUF
:
1461 optname
= SO_RCVBUF
;
1463 case TARGET_SO_KEEPALIVE
:
1464 optname
= SO_KEEPALIVE
;
1466 case TARGET_SO_OOBINLINE
:
1467 optname
= SO_OOBINLINE
;
1469 case TARGET_SO_NO_CHECK
:
1470 optname
= SO_NO_CHECK
;
1472 case TARGET_SO_PRIORITY
:
1473 optname
= SO_PRIORITY
;
1476 case TARGET_SO_BSDCOMPAT
:
1477 optname
= SO_BSDCOMPAT
;
1480 case TARGET_SO_PASSCRED
:
1481 optname
= SO_PASSCRED
;
1483 case TARGET_SO_TIMESTAMP
:
1484 optname
= SO_TIMESTAMP
;
1486 case TARGET_SO_RCVLOWAT
:
1487 optname
= SO_RCVLOWAT
;
1489 case TARGET_SO_RCVTIMEO
:
1490 optname
= SO_RCVTIMEO
;
1492 case TARGET_SO_SNDTIMEO
:
1493 optname
= SO_SNDTIMEO
;
1499 if (optlen
< sizeof(uint32_t))
1500 return -TARGET_EINVAL
;
1502 if (get_user_u32(val
, optval_addr
))
1503 return -TARGET_EFAULT
;
1504 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1508 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level
, optname
);
1509 ret
= -TARGET_ENOPROTOOPT
;
1514 /* do_getsockopt() Must return target values and target errnos. */
1515 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1516 abi_ulong optval_addr
, abi_ulong optlen
)
1523 case TARGET_SOL_SOCKET
:
1526 /* These don't just return a single integer */
1527 case TARGET_SO_LINGER
:
1528 case TARGET_SO_RCVTIMEO
:
1529 case TARGET_SO_SNDTIMEO
:
1530 case TARGET_SO_PEERCRED
:
1531 case TARGET_SO_PEERNAME
:
1533 /* Options with 'int' argument. */
1534 case TARGET_SO_DEBUG
:
1537 case TARGET_SO_REUSEADDR
:
1538 optname
= SO_REUSEADDR
;
1540 case TARGET_SO_TYPE
:
1543 case TARGET_SO_ERROR
:
1546 case TARGET_SO_DONTROUTE
:
1547 optname
= SO_DONTROUTE
;
1549 case TARGET_SO_BROADCAST
:
1550 optname
= SO_BROADCAST
;
1552 case TARGET_SO_SNDBUF
:
1553 optname
= SO_SNDBUF
;
1555 case TARGET_SO_RCVBUF
:
1556 optname
= SO_RCVBUF
;
1558 case TARGET_SO_KEEPALIVE
:
1559 optname
= SO_KEEPALIVE
;
1561 case TARGET_SO_OOBINLINE
:
1562 optname
= SO_OOBINLINE
;
1564 case TARGET_SO_NO_CHECK
:
1565 optname
= SO_NO_CHECK
;
1567 case TARGET_SO_PRIORITY
:
1568 optname
= SO_PRIORITY
;
1571 case TARGET_SO_BSDCOMPAT
:
1572 optname
= SO_BSDCOMPAT
;
1575 case TARGET_SO_PASSCRED
:
1576 optname
= SO_PASSCRED
;
1578 case TARGET_SO_TIMESTAMP
:
1579 optname
= SO_TIMESTAMP
;
1581 case TARGET_SO_RCVLOWAT
:
1582 optname
= SO_RCVLOWAT
;
1589 /* TCP options all take an 'int' value. */
1591 if (get_user_u32(len
, optlen
))
1592 return -TARGET_EFAULT
;
1594 return -TARGET_EINVAL
;
1596 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1602 if (put_user_u32(val
, optval_addr
))
1603 return -TARGET_EFAULT
;
1605 if (put_user_u8(val
, optval_addr
))
1606 return -TARGET_EFAULT
;
1608 if (put_user_u32(len
, optlen
))
1609 return -TARGET_EFAULT
;
1616 case IP_ROUTER_ALERT
:
1620 case IP_MTU_DISCOVER
:
1626 case IP_MULTICAST_TTL
:
1627 case IP_MULTICAST_LOOP
:
1628 if (get_user_u32(len
, optlen
))
1629 return -TARGET_EFAULT
;
1631 return -TARGET_EINVAL
;
1633 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1636 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1638 if (put_user_u32(len
, optlen
)
1639 || put_user_u8(val
, optval_addr
))
1640 return -TARGET_EFAULT
;
1642 if (len
> sizeof(int))
1644 if (put_user_u32(len
, optlen
)
1645 || put_user_u32(val
, optval_addr
))
1646 return -TARGET_EFAULT
;
1650 ret
= -TARGET_ENOPROTOOPT
;
1656 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1658 ret
= -TARGET_EOPNOTSUPP
;
1665 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1666 * other lock functions have a return code of 0 for failure.
1668 static abi_long
lock_iovec(int type
, struct iovec
*vec
, abi_ulong target_addr
,
1669 int count
, int copy
)
1671 struct target_iovec
*target_vec
;
1675 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1677 return -TARGET_EFAULT
;
1678 for(i
= 0;i
< count
; i
++) {
1679 base
= tswapl(target_vec
[i
].iov_base
);
1680 vec
[i
].iov_len
= tswapl(target_vec
[i
].iov_len
);
1681 if (vec
[i
].iov_len
!= 0) {
1682 vec
[i
].iov_base
= lock_user(type
, base
, vec
[i
].iov_len
, copy
);
1683 /* Don't check lock_user return value. We must call writev even
1684 if a element has invalid base address. */
1686 /* zero length pointer is ignored */
1687 vec
[i
].iov_base
= NULL
;
1690 unlock_user (target_vec
, target_addr
, 0);
1694 static abi_long
unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1695 int count
, int copy
)
1697 struct target_iovec
*target_vec
;
1701 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1703 return -TARGET_EFAULT
;
1704 for(i
= 0;i
< count
; i
++) {
1705 if (target_vec
[i
].iov_base
) {
1706 base
= tswapl(target_vec
[i
].iov_base
);
1707 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1710 unlock_user (target_vec
, target_addr
, 0);
1715 /* do_socket() Must return target values and target errnos. */
1716 static abi_long
do_socket(int domain
, int type
, int protocol
)
1718 #if defined(TARGET_MIPS)
1720 case TARGET_SOCK_DGRAM
:
1723 case TARGET_SOCK_STREAM
:
1726 case TARGET_SOCK_RAW
:
1729 case TARGET_SOCK_RDM
:
1732 case TARGET_SOCK_SEQPACKET
:
1733 type
= SOCK_SEQPACKET
;
1735 case TARGET_SOCK_PACKET
:
1740 if (domain
== PF_NETLINK
)
1741 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1742 return get_errno(socket(domain
, type
, protocol
));
1745 /* do_bind() Must return target values and target errnos. */
1746 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1752 if ((int)addrlen
< 0) {
1753 return -TARGET_EINVAL
;
1756 addr
= alloca(addrlen
+1);
1758 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1762 return get_errno(bind(sockfd
, addr
, addrlen
));
1765 /* do_connect() Must return target values and target errnos. */
1766 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1772 if ((int)addrlen
< 0) {
1773 return -TARGET_EINVAL
;
1776 addr
= alloca(addrlen
);
1778 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1782 return get_errno(connect(sockfd
, addr
, addrlen
));
1785 /* do_sendrecvmsg() Must return target values and target errnos. */
1786 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1787 int flags
, int send
)
1790 struct target_msghdr
*msgp
;
1794 abi_ulong target_vec
;
1797 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1801 return -TARGET_EFAULT
;
1802 if (msgp
->msg_name
) {
1803 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1804 msg
.msg_name
= alloca(msg
.msg_namelen
);
1805 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapl(msgp
->msg_name
),
1808 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1812 msg
.msg_name
= NULL
;
1813 msg
.msg_namelen
= 0;
1815 msg
.msg_controllen
= 2 * tswapl(msgp
->msg_controllen
);
1816 msg
.msg_control
= alloca(msg
.msg_controllen
);
1817 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1819 count
= tswapl(msgp
->msg_iovlen
);
1820 vec
= alloca(count
* sizeof(struct iovec
));
1821 target_vec
= tswapl(msgp
->msg_iov
);
1822 lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
, vec
, target_vec
, count
, send
);
1823 msg
.msg_iovlen
= count
;
1827 ret
= target_to_host_cmsg(&msg
, msgp
);
1829 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1831 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1832 if (!is_error(ret
)) {
1834 ret
= host_to_target_cmsg(msgp
, &msg
);
1839 unlock_iovec(vec
, target_vec
, count
, !send
);
1840 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1844 /* do_accept() Must return target values and target errnos. */
1845 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
1846 abi_ulong target_addrlen_addr
)
1852 if (target_addr
== 0)
1853 return get_errno(accept(fd
, NULL
, NULL
));
1855 /* linux returns EINVAL if addrlen pointer is invalid */
1856 if (get_user_u32(addrlen
, target_addrlen_addr
))
1857 return -TARGET_EINVAL
;
1859 if ((int)addrlen
< 0) {
1860 return -TARGET_EINVAL
;
1863 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1864 return -TARGET_EINVAL
;
1866 addr
= alloca(addrlen
);
1868 ret
= get_errno(accept(fd
, addr
, &addrlen
));
1869 if (!is_error(ret
)) {
1870 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1871 if (put_user_u32(addrlen
, target_addrlen_addr
))
1872 ret
= -TARGET_EFAULT
;
1877 /* do_getpeername() Must return target values and target errnos. */
1878 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
1879 abi_ulong target_addrlen_addr
)
1885 if (get_user_u32(addrlen
, target_addrlen_addr
))
1886 return -TARGET_EFAULT
;
1888 if ((int)addrlen
< 0) {
1889 return -TARGET_EINVAL
;
1892 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1893 return -TARGET_EFAULT
;
1895 addr
= alloca(addrlen
);
1897 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
1898 if (!is_error(ret
)) {
1899 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1900 if (put_user_u32(addrlen
, target_addrlen_addr
))
1901 ret
= -TARGET_EFAULT
;
1906 /* do_getsockname() Must return target values and target errnos. */
1907 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
1908 abi_ulong target_addrlen_addr
)
1914 if (get_user_u32(addrlen
, target_addrlen_addr
))
1915 return -TARGET_EFAULT
;
1917 if ((int)addrlen
< 0) {
1918 return -TARGET_EINVAL
;
1921 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1922 return -TARGET_EFAULT
;
1924 addr
= alloca(addrlen
);
1926 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
1927 if (!is_error(ret
)) {
1928 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1929 if (put_user_u32(addrlen
, target_addrlen_addr
))
1930 ret
= -TARGET_EFAULT
;
1935 /* do_socketpair() Must return target values and target errnos. */
1936 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
1937 abi_ulong target_tab_addr
)
1942 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
1943 if (!is_error(ret
)) {
1944 if (put_user_s32(tab
[0], target_tab_addr
)
1945 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
1946 ret
= -TARGET_EFAULT
;
1951 /* do_sendto() Must return target values and target errnos. */
1952 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
1953 abi_ulong target_addr
, socklen_t addrlen
)
1959 if ((int)addrlen
< 0) {
1960 return -TARGET_EINVAL
;
1963 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
1965 return -TARGET_EFAULT
;
1967 addr
= alloca(addrlen
);
1968 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1970 unlock_user(host_msg
, msg
, 0);
1973 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
1975 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
1977 unlock_user(host_msg
, msg
, 0);
1981 /* do_recvfrom() Must return target values and target errnos. */
1982 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
1983 abi_ulong target_addr
,
1984 abi_ulong target_addrlen
)
1991 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
1993 return -TARGET_EFAULT
;
1995 if (get_user_u32(addrlen
, target_addrlen
)) {
1996 ret
= -TARGET_EFAULT
;
1999 if ((int)addrlen
< 0) {
2000 ret
= -TARGET_EINVAL
;
2003 addr
= alloca(addrlen
);
2004 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2006 addr
= NULL
; /* To keep compiler quiet. */
2007 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2009 if (!is_error(ret
)) {
2011 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2012 if (put_user_u32(addrlen
, target_addrlen
)) {
2013 ret
= -TARGET_EFAULT
;
2017 unlock_user(host_msg
, msg
, len
);
2020 unlock_user(host_msg
, msg
, 0);
2025 #ifdef TARGET_NR_socketcall
2026 /* do_socketcall() Must return target values and target errnos. */
2027 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2030 const int n
= sizeof(abi_ulong
);
2035 abi_ulong domain
, type
, protocol
;
2037 if (get_user_ual(domain
, vptr
)
2038 || get_user_ual(type
, vptr
+ n
)
2039 || get_user_ual(protocol
, vptr
+ 2 * n
))
2040 return -TARGET_EFAULT
;
2042 ret
= do_socket(domain
, type
, protocol
);
2048 abi_ulong target_addr
;
2051 if (get_user_ual(sockfd
, vptr
)
2052 || get_user_ual(target_addr
, vptr
+ n
)
2053 || get_user_ual(addrlen
, vptr
+ 2 * n
))
2054 return -TARGET_EFAULT
;
2056 ret
= do_bind(sockfd
, target_addr
, addrlen
);
2059 case SOCKOP_connect
:
2062 abi_ulong target_addr
;
2065 if (get_user_ual(sockfd
, vptr
)
2066 || get_user_ual(target_addr
, vptr
+ n
)
2067 || get_user_ual(addrlen
, vptr
+ 2 * n
))
2068 return -TARGET_EFAULT
;
2070 ret
= do_connect(sockfd
, target_addr
, addrlen
);
2075 abi_ulong sockfd
, backlog
;
2077 if (get_user_ual(sockfd
, vptr
)
2078 || get_user_ual(backlog
, vptr
+ n
))
2079 return -TARGET_EFAULT
;
2081 ret
= get_errno(listen(sockfd
, backlog
));
2087 abi_ulong target_addr
, target_addrlen
;
2089 if (get_user_ual(sockfd
, vptr
)
2090 || get_user_ual(target_addr
, vptr
+ n
)
2091 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2092 return -TARGET_EFAULT
;
2094 ret
= do_accept(sockfd
, target_addr
, target_addrlen
);
2097 case SOCKOP_getsockname
:
2100 abi_ulong target_addr
, target_addrlen
;
2102 if (get_user_ual(sockfd
, vptr
)
2103 || get_user_ual(target_addr
, vptr
+ n
)
2104 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2105 return -TARGET_EFAULT
;
2107 ret
= do_getsockname(sockfd
, target_addr
, target_addrlen
);
2110 case SOCKOP_getpeername
:
2113 abi_ulong target_addr
, target_addrlen
;
2115 if (get_user_ual(sockfd
, vptr
)
2116 || get_user_ual(target_addr
, vptr
+ n
)
2117 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2118 return -TARGET_EFAULT
;
2120 ret
= do_getpeername(sockfd
, target_addr
, target_addrlen
);
2123 case SOCKOP_socketpair
:
2125 abi_ulong domain
, type
, protocol
;
2128 if (get_user_ual(domain
, vptr
)
2129 || get_user_ual(type
, vptr
+ n
)
2130 || get_user_ual(protocol
, vptr
+ 2 * n
)
2131 || get_user_ual(tab
, vptr
+ 3 * n
))
2132 return -TARGET_EFAULT
;
2134 ret
= do_socketpair(domain
, type
, protocol
, tab
);
2144 if (get_user_ual(sockfd
, vptr
)
2145 || get_user_ual(msg
, vptr
+ n
)
2146 || get_user_ual(len
, vptr
+ 2 * n
)
2147 || get_user_ual(flags
, vptr
+ 3 * n
))
2148 return -TARGET_EFAULT
;
2150 ret
= do_sendto(sockfd
, msg
, len
, flags
, 0, 0);
2160 if (get_user_ual(sockfd
, vptr
)
2161 || get_user_ual(msg
, vptr
+ n
)
2162 || get_user_ual(len
, vptr
+ 2 * n
)
2163 || get_user_ual(flags
, vptr
+ 3 * n
))
2164 return -TARGET_EFAULT
;
2166 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, 0, 0);
2178 if (get_user_ual(sockfd
, vptr
)
2179 || get_user_ual(msg
, vptr
+ n
)
2180 || get_user_ual(len
, vptr
+ 2 * n
)
2181 || get_user_ual(flags
, vptr
+ 3 * n
)
2182 || get_user_ual(addr
, vptr
+ 4 * n
)
2183 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2184 return -TARGET_EFAULT
;
2186 ret
= do_sendto(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2189 case SOCKOP_recvfrom
:
2198 if (get_user_ual(sockfd
, vptr
)
2199 || get_user_ual(msg
, vptr
+ n
)
2200 || get_user_ual(len
, vptr
+ 2 * n
)
2201 || get_user_ual(flags
, vptr
+ 3 * n
)
2202 || get_user_ual(addr
, vptr
+ 4 * n
)
2203 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2204 return -TARGET_EFAULT
;
2206 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2209 case SOCKOP_shutdown
:
2211 abi_ulong sockfd
, how
;
2213 if (get_user_ual(sockfd
, vptr
)
2214 || get_user_ual(how
, vptr
+ n
))
2215 return -TARGET_EFAULT
;
2217 ret
= get_errno(shutdown(sockfd
, how
));
2220 case SOCKOP_sendmsg
:
2221 case SOCKOP_recvmsg
:
2224 abi_ulong target_msg
;
2227 if (get_user_ual(fd
, vptr
)
2228 || get_user_ual(target_msg
, vptr
+ n
)
2229 || get_user_ual(flags
, vptr
+ 2 * n
))
2230 return -TARGET_EFAULT
;
2232 ret
= do_sendrecvmsg(fd
, target_msg
, flags
,
2233 (num
== SOCKOP_sendmsg
));
2236 case SOCKOP_setsockopt
:
2244 if (get_user_ual(sockfd
, vptr
)
2245 || get_user_ual(level
, vptr
+ n
)
2246 || get_user_ual(optname
, vptr
+ 2 * n
)
2247 || get_user_ual(optval
, vptr
+ 3 * n
)
2248 || get_user_ual(optlen
, vptr
+ 4 * n
))
2249 return -TARGET_EFAULT
;
2251 ret
= do_setsockopt(sockfd
, level
, optname
, optval
, optlen
);
2254 case SOCKOP_getsockopt
:
2262 if (get_user_ual(sockfd
, vptr
)
2263 || get_user_ual(level
, vptr
+ n
)
2264 || get_user_ual(optname
, vptr
+ 2 * n
)
2265 || get_user_ual(optval
, vptr
+ 3 * n
)
2266 || get_user_ual(optlen
, vptr
+ 4 * n
))
2267 return -TARGET_EFAULT
;
2269 ret
= do_getsockopt(sockfd
, level
, optname
, optval
, optlen
);
2273 gemu_log("Unsupported socketcall: %d\n", num
);
2274 ret
= -TARGET_ENOSYS
;
2281 #define N_SHM_REGIONS 32

/* Table of guest SysV shared-memory attachments.
 * NOTE(review): the struct's member lines were dropped by extraction. */
2283 static struct shm_region
{
2286 } shm_regions
[N_SHM_REGIONS
];

/* Guest-ABI layout of struct ipc_perm.
 * NOTE(review): the leading members (before 'mode') and the closing
 * brace were dropped by extraction. */
2288 struct target_ipc_perm
2295 unsigned short int mode
;
2296 unsigned short int __pad1
;
2297 unsigned short int __seq
;
2298 unsigned short int __pad2
;
2299 abi_ulong __unused1
;
2300 abi_ulong __unused2
;

/* Guest-ABI layout of struct semid_ds.
 * NOTE(review): the opening and closing braces were dropped by
 * extraction. */
2303 struct target_semid_ds
2305 struct target_ipc_perm sem_perm
;
2306 abi_ulong sem_otime
;
2307 abi_ulong __unused1
;
2308 abi_ulong sem_ctime
;
2309 abi_ulong __unused2
;
2310 abi_ulong sem_nsems
;
2311 abi_ulong __unused3
;
2312 abi_ulong __unused4
;
2315 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2316 abi_ulong target_addr
)
2318 struct target_ipc_perm
*target_ip
;
2319 struct target_semid_ds
*target_sd
;
2321 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2322 return -TARGET_EFAULT
;
2323 target_ip
= &(target_sd
->sem_perm
);
2324 host_ip
->__key
= tswapl(target_ip
->__key
);
2325 host_ip
->uid
= tswapl(target_ip
->uid
);
2326 host_ip
->gid
= tswapl(target_ip
->gid
);
2327 host_ip
->cuid
= tswapl(target_ip
->cuid
);
2328 host_ip
->cgid
= tswapl(target_ip
->cgid
);
2329 host_ip
->mode
= tswapl(target_ip
->mode
);
2330 unlock_user_struct(target_sd
, target_addr
, 0);
2334 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2335 struct ipc_perm
*host_ip
)
2337 struct target_ipc_perm
*target_ip
;
2338 struct target_semid_ds
*target_sd
;
2340 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2341 return -TARGET_EFAULT
;
2342 target_ip
= &(target_sd
->sem_perm
);
2343 target_ip
->__key
= tswapl(host_ip
->__key
);
2344 target_ip
->uid
= tswapl(host_ip
->uid
);
2345 target_ip
->gid
= tswapl(host_ip
->gid
);
2346 target_ip
->cuid
= tswapl(host_ip
->cuid
);
2347 target_ip
->cgid
= tswapl(host_ip
->cgid
);
2348 target_ip
->mode
= tswapl(host_ip
->mode
);
2349 unlock_user_struct(target_sd
, target_addr
, 1);
2353 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2354 abi_ulong target_addr
)
2356 struct target_semid_ds
*target_sd
;
2358 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2359 return -TARGET_EFAULT
;
2360 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2361 return -TARGET_EFAULT
;
2362 host_sd
->sem_nsems
= tswapl(target_sd
->sem_nsems
);
2363 host_sd
->sem_otime
= tswapl(target_sd
->sem_otime
);
2364 host_sd
->sem_ctime
= tswapl(target_sd
->sem_ctime
);
2365 unlock_user_struct(target_sd
, target_addr
, 0);
2369 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2370 struct semid_ds
*host_sd
)
2372 struct target_semid_ds
*target_sd
;
2374 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2375 return -TARGET_EFAULT
;
2376 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2377 return -TARGET_EFAULT
;;
2378 target_sd
->sem_nsems
= tswapl(host_sd
->sem_nsems
);
2379 target_sd
->sem_otime
= tswapl(host_sd
->sem_otime
);
2380 target_sd
->sem_ctime
= tswapl(host_sd
->sem_ctime
);
2381 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest-ABI layout of struct seminfo.
 * NOTE(review): every member line was dropped by extraction. */
2385 struct target_seminfo {
2398 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2399 struct seminfo
*host_seminfo
)
2401 struct target_seminfo
*target_seminfo
;
2402 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2403 return -TARGET_EFAULT
;
2404 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2405 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2406 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2407 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2408 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2409 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2410 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2411 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2412 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2413 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2414 unlock_user_struct(target_seminfo
, target_addr
, 1);
/* Tail of the host 'union semun' definition (glibc does not provide one)
 * and the start of the guest-side 'union target_semun'.
 * NOTE(review): the union headers/footers and the target_semun members
 * were dropped by extraction. */
2420 struct semid_ds
*buf
;
2421 unsigned short *array
;
2422 struct seminfo
*__buf
;
2425 union target_semun {
2432 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2433 abi_ulong target_addr
)
2436 unsigned short *array
;
2438 struct semid_ds semid_ds
;
2441 semun
.buf
= &semid_ds
;
2443 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2445 return get_errno(ret
);
2447 nsems
= semid_ds
.sem_nsems
;
2449 *host_array
= malloc(nsems
*sizeof(unsigned short));
2450 array
= lock_user(VERIFY_READ
, target_addr
,
2451 nsems
*sizeof(unsigned short), 1);
2453 return -TARGET_EFAULT
;
2455 for(i
=0; i
<nsems
; i
++) {
2456 __get_user((*host_array
)[i
], &array
[i
]);
2458 unlock_user(array
, target_addr
, 0);
2463 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2464 unsigned short **host_array
)
2467 unsigned short *array
;
2469 struct semid_ds semid_ds
;
2472 semun
.buf
= &semid_ds
;
2474 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2476 return get_errno(ret
);
2478 nsems
= semid_ds
.sem_nsems
;
2480 array
= lock_user(VERIFY_WRITE
, target_addr
,
2481 nsems
*sizeof(unsigned short), 0);
2483 return -TARGET_EFAULT
;
2485 for(i
=0; i
<nsems
; i
++) {
2486 __put_user((*host_array
)[i
], &array
[i
]);
2489 unlock_user(array
, target_addr
, 1);
2494 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2495 union target_semun target_su
)
2498 struct semid_ds dsarg
;
2499 unsigned short *array
= NULL
;
2500 struct seminfo seminfo
;
2501 abi_long ret
= -TARGET_EINVAL
;
2508 arg
.val
= tswapl(target_su
.val
);
2509 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2510 target_su
.val
= tswapl(arg
.val
);
2514 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2518 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2519 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2526 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2530 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2531 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2537 arg
.__buf
= &seminfo
;
2538 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2539 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2547 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest-ABI layout of struct sembuf.
 * NOTE(review): sem_op, sem_flg and the closing brace were dropped by
 * extraction. */
2554 struct target_sembuf {
2555 unsigned short sem_num
;
2560 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2561 abi_ulong target_addr
,
2564 struct target_sembuf
*target_sembuf
;
2567 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2568 nsops
*sizeof(struct target_sembuf
), 1);
2570 return -TARGET_EFAULT
;
2572 for(i
=0; i
<nsops
; i
++) {
2573 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2574 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2575 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2578 unlock_user(target_sembuf
, target_addr
, 0);
2583 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2585 struct sembuf sops
[nsops
];
2587 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2588 return -TARGET_EFAULT
;
2590 return semop(semid
, sops
, nsops
);
/* Guest-ABI layout of struct msqid_ds.  On 32-bit ABIs each time field
 * is followed by a padding word.
 * NOTE(review): the braces, '#endif' lines and the msg_qnum member were
 * dropped by extraction. */
2593 struct target_msqid_ds
2595 struct target_ipc_perm msg_perm
;
2596 abi_ulong msg_stime
;
2597 #if TARGET_ABI_BITS == 32
2598 abi_ulong __unused1
;
2600 abi_ulong msg_rtime
;
2601 #if TARGET_ABI_BITS == 32
2602 abi_ulong __unused2
;
2604 abi_ulong msg_ctime
;
2605 #if TARGET_ABI_BITS == 32
2606 abi_ulong __unused3
;
2608 abi_ulong __msg_cbytes
;
2610 abi_ulong msg_qbytes
;
2611 abi_ulong msg_lspid
;
2612 abi_ulong msg_lrpid
;
2613 abi_ulong __unused4
;
2614 abi_ulong __unused5
;
2617 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2618 abi_ulong target_addr
)
2620 struct target_msqid_ds
*target_md
;
2622 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2623 return -TARGET_EFAULT
;
2624 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2625 return -TARGET_EFAULT
;
2626 host_md
->msg_stime
= tswapl(target_md
->msg_stime
);
2627 host_md
->msg_rtime
= tswapl(target_md
->msg_rtime
);
2628 host_md
->msg_ctime
= tswapl(target_md
->msg_ctime
);
2629 host_md
->__msg_cbytes
= tswapl(target_md
->__msg_cbytes
);
2630 host_md
->msg_qnum
= tswapl(target_md
->msg_qnum
);
2631 host_md
->msg_qbytes
= tswapl(target_md
->msg_qbytes
);
2632 host_md
->msg_lspid
= tswapl(target_md
->msg_lspid
);
2633 host_md
->msg_lrpid
= tswapl(target_md
->msg_lrpid
);
2634 unlock_user_struct(target_md
, target_addr
, 0);
2638 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2639 struct msqid_ds
*host_md
)
2641 struct target_msqid_ds
*target_md
;
2643 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2644 return -TARGET_EFAULT
;
2645 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2646 return -TARGET_EFAULT
;
2647 target_md
->msg_stime
= tswapl(host_md
->msg_stime
);
2648 target_md
->msg_rtime
= tswapl(host_md
->msg_rtime
);
2649 target_md
->msg_ctime
= tswapl(host_md
->msg_ctime
);
2650 target_md
->__msg_cbytes
= tswapl(host_md
->__msg_cbytes
);
2651 target_md
->msg_qnum
= tswapl(host_md
->msg_qnum
);
2652 target_md
->msg_qbytes
= tswapl(host_md
->msg_qbytes
);
2653 target_md
->msg_lspid
= tswapl(host_md
->msg_lspid
);
2654 target_md
->msg_lrpid
= tswapl(host_md
->msg_lrpid
);
2655 unlock_user_struct(target_md
, target_addr
, 1);
2659 struct target_msginfo
{
2667 unsigned short int msgseg
;
2670 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2671 struct msginfo
*host_msginfo
)
2673 struct target_msginfo
*target_msginfo
;
2674 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2675 return -TARGET_EFAULT
;
2676 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2677 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2678 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2679 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2680 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2681 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2682 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2683 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2684 unlock_user_struct(target_msginfo
, target_addr
, 1);
2688 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2690 struct msqid_ds dsarg
;
2691 struct msginfo msginfo
;
2692 abi_long ret
= -TARGET_EINVAL
;
2700 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2701 return -TARGET_EFAULT
;
2702 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2703 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2704 return -TARGET_EFAULT
;
2707 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2711 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2712 if (host_to_target_msginfo(ptr
, &msginfo
))
2713 return -TARGET_EFAULT
;
2720 struct target_msgbuf
{
2725 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2726 unsigned int msgsz
, int msgflg
)
2728 struct target_msgbuf
*target_mb
;
2729 struct msgbuf
*host_mb
;
2732 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2733 return -TARGET_EFAULT
;
2734 host_mb
= malloc(msgsz
+sizeof(long));
2735 host_mb
->mtype
= (abi_long
) tswapl(target_mb
->mtype
);
2736 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2737 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2739 unlock_user_struct(target_mb
, msgp
, 0);
2744 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2745 unsigned int msgsz
, abi_long msgtyp
,
2748 struct target_msgbuf
*target_mb
;
2750 struct msgbuf
*host_mb
;
2753 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2754 return -TARGET_EFAULT
;
2756 host_mb
= malloc(msgsz
+sizeof(long));
2757 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, tswapl(msgtyp
), msgflg
));
2760 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2761 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2762 if (!target_mtext
) {
2763 ret
= -TARGET_EFAULT
;
2766 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2767 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2770 target_mb
->mtype
= tswapl(host_mb
->mtype
);
2775 unlock_user_struct(target_mb
, msgp
, 1);
2779 struct target_shmid_ds
2781 struct target_ipc_perm shm_perm
;
2782 abi_ulong shm_segsz
;
2783 abi_ulong shm_atime
;
2784 #if TARGET_ABI_BITS == 32
2785 abi_ulong __unused1
;
2787 abi_ulong shm_dtime
;
2788 #if TARGET_ABI_BITS == 32
2789 abi_ulong __unused2
;
2791 abi_ulong shm_ctime
;
2792 #if TARGET_ABI_BITS == 32
2793 abi_ulong __unused3
;
2797 abi_ulong shm_nattch
;
2798 unsigned long int __unused4
;
2799 unsigned long int __unused5
;
2802 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2803 abi_ulong target_addr
)
2805 struct target_shmid_ds
*target_sd
;
2807 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2808 return -TARGET_EFAULT
;
2809 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2810 return -TARGET_EFAULT
;
2811 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2812 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2813 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2814 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2815 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2816 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2817 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2818 unlock_user_struct(target_sd
, target_addr
, 0);
2822 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2823 struct shmid_ds
*host_sd
)
2825 struct target_shmid_ds
*target_sd
;
2827 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2828 return -TARGET_EFAULT
;
2829 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2830 return -TARGET_EFAULT
;
2831 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2832 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2833 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2834 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2835 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2836 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2837 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2838 unlock_user_struct(target_sd
, target_addr
, 1);
2842 struct target_shminfo
{
2850 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
2851 struct shminfo
*host_shminfo
)
2853 struct target_shminfo
*target_shminfo
;
2854 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
2855 return -TARGET_EFAULT
;
2856 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
2857 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
2858 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
2859 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
2860 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
2861 unlock_user_struct(target_shminfo
, target_addr
, 1);
2865 struct target_shm_info
{
2870 abi_ulong swap_attempts
;
2871 abi_ulong swap_successes
;
2874 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
2875 struct shm_info
*host_shm_info
)
2877 struct target_shm_info
*target_shm_info
;
2878 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
2879 return -TARGET_EFAULT
;
2880 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
2881 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
2882 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
2883 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
2884 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
2885 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
2886 unlock_user_struct(target_shm_info
, target_addr
, 1);
2890 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
2892 struct shmid_ds dsarg
;
2893 struct shminfo shminfo
;
2894 struct shm_info shm_info
;
2895 abi_long ret
= -TARGET_EINVAL
;
2903 if (target_to_host_shmid_ds(&dsarg
, buf
))
2904 return -TARGET_EFAULT
;
2905 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
2906 if (host_to_target_shmid_ds(buf
, &dsarg
))
2907 return -TARGET_EFAULT
;
2910 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
2911 if (host_to_target_shminfo(buf
, &shminfo
))
2912 return -TARGET_EFAULT
;
2915 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
2916 if (host_to_target_shm_info(buf
, &shm_info
))
2917 return -TARGET_EFAULT
;
2922 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
2929 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
2933 struct shmid_ds shm_info
;
2936 /* find out the length of the shared memory segment */
2937 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
2938 if (is_error(ret
)) {
2939 /* can't get length, bail out */
2946 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
2948 abi_ulong mmap_start
;
2950 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
2952 if (mmap_start
== -1) {
2954 host_raddr
= (void *)-1;
2956 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
2959 if (host_raddr
== (void *)-1) {
2961 return get_errno((long)host_raddr
);
2963 raddr
=h2g((unsigned long)host_raddr
);
2965 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
2966 PAGE_VALID
| PAGE_READ
|
2967 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
2969 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
2970 if (shm_regions
[i
].start
== 0) {
2971 shm_regions
[i
].start
= raddr
;
2972 shm_regions
[i
].size
= shm_info
.shm_segsz
;
2982 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
2986 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
2987 if (shm_regions
[i
].start
== shmaddr
) {
2988 shm_regions
[i
].start
= 0;
2989 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
2994 return get_errno(shmdt(g2h(shmaddr
)));
2997 #ifdef TARGET_NR_ipc
2998 /* ??? This only works with linear mappings. */
2999 /* do_ipc() must return target values and target errnos. */
3000 static abi_long
do_ipc(unsigned int call
, int first
,
3001 int second
, int third
,
3002 abi_long ptr
, abi_long fifth
)
3007 version
= call
>> 16;
3012 ret
= do_semop(first
, ptr
, second
);
3016 ret
= get_errno(semget(first
, second
, third
));
3020 ret
= do_semctl(first
, second
, third
, (union target_semun
)(abi_ulong
) ptr
);
3024 ret
= get_errno(msgget(first
, second
));
3028 ret
= do_msgsnd(first
, ptr
, second
, third
);
3032 ret
= do_msgctl(first
, second
, ptr
);
3039 struct target_ipc_kludge
{
3044 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
3045 ret
= -TARGET_EFAULT
;
3049 ret
= do_msgrcv(first
, tmp
->msgp
, second
, tmp
->msgtyp
, third
);
3051 unlock_user_struct(tmp
, ptr
, 0);
3055 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
3064 raddr
= do_shmat(first
, ptr
, second
);
3065 if (is_error(raddr
))
3066 return get_errno(raddr
);
3067 if (put_user_ual(raddr
, third
))
3068 return -TARGET_EFAULT
;
3072 ret
= -TARGET_EINVAL
;
3077 ret
= do_shmdt(ptr
);
3081 /* IPC_* flag values are the same on all linux platforms */
3082 ret
= get_errno(shmget(first
, second
, third
));
3085 /* IPC_* and SHM_* command values are the same on all linux platforms */
3087 ret
= do_shmctl(first
, second
, third
);
3090 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
3091 ret
= -TARGET_ENOSYS
;
3098 /* kernel structure types definitions */
3100 #define STRUCT(name, ...) STRUCT_ ## name,
3101 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3103 #include "syscall_types.h"
3106 #undef STRUCT_SPECIAL
3108 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3109 #define STRUCT_SPECIAL(name)
3110 #include "syscall_types.h"
3112 #undef STRUCT_SPECIAL
3114 typedef struct IOCTLEntry IOCTLEntry
;
3116 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3117 int fd
, abi_long cmd
, abi_long arg
);
3120 unsigned int target_cmd
;
3121 unsigned int host_cmd
;
3124 do_ioctl_fn
*do_ioctl
;
3125 const argtype arg_type
[5];
3128 #define IOC_R 0x0001
3129 #define IOC_W 0x0002
3130 #define IOC_RW (IOC_R | IOC_W)
3132 #define MAX_STRUCT_SIZE 4096
3134 #ifdef CONFIG_FIEMAP
3135 /* So fiemap access checks don't overflow on 32 bit systems.
3136 * This is very slightly smaller than the limit imposed by
3137 * the underlying kernel.
3139 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3140 / sizeof(struct fiemap_extent))
3142 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3143 int fd
, abi_long cmd
, abi_long arg
)
3145 /* The parameter for this ioctl is a struct fiemap followed
3146 * by an array of struct fiemap_extent whose size is set
3147 * in fiemap->fm_extent_count. The array is filled in by the
3150 int target_size_in
, target_size_out
;
3152 const argtype
*arg_type
= ie
->arg_type
;
3153 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3156 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3160 assert(arg_type
[0] == TYPE_PTR
);
3161 assert(ie
->access
== IOC_RW
);
3163 target_size_in
= thunk_type_size(arg_type
, 0);
3164 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3166 return -TARGET_EFAULT
;
3168 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3169 unlock_user(argptr
, arg
, 0);
3170 fm
= (struct fiemap
*)buf_temp
;
3171 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3172 return -TARGET_EINVAL
;
3175 outbufsz
= sizeof (*fm
) +
3176 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3178 if (outbufsz
> MAX_STRUCT_SIZE
) {
3179 /* We can't fit all the extents into the fixed size buffer.
3180 * Allocate one that is large enough and use it instead.
3182 fm
= malloc(outbufsz
);
3184 return -TARGET_ENOMEM
;
3186 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3189 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3190 if (!is_error(ret
)) {
3191 target_size_out
= target_size_in
;
3192 /* An extent_count of 0 means we were only counting the extents
3193 * so there are no structs to copy
3195 if (fm
->fm_extent_count
!= 0) {
3196 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3198 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3200 ret
= -TARGET_EFAULT
;
3202 /* Convert the struct fiemap */
3203 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3204 if (fm
->fm_extent_count
!= 0) {
3205 p
= argptr
+ target_size_in
;
3206 /* ...and then all the struct fiemap_extents */
3207 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3208 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3213 unlock_user(argptr
, arg
, target_size_out
);
3223 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3224 int fd
, abi_long cmd
, abi_long arg
)
3226 const argtype
*arg_type
= ie
->arg_type
;
3230 struct ifconf
*host_ifconf
;
3232 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3233 int target_ifreq_size
;
3238 abi_long target_ifc_buf
;
3242 assert(arg_type
[0] == TYPE_PTR
);
3243 assert(ie
->access
== IOC_RW
);
3246 target_size
= thunk_type_size(arg_type
, 0);
3248 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3250 return -TARGET_EFAULT
;
3251 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3252 unlock_user(argptr
, arg
, 0);
3254 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3255 target_ifc_len
= host_ifconf
->ifc_len
;
3256 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3258 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3259 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3260 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3262 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3263 if (outbufsz
> MAX_STRUCT_SIZE
) {
3264 /* We can't fit all the extents into the fixed size buffer.
3265 * Allocate one that is large enough and use it instead.
3267 host_ifconf
= malloc(outbufsz
);
3269 return -TARGET_ENOMEM
;
3271 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3274 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3276 host_ifconf
->ifc_len
= host_ifc_len
;
3277 host_ifconf
->ifc_buf
= host_ifc_buf
;
3279 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3280 if (!is_error(ret
)) {
3281 /* convert host ifc_len to target ifc_len */
3283 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3284 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3285 host_ifconf
->ifc_len
= target_ifc_len
;
3287 /* restore target ifc_buf */
3289 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3291 /* copy struct ifconf to target user */
3293 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3295 return -TARGET_EFAULT
;
3296 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3297 unlock_user(argptr
, arg
, target_size
);
3299 /* copy ifreq[] to target user */
3301 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3302 for (i
= 0; i
< nb_ifreq
; i
++) {
3303 thunk_convert(argptr
+ i
* target_ifreq_size
,
3304 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3305 ifreq_arg_type
, THUNK_TARGET
);
3307 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3317 static IOCTLEntry ioctl_entries
[] = {
3318 #define IOCTL(cmd, access, ...) \
3319 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3320 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3321 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3326 /* ??? Implement proper locking for ioctls. */
3327 /* do_ioctl() Must return target values and target errnos. */
3328 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3330 const IOCTLEntry
*ie
;
3331 const argtype
*arg_type
;
3333 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3339 if (ie
->target_cmd
== 0) {
3340 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3341 return -TARGET_ENOSYS
;
3343 if (ie
->target_cmd
== cmd
)
3347 arg_type
= ie
->arg_type
;
3349 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3352 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3355 switch(arg_type
[0]) {
3358 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3363 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3367 target_size
= thunk_type_size(arg_type
, 0);
3368 switch(ie
->access
) {
3370 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3371 if (!is_error(ret
)) {
3372 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3374 return -TARGET_EFAULT
;
3375 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3376 unlock_user(argptr
, arg
, target_size
);
3380 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3382 return -TARGET_EFAULT
;
3383 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3384 unlock_user(argptr
, arg
, 0);
3385 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3389 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3391 return -TARGET_EFAULT
;
3392 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3393 unlock_user(argptr
, arg
, 0);
3394 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3395 if (!is_error(ret
)) {
3396 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3398 return -TARGET_EFAULT
;
3399 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3400 unlock_user(argptr
, arg
, target_size
);
3406 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3407 (long)cmd
, arg_type
[0]);
3408 ret
= -TARGET_ENOSYS
;
3414 static const bitmask_transtbl iflag_tbl
[] = {
3415 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3416 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3417 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3418 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3419 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3420 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3421 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3422 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3423 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3424 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3425 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3426 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3427 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3428 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3432 static const bitmask_transtbl oflag_tbl
[] = {
3433 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3434 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3435 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3436 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3437 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3438 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3439 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3440 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3441 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3442 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3443 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3444 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3445 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3446 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3447 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3448 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3449 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3450 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3451 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3452 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3453 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3454 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3455 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3456 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3460 static const bitmask_transtbl cflag_tbl
[] = {
3461 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3462 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3463 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3464 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3465 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3466 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3467 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3468 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3469 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3470 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3471 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3472 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3473 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3474 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3475 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3476 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3477 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3478 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3479 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3480 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3481 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3482 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3483 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3484 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3485 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3486 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3487 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3488 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3489 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3490 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3491 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3495 static const bitmask_transtbl lflag_tbl
[] = {
3496 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3497 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3498 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3499 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3500 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3501 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3502 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3503 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3504 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3505 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3506 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3507 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3508 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3509 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3510 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3514 static void target_to_host_termios (void *dst
, const void *src
)
3516 struct host_termios
*host
= dst
;
3517 const struct target_termios
*target
= src
;
3520 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3522 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3524 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3526 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3527 host
->c_line
= target
->c_line
;
3529 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3530 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3531 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3532 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3533 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3534 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3535 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3536 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3537 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3538 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3539 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3540 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3541 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3542 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3543 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3544 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3545 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3546 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3549 static void host_to_target_termios (void *dst
, const void *src
)
3551 struct target_termios
*target
= dst
;
3552 const struct host_termios
*host
= src
;
3555 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3557 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3559 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3561 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3562 target
->c_line
= host
->c_line
;
3564 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3565 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3566 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3567 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3568 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3569 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3570 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3571 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3572 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3573 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3574 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3575 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3576 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3577 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3578 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3579 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3580 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3581 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3584 static const StructEntry struct_termios_def
= {
3585 .convert
= { host_to_target_termios
, target_to_host_termios
},
3586 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3587 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3590 static bitmask_transtbl mmap_flags_tbl
[] = {
3591 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3592 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3593 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3594 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3595 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3596 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3597 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3598 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3602 #if defined(TARGET_I386)
3604 /* NOTE: there is really one LDT for all the threads */
3605 static uint8_t *ldt_table
;
3607 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3614 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3615 if (size
> bytecount
)
3617 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3619 return -TARGET_EFAULT
;
3620 /* ??? Should this by byteswapped? */
3621 memcpy(p
, ldt_table
, size
);
3622 unlock_user(p
, ptr
, size
);
3626 /* XXX: add locking support */
3627 static abi_long
write_ldt(CPUX86State
*env
,
3628 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3630 struct target_modify_ldt_ldt_s ldt_info
;
3631 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3632 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3633 int seg_not_present
, useable
, lm
;
3634 uint32_t *lp
, entry_1
, entry_2
;
3636 if (bytecount
!= sizeof(ldt_info
))
3637 return -TARGET_EINVAL
;
3638 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3639 return -TARGET_EFAULT
;
3640 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3641 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3642 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3643 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3644 unlock_user_struct(target_ldt_info
, ptr
, 0);
3646 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3647 return -TARGET_EINVAL
;
3648 seg_32bit
= ldt_info
.flags
& 1;
3649 contents
= (ldt_info
.flags
>> 1) & 3;
3650 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3651 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3652 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3653 useable
= (ldt_info
.flags
>> 6) & 1;
3657 lm
= (ldt_info
.flags
>> 7) & 1;
3659 if (contents
== 3) {
3661 return -TARGET_EINVAL
;
3662 if (seg_not_present
== 0)
3663 return -TARGET_EINVAL
;
3665 /* allocate the LDT */
3667 env
->ldt
.base
= target_mmap(0,
3668 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3669 PROT_READ
|PROT_WRITE
,
3670 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3671 if (env
->ldt
.base
== -1)
3672 return -TARGET_ENOMEM
;
3673 memset(g2h(env
->ldt
.base
), 0,
3674 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3675 env
->ldt
.limit
= 0xffff;
3676 ldt_table
= g2h(env
->ldt
.base
);
3679 /* NOTE: same code as Linux kernel */
3680 /* Allow LDTs to be cleared by the user. */
3681 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3684 read_exec_only
== 1 &&
3686 limit_in_pages
== 0 &&
3687 seg_not_present
== 1 &&
3695 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3696 (ldt_info
.limit
& 0x0ffff);
3697 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3698 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3699 (ldt_info
.limit
& 0xf0000) |
3700 ((read_exec_only
^ 1) << 9) |
3702 ((seg_not_present
^ 1) << 15) |
3704 (limit_in_pages
<< 23) |
3708 entry_2
|= (useable
<< 20);
3710 /* Install the new entry ... */
3712 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3713 lp
[0] = tswap32(entry_1
);
3714 lp
[1] = tswap32(entry_2
);
3718 /* specific and weird i386 syscalls */
3719 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3720 unsigned long bytecount
)
3726 ret
= read_ldt(ptr
, bytecount
);
3729 ret
= write_ldt(env
, ptr
, bytecount
, 1);
3732 ret
= write_ldt(env
, ptr
, bytecount
, 0);
3735 ret
= -TARGET_ENOSYS
;
3741 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3742 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3744 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3745 struct target_modify_ldt_ldt_s ldt_info
;
3746 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3747 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3748 int seg_not_present
, useable
, lm
;
3749 uint32_t *lp
, entry_1
, entry_2
;
3752 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3753 if (!target_ldt_info
)
3754 return -TARGET_EFAULT
;
3755 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3756 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3757 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3758 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3759 if (ldt_info
.entry_number
== -1) {
3760 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
3761 if (gdt_table
[i
] == 0) {
3762 ldt_info
.entry_number
= i
;
3763 target_ldt_info
->entry_number
= tswap32(i
);
3768 unlock_user_struct(target_ldt_info
, ptr
, 1);
3770 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
3771 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
3772 return -TARGET_EINVAL
;
3773 seg_32bit
= ldt_info
.flags
& 1;
3774 contents
= (ldt_info
.flags
>> 1) & 3;
3775 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3776 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3777 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3778 useable
= (ldt_info
.flags
>> 6) & 1;
3782 lm
= (ldt_info
.flags
>> 7) & 1;
3785 if (contents
== 3) {
3786 if (seg_not_present
== 0)
3787 return -TARGET_EINVAL
;
3790 /* NOTE: same code as Linux kernel */
3791 /* Allow LDTs to be cleared by the user. */
3792 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3793 if ((contents
== 0 &&
3794 read_exec_only
== 1 &&
3796 limit_in_pages
== 0 &&
3797 seg_not_present
== 1 &&
3805 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3806 (ldt_info
.limit
& 0x0ffff);
3807 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3808 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3809 (ldt_info
.limit
& 0xf0000) |
3810 ((read_exec_only
^ 1) << 9) |
3812 ((seg_not_present
^ 1) << 15) |
3814 (limit_in_pages
<< 23) |
3819 /* Install the new entry ... */
3821 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
3822 lp
[0] = tswap32(entry_1
);
3823 lp
[1] = tswap32(entry_2
);
3827 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3829 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3830 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3831 uint32_t base_addr
, limit
, flags
;
3832 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
3833 int seg_not_present
, useable
, lm
;
3834 uint32_t *lp
, entry_1
, entry_2
;
3836 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3837 if (!target_ldt_info
)
3838 return -TARGET_EFAULT
;
3839 idx
= tswap32(target_ldt_info
->entry_number
);
3840 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
3841 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
3842 unlock_user_struct(target_ldt_info
, ptr
, 1);
3843 return -TARGET_EINVAL
;
3845 lp
= (uint32_t *)(gdt_table
+ idx
);
3846 entry_1
= tswap32(lp
[0]);
3847 entry_2
= tswap32(lp
[1]);
3849 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
3850 contents
= (entry_2
>> 10) & 3;
3851 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
3852 seg_32bit
= (entry_2
>> 22) & 1;
3853 limit_in_pages
= (entry_2
>> 23) & 1;
3854 useable
= (entry_2
>> 20) & 1;
3858 lm
= (entry_2
>> 21) & 1;
3860 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
3861 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
3862 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
3863 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
3864 base_addr
= (entry_1
>> 16) |
3865 (entry_2
& 0xff000000) |
3866 ((entry_2
& 0xff) << 16);
3867 target_ldt_info
->base_addr
= tswapl(base_addr
);
3868 target_ldt_info
->limit
= tswap32(limit
);
3869 target_ldt_info
->flags
= tswap32(flags
);
3870 unlock_user_struct(target_ldt_info
, ptr
, 1);
3873 #endif /* TARGET_I386 && TARGET_ABI32 */
3875 #ifndef TARGET_ABI32
3876 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
3883 case TARGET_ARCH_SET_GS
:
3884 case TARGET_ARCH_SET_FS
:
3885 if (code
== TARGET_ARCH_SET_GS
)
3889 cpu_x86_load_seg(env
, idx
, 0);
3890 env
->segs
[idx
].base
= addr
;
3892 case TARGET_ARCH_GET_GS
:
3893 case TARGET_ARCH_GET_FS
:
3894 if (code
== TARGET_ARCH_GET_GS
)
3898 val
= env
->segs
[idx
].base
;
3899 if (put_user(val
, addr
, abi_ulong
))
3900 ret
= -TARGET_EFAULT
;
3903 ret
= -TARGET_EINVAL
;
3910 #endif /* defined(TARGET_I386) */
3912 #define NEW_STACK_SIZE 0x40000
3914 #if defined(CONFIG_USE_NPTL)
3916 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
3919 pthread_mutex_t mutex
;
3920 pthread_cond_t cond
;
3923 abi_ulong child_tidptr
;
3924 abi_ulong parent_tidptr
;
3928 static void *clone_func(void *arg
)
3930 new_thread_info
*info
= arg
;
3936 ts
= (TaskState
*)thread_env
->opaque
;
3937 info
->tid
= gettid();
3938 env
->host_tid
= info
->tid
;
3940 if (info
->child_tidptr
)
3941 put_user_u32(info
->tid
, info
->child_tidptr
);
3942 if (info
->parent_tidptr
)
3943 put_user_u32(info
->tid
, info
->parent_tidptr
);
3944 /* Enable signals. */
3945 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
3946 /* Signal to the parent that we're ready. */
3947 pthread_mutex_lock(&info
->mutex
);
3948 pthread_cond_broadcast(&info
->cond
);
3949 pthread_mutex_unlock(&info
->mutex
);
3950 /* Wait until the parent has finshed initializing the tls state. */
3951 pthread_mutex_lock(&clone_lock
);
3952 pthread_mutex_unlock(&clone_lock
);
3959 static int clone_func(void *arg
)
3961 CPUState
*env
= arg
;
3968 /* do_fork() Must return host values and target errnos (unlike most
3969 do_*() functions). */
3970 static int do_fork(CPUState
*env
, unsigned int flags
, abi_ulong newsp
,
3971 abi_ulong parent_tidptr
, target_ulong newtls
,
3972 abi_ulong child_tidptr
)
3977 #if defined(CONFIG_USE_NPTL)
3978 unsigned int nptl_flags
;
3984 /* Emulate vfork() with fork() */
3985 if (flags
& CLONE_VFORK
)
3986 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
3988 if (flags
& CLONE_VM
) {
3989 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
3990 #if defined(CONFIG_USE_NPTL)
3991 new_thread_info info
;
3992 pthread_attr_t attr
;
3994 ts
= qemu_mallocz(sizeof(TaskState
));
3995 init_task_state(ts
);
3996 /* we create a new CPU instance. */
3997 new_env
= cpu_copy(env
);
3998 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4001 /* Init regs that differ from the parent. */
4002 cpu_clone_regs(new_env
, newsp
);
4003 new_env
->opaque
= ts
;
4004 ts
->bprm
= parent_ts
->bprm
;
4005 ts
->info
= parent_ts
->info
;
4006 #if defined(CONFIG_USE_NPTL)
4008 flags
&= ~CLONE_NPTL_FLAGS2
;
4010 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4011 ts
->child_tidptr
= child_tidptr
;
4014 if (nptl_flags
& CLONE_SETTLS
)
4015 cpu_set_tls (new_env
, newtls
);
4017 /* Grab a mutex so that thread setup appears atomic. */
4018 pthread_mutex_lock(&clone_lock
);
4020 memset(&info
, 0, sizeof(info
));
4021 pthread_mutex_init(&info
.mutex
, NULL
);
4022 pthread_mutex_lock(&info
.mutex
);
4023 pthread_cond_init(&info
.cond
, NULL
);
4025 if (nptl_flags
& CLONE_CHILD_SETTID
)
4026 info
.child_tidptr
= child_tidptr
;
4027 if (nptl_flags
& CLONE_PARENT_SETTID
)
4028 info
.parent_tidptr
= parent_tidptr
;
4030 ret
= pthread_attr_init(&attr
);
4031 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4032 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4033 /* It is not safe to deliver signals until the child has finished
4034 initializing, so temporarily block all signals. */
4035 sigfillset(&sigmask
);
4036 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4038 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4039 /* TODO: Free new CPU state if thread creation failed. */
4041 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4042 pthread_attr_destroy(&attr
);
4044 /* Wait for the child to initialize. */
4045 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4047 if (flags
& CLONE_PARENT_SETTID
)
4048 put_user_u32(ret
, parent_tidptr
);
4052 pthread_mutex_unlock(&info
.mutex
);
4053 pthread_cond_destroy(&info
.cond
);
4054 pthread_mutex_destroy(&info
.mutex
);
4055 pthread_mutex_unlock(&clone_lock
);
4057 if (flags
& CLONE_NPTL_FLAGS2
)
4059 /* This is probably going to die very quickly, but do it anyway. */
4060 new_stack
= qemu_mallocz (NEW_STACK_SIZE
);
4062 ret
= __clone2(clone_func
, new_stack
, NEW_STACK_SIZE
, flags
, new_env
);
4064 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
4068 /* if no CLONE_VM, we consider it is a fork */
4069 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4074 /* Child Process. */
4075 cpu_clone_regs(env
, newsp
);
4077 #if defined(CONFIG_USE_NPTL)
4078 /* There is a race condition here. The parent process could
4079 theoretically read the TID in the child process before the child
4080 tid is set. This would require using either ptrace
4081 (not implemented) or having *_tidptr to point at a shared memory
4082 mapping. We can't repeat the spinlock hack used above because
4083 the child process gets its own copy of the lock. */
4084 if (flags
& CLONE_CHILD_SETTID
)
4085 put_user_u32(gettid(), child_tidptr
);
4086 if (flags
& CLONE_PARENT_SETTID
)
4087 put_user_u32(gettid(), parent_tidptr
);
4088 ts
= (TaskState
*)env
->opaque
;
4089 if (flags
& CLONE_SETTLS
)
4090 cpu_set_tls (env
, newtls
);
4091 if (flags
& CLONE_CHILD_CLEARTID
)
4092 ts
->child_tidptr
= child_tidptr
;
4101 /* warning : doesn't handle linux specific flags... */
4102 static int target_to_host_fcntl_cmd(int cmd
)
4105 case TARGET_F_DUPFD
:
4106 case TARGET_F_GETFD
:
4107 case TARGET_F_SETFD
:
4108 case TARGET_F_GETFL
:
4109 case TARGET_F_SETFL
:
4111 case TARGET_F_GETLK
:
4113 case TARGET_F_SETLK
:
4115 case TARGET_F_SETLKW
:
4117 case TARGET_F_GETOWN
:
4119 case TARGET_F_SETOWN
:
4121 case TARGET_F_GETSIG
:
4123 case TARGET_F_SETSIG
:
4125 #if TARGET_ABI_BITS == 32
4126 case TARGET_F_GETLK64
:
4128 case TARGET_F_SETLK64
:
4130 case TARGET_F_SETLKW64
:
4133 case TARGET_F_SETLEASE
:
4135 case TARGET_F_GETLEASE
:
4137 #ifdef F_DUPFD_CLOEXEC
4138 case TARGET_F_DUPFD_CLOEXEC
:
4139 return F_DUPFD_CLOEXEC
;
4141 case TARGET_F_NOTIFY
:
4144 return -TARGET_EINVAL
;
4146 return -TARGET_EINVAL
;
4149 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4152 struct target_flock
*target_fl
;
4153 struct flock64 fl64
;
4154 struct target_flock64
*target_fl64
;
4156 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4158 if (host_cmd
== -TARGET_EINVAL
)
4162 case TARGET_F_GETLK
:
4163 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4164 return -TARGET_EFAULT
;
4165 fl
.l_type
= tswap16(target_fl
->l_type
);
4166 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4167 fl
.l_start
= tswapl(target_fl
->l_start
);
4168 fl
.l_len
= tswapl(target_fl
->l_len
);
4169 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4170 unlock_user_struct(target_fl
, arg
, 0);
4171 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4173 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4174 return -TARGET_EFAULT
;
4175 target_fl
->l_type
= tswap16(fl
.l_type
);
4176 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4177 target_fl
->l_start
= tswapl(fl
.l_start
);
4178 target_fl
->l_len
= tswapl(fl
.l_len
);
4179 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4180 unlock_user_struct(target_fl
, arg
, 1);
4184 case TARGET_F_SETLK
:
4185 case TARGET_F_SETLKW
:
4186 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4187 return -TARGET_EFAULT
;
4188 fl
.l_type
= tswap16(target_fl
->l_type
);
4189 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4190 fl
.l_start
= tswapl(target_fl
->l_start
);
4191 fl
.l_len
= tswapl(target_fl
->l_len
);
4192 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4193 unlock_user_struct(target_fl
, arg
, 0);
4194 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4197 case TARGET_F_GETLK64
:
4198 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4199 return -TARGET_EFAULT
;
4200 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4201 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4202 fl64
.l_start
= tswapl(target_fl64
->l_start
);
4203 fl64
.l_len
= tswapl(target_fl64
->l_len
);
4204 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4205 unlock_user_struct(target_fl64
, arg
, 0);
4206 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4208 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4209 return -TARGET_EFAULT
;
4210 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
4211 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4212 target_fl64
->l_start
= tswapl(fl64
.l_start
);
4213 target_fl64
->l_len
= tswapl(fl64
.l_len
);
4214 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4215 unlock_user_struct(target_fl64
, arg
, 1);
4218 case TARGET_F_SETLK64
:
4219 case TARGET_F_SETLKW64
:
4220 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4221 return -TARGET_EFAULT
;
4222 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4223 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4224 fl64
.l_start
= tswapl(target_fl64
->l_start
);
4225 fl64
.l_len
= tswapl(target_fl64
->l_len
);
4226 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4227 unlock_user_struct(target_fl64
, arg
, 0);
4228 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4231 case TARGET_F_GETFL
:
4232 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4234 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4238 case TARGET_F_SETFL
:
4239 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4242 case TARGET_F_SETOWN
:
4243 case TARGET_F_GETOWN
:
4244 case TARGET_F_SETSIG
:
4245 case TARGET_F_GETSIG
:
4246 case TARGET_F_SETLEASE
:
4247 case TARGET_F_GETLEASE
:
4248 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4252 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4260 static inline int high2lowuid(int uid
)
4268 static inline int high2lowgid(int gid
)
4276 static inline int low2highuid(int uid
)
4278 if ((int16_t)uid
== -1)
4284 static inline int low2highgid(int gid
)
4286 if ((int16_t)gid
== -1)
4291 static inline int tswapid(int id
)
4295 #else /* !USE_UID16 */
4296 static inline int high2lowuid(int uid
)
4300 static inline int high2lowgid(int gid
)
4304 static inline int low2highuid(int uid
)
4308 static inline int low2highgid(int gid
)
4312 static inline int tswapid(int id
)
4316 #endif /* USE_UID16 */
4318 void syscall_init(void)
4321 const argtype
*arg_type
;
4325 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4326 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4327 #include "syscall_types.h"
4329 #undef STRUCT_SPECIAL
4331 /* we patch the ioctl size if necessary. We rely on the fact that
4332 no ioctl has all the bits at '1' in the size field */
4334 while (ie
->target_cmd
!= 0) {
4335 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4336 TARGET_IOC_SIZEMASK
) {
4337 arg_type
= ie
->arg_type
;
4338 if (arg_type
[0] != TYPE_PTR
) {
4339 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4344 size
= thunk_type_size(arg_type
, 0);
4345 ie
->target_cmd
= (ie
->target_cmd
&
4346 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4347 (size
<< TARGET_IOC_SIZESHIFT
);
4350 /* Build target_to_host_errno_table[] table from
4351 * host_to_target_errno_table[]. */
4352 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
4353 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4355 /* automatic consistency check if same arch */
4356 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4357 (defined(__x86_64__) && defined(TARGET_X86_64))
4358 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4359 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4360 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit file offset from the two 32-bit register halves
   passed by a 32-bit guest; which register holds the high half
   depends on the guest's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit guests pass the whole offset in one register; the second
   argument is ignored. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* Handle truncate64(): rebuild the 64-bit length from a register pair.
   On ABIs where register pairs must be aligned (regpairs_aligned),
   the pair starts one register later, so the halves arrive in
   arg3/arg4 instead of arg2/arg3. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* Handle ftruncate64(): same register-pair reassembly as
   target_truncate64() above, but operating on a file descriptor. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
4411 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4412 abi_ulong target_addr
)
4414 struct target_timespec
*target_ts
;
4416 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4417 return -TARGET_EFAULT
;
4418 host_ts
->tv_sec
= tswapl(target_ts
->tv_sec
);
4419 host_ts
->tv_nsec
= tswapl(target_ts
->tv_nsec
);
4420 unlock_user_struct(target_ts
, target_addr
, 0);
4424 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4425 struct timespec
*host_ts
)
4427 struct target_timespec
*target_ts
;
4429 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4430 return -TARGET_EFAULT
;
4431 target_ts
->tv_sec
= tswapl(host_ts
->tv_sec
);
4432 target_ts
->tv_nsec
= tswapl(host_ts
->tv_nsec
);
4433 unlock_user_struct(target_ts
, target_addr
, 1);
4437 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4438 static inline abi_long
host_to_target_stat64(void *cpu_env
,
4439 abi_ulong target_addr
,
4440 struct stat
*host_st
)
4443 if (((CPUARMState
*)cpu_env
)->eabi
) {
4444 struct target_eabi_stat64
*target_st
;
4446 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4447 return -TARGET_EFAULT
;
4448 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
4449 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4450 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4451 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4452 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4454 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4455 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4456 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4457 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4458 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4459 __put_user(host_st
->st_size
, &target_st
->st_size
);
4460 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4461 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4462 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4463 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4464 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4465 unlock_user_struct(target_st
, target_addr
, 1);
4469 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4470 struct target_stat
*target_st
;
4472 struct target_stat64
*target_st
;
4475 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4476 return -TARGET_EFAULT
;
4477 memset(target_st
, 0, sizeof(*target_st
));
4478 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4479 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4480 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4481 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4483 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4484 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4485 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4486 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4487 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4488 /* XXX: better use of kernel struct */
4489 __put_user(host_st
->st_size
, &target_st
->st_size
);
4490 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4491 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4492 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4493 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4494 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4495 unlock_user_struct(target_st
, target_addr
, 1);
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* val is compared against guest memory, so swap it to host order. */
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                                   pts, NULL, 0));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory. */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits carry the terminating signal number. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Bits 8-15 carry the stopping signal number. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
4569 int get_osversion(void)
4571 static int osversion
;
4572 struct new_utsname buf
;
4577 if (qemu_uname_release
&& *qemu_uname_release
) {
4578 s
= qemu_uname_release
;
4580 if (sys_uname(&buf
))
4585 for (i
= 0; i
< 3; i
++) {
4587 while (*s
>= '0' && *s
<= '9') {
4592 tmp
= (tmp
<< 8) + n
;
4600 /* do_syscall() should always have a single exit point at the end so
4601 that actions, such as logging of syscall results, can be performed.
4602 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4603 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
4604 abi_long arg2
, abi_long arg3
, abi_long arg4
,
4605 abi_long arg5
, abi_long arg6
, abi_long arg7
,
4614 gemu_log("syscall %d", num
);
4617 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
4620 case TARGET_NR_exit
:
4621 #ifdef CONFIG_USE_NPTL
4622 /* In old applications this may be used to implement _exit(2).
4623 However in threaded applictions it is used for thread termination,
4624 and _exit_group is used for application termination.
4625 Do thread termination if we have more then one thread. */
4626 /* FIXME: This probably breaks if a signal arrives. We should probably
4627 be disabling signals. */
4628 if (first_cpu
->next_cpu
) {
4636 while (p
&& p
!= (CPUState
*)cpu_env
) {
4637 lastp
= &p
->next_cpu
;
4640 /* If we didn't find the CPU for this thread then something is
4644 /* Remove the CPU from the list. */
4645 *lastp
= p
->next_cpu
;
4647 ts
= ((CPUState
*)cpu_env
)->opaque
;
4648 if (ts
->child_tidptr
) {
4649 put_user_u32(0, ts
->child_tidptr
);
4650 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
4662 gdb_exit(cpu_env
, arg1
);
4664 ret
= 0; /* avoid warning */
4666 case TARGET_NR_read
:
4670 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
4672 ret
= get_errno(read(arg1
, p
, arg3
));
4673 unlock_user(p
, arg2
, ret
);
4676 case TARGET_NR_write
:
4677 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
4679 ret
= get_errno(write(arg1
, p
, arg3
));
4680 unlock_user(p
, arg2
, 0);
4682 case TARGET_NR_open
:
4683 if (!(p
= lock_user_string(arg1
)))
4685 ret
= get_errno(open(path(p
),
4686 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
4688 unlock_user(p
, arg1
, 0);
4690 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4691 case TARGET_NR_openat
:
4692 if (!(p
= lock_user_string(arg2
)))
4694 ret
= get_errno(sys_openat(arg1
,
4696 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
4698 unlock_user(p
, arg2
, 0);
4701 case TARGET_NR_close
:
4702 ret
= get_errno(close(arg1
));
4707 case TARGET_NR_fork
:
4708 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
4710 #ifdef TARGET_NR_waitpid
4711 case TARGET_NR_waitpid
:
4714 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
4715 if (!is_error(ret
) && arg2
4716 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
4721 #ifdef TARGET_NR_waitid
4722 case TARGET_NR_waitid
:
4726 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
4727 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
4728 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
4730 host_to_target_siginfo(p
, &info
);
4731 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
4736 #ifdef TARGET_NR_creat /* not on alpha */
4737 case TARGET_NR_creat
:
4738 if (!(p
= lock_user_string(arg1
)))
4740 ret
= get_errno(creat(p
, arg2
));
4741 unlock_user(p
, arg1
, 0);
4744 case TARGET_NR_link
:
4747 p
= lock_user_string(arg1
);
4748 p2
= lock_user_string(arg2
);
4750 ret
= -TARGET_EFAULT
;
4752 ret
= get_errno(link(p
, p2
));
4753 unlock_user(p2
, arg2
, 0);
4754 unlock_user(p
, arg1
, 0);
4757 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4758 case TARGET_NR_linkat
:
4763 p
= lock_user_string(arg2
);
4764 p2
= lock_user_string(arg4
);
4766 ret
= -TARGET_EFAULT
;
4768 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
4769 unlock_user(p
, arg2
, 0);
4770 unlock_user(p2
, arg4
, 0);
4774 case TARGET_NR_unlink
:
4775 if (!(p
= lock_user_string(arg1
)))
4777 ret
= get_errno(unlink(p
));
4778 unlock_user(p
, arg1
, 0);
4780 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4781 case TARGET_NR_unlinkat
:
4782 if (!(p
= lock_user_string(arg2
)))
4784 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
4785 unlock_user(p
, arg2
, 0);
4788 case TARGET_NR_execve
:
4790 char **argp
, **envp
;
4793 abi_ulong guest_argp
;
4794 abi_ulong guest_envp
;
4800 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
4801 if (get_user_ual(addr
, gp
))
4809 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
4810 if (get_user_ual(addr
, gp
))
4817 argp
= alloca((argc
+ 1) * sizeof(void *));
4818 envp
= alloca((envc
+ 1) * sizeof(void *));
4820 for (gp
= guest_argp
, q
= argp
; gp
;
4821 gp
+= sizeof(abi_ulong
), q
++) {
4822 if (get_user_ual(addr
, gp
))
4826 if (!(*q
= lock_user_string(addr
)))
4831 for (gp
= guest_envp
, q
= envp
; gp
;
4832 gp
+= sizeof(abi_ulong
), q
++) {
4833 if (get_user_ual(addr
, gp
))
4837 if (!(*q
= lock_user_string(addr
)))
4842 if (!(p
= lock_user_string(arg1
)))
4844 ret
= get_errno(execve(p
, argp
, envp
));
4845 unlock_user(p
, arg1
, 0);
4850 ret
= -TARGET_EFAULT
;
4853 for (gp
= guest_argp
, q
= argp
; *q
;
4854 gp
+= sizeof(abi_ulong
), q
++) {
4855 if (get_user_ual(addr
, gp
)
4858 unlock_user(*q
, addr
, 0);
4860 for (gp
= guest_envp
, q
= envp
; *q
;
4861 gp
+= sizeof(abi_ulong
), q
++) {
4862 if (get_user_ual(addr
, gp
)
4865 unlock_user(*q
, addr
, 0);
4869 case TARGET_NR_chdir
:
4870 if (!(p
= lock_user_string(arg1
)))
4872 ret
= get_errno(chdir(p
));
4873 unlock_user(p
, arg1
, 0);
4875 #ifdef TARGET_NR_time
4876 case TARGET_NR_time
:
4879 ret
= get_errno(time(&host_time
));
4882 && put_user_sal(host_time
, arg1
))
4887 case TARGET_NR_mknod
:
4888 if (!(p
= lock_user_string(arg1
)))
4890 ret
= get_errno(mknod(p
, arg2
, arg3
));
4891 unlock_user(p
, arg1
, 0);
4893 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4894 case TARGET_NR_mknodat
:
4895 if (!(p
= lock_user_string(arg2
)))
4897 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
4898 unlock_user(p
, arg2
, 0);
4901 case TARGET_NR_chmod
:
4902 if (!(p
= lock_user_string(arg1
)))
4904 ret
= get_errno(chmod(p
, arg2
));
4905 unlock_user(p
, arg1
, 0);
4907 #ifdef TARGET_NR_break
4908 case TARGET_NR_break
:
4911 #ifdef TARGET_NR_oldstat
4912 case TARGET_NR_oldstat
:
4915 case TARGET_NR_lseek
:
4916 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
4918 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4919 /* Alpha specific */
4920 case TARGET_NR_getxpid
:
4921 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
4922 ret
= get_errno(getpid());
4925 #ifdef TARGET_NR_getpid
4926 case TARGET_NR_getpid
:
4927 ret
= get_errno(getpid());
4930 case TARGET_NR_mount
:
4932 /* need to look at the data field */
4934 p
= lock_user_string(arg1
);
4935 p2
= lock_user_string(arg2
);
4936 p3
= lock_user_string(arg3
);
4937 if (!p
|| !p2
|| !p3
)
4938 ret
= -TARGET_EFAULT
;
4940 /* FIXME - arg5 should be locked, but it isn't clear how to
4941 * do that since it's not guaranteed to be a NULL-terminated
4945 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
4947 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
4949 unlock_user(p
, arg1
, 0);
4950 unlock_user(p2
, arg2
, 0);
4951 unlock_user(p3
, arg3
, 0);
4954 #ifdef TARGET_NR_umount
4955 case TARGET_NR_umount
:
4956 if (!(p
= lock_user_string(arg1
)))
4958 ret
= get_errno(umount(p
));
4959 unlock_user(p
, arg1
, 0);
4962 #ifdef TARGET_NR_stime /* not on alpha */
4963 case TARGET_NR_stime
:
4966 if (get_user_sal(host_time
, arg1
))
4968 ret
= get_errno(stime(&host_time
));
4972 case TARGET_NR_ptrace
:
4974 #ifdef TARGET_NR_alarm /* not on alpha */
4975 case TARGET_NR_alarm
:
4979 #ifdef TARGET_NR_oldfstat
4980 case TARGET_NR_oldfstat
:
4983 #ifdef TARGET_NR_pause /* not on alpha */
4984 case TARGET_NR_pause
:
4985 ret
= get_errno(pause());
4988 #ifdef TARGET_NR_utime
4989 case TARGET_NR_utime
:
4991 struct utimbuf tbuf
, *host_tbuf
;
4992 struct target_utimbuf
*target_tbuf
;
4994 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
4996 tbuf
.actime
= tswapl(target_tbuf
->actime
);
4997 tbuf
.modtime
= tswapl(target_tbuf
->modtime
);
4998 unlock_user_struct(target_tbuf
, arg2
, 0);
5003 if (!(p
= lock_user_string(arg1
)))
5005 ret
= get_errno(utime(p
, host_tbuf
));
5006 unlock_user(p
, arg1
, 0);
5010 case TARGET_NR_utimes
:
5012 struct timeval
*tvp
, tv
[2];
5014 if (copy_from_user_timeval(&tv
[0], arg2
)
5015 || copy_from_user_timeval(&tv
[1],
5016 arg2
+ sizeof(struct target_timeval
)))
5022 if (!(p
= lock_user_string(arg1
)))
5024 ret
= get_errno(utimes(p
, tvp
));
5025 unlock_user(p
, arg1
, 0);
5028 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5029 case TARGET_NR_futimesat
:
5031 struct timeval
*tvp
, tv
[2];
5033 if (copy_from_user_timeval(&tv
[0], arg3
)
5034 || copy_from_user_timeval(&tv
[1],
5035 arg3
+ sizeof(struct target_timeval
)))
5041 if (!(p
= lock_user_string(arg2
)))
5043 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
5044 unlock_user(p
, arg2
, 0);
5048 #ifdef TARGET_NR_stty
5049 case TARGET_NR_stty
:
5052 #ifdef TARGET_NR_gtty
5053 case TARGET_NR_gtty
:
5056 case TARGET_NR_access
:
5057 if (!(p
= lock_user_string(arg1
)))
5059 ret
= get_errno(access(path(p
), arg2
));
5060 unlock_user(p
, arg1
, 0);
5062 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5063 case TARGET_NR_faccessat
:
5064 if (!(p
= lock_user_string(arg2
)))
5066 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
5067 unlock_user(p
, arg2
, 0);
5070 #ifdef TARGET_NR_nice /* not on alpha */
5071 case TARGET_NR_nice
:
5072 ret
= get_errno(nice(arg1
));
5075 #ifdef TARGET_NR_ftime
5076 case TARGET_NR_ftime
:
5079 case TARGET_NR_sync
:
5083 case TARGET_NR_kill
:
5084 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5086 case TARGET_NR_rename
:
5089 p
= lock_user_string(arg1
);
5090 p2
= lock_user_string(arg2
);
5092 ret
= -TARGET_EFAULT
;
5094 ret
= get_errno(rename(p
, p2
));
5095 unlock_user(p2
, arg2
, 0);
5096 unlock_user(p
, arg1
, 0);
5099 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5100 case TARGET_NR_renameat
:
5103 p
= lock_user_string(arg2
);
5104 p2
= lock_user_string(arg4
);
5106 ret
= -TARGET_EFAULT
;
5108 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
5109 unlock_user(p2
, arg4
, 0);
5110 unlock_user(p
, arg2
, 0);
5114 case TARGET_NR_mkdir
:
5115 if (!(p
= lock_user_string(arg1
)))
5117 ret
= get_errno(mkdir(p
, arg2
));
5118 unlock_user(p
, arg1
, 0);
5120 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5121 case TARGET_NR_mkdirat
:
5122 if (!(p
= lock_user_string(arg2
)))
5124 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
5125 unlock_user(p
, arg2
, 0);
5128 case TARGET_NR_rmdir
:
5129 if (!(p
= lock_user_string(arg1
)))
5131 ret
= get_errno(rmdir(p
));
5132 unlock_user(p
, arg1
, 0);
5135 ret
= get_errno(dup(arg1
));
5137 case TARGET_NR_pipe
:
5138 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5140 #ifdef TARGET_NR_pipe2
5141 case TARGET_NR_pipe2
:
5142 ret
= do_pipe(cpu_env
, arg1
, arg2
, 1);
5145 case TARGET_NR_times
:
5147 struct target_tms
*tmsp
;
5149 ret
= get_errno(times(&tms
));
5151 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5154 tmsp
->tms_utime
= tswapl(host_to_target_clock_t(tms
.tms_utime
));
5155 tmsp
->tms_stime
= tswapl(host_to_target_clock_t(tms
.tms_stime
));
5156 tmsp
->tms_cutime
= tswapl(host_to_target_clock_t(tms
.tms_cutime
));
5157 tmsp
->tms_cstime
= tswapl(host_to_target_clock_t(tms
.tms_cstime
));
5160 ret
= host_to_target_clock_t(ret
);
5163 #ifdef TARGET_NR_prof
5164 case TARGET_NR_prof
:
5167 #ifdef TARGET_NR_signal
5168 case TARGET_NR_signal
:
5171 case TARGET_NR_acct
:
5173 ret
= get_errno(acct(NULL
));
5175 if (!(p
= lock_user_string(arg1
)))
5177 ret
= get_errno(acct(path(p
)));
5178 unlock_user(p
, arg1
, 0);
5181 #ifdef TARGET_NR_umount2 /* not on alpha */
5182 case TARGET_NR_umount2
:
5183 if (!(p
= lock_user_string(arg1
)))
5185 ret
= get_errno(umount2(p
, arg2
));
5186 unlock_user(p
, arg1
, 0);
5189 #ifdef TARGET_NR_lock
5190 case TARGET_NR_lock
:
5193 case TARGET_NR_ioctl
:
5194 ret
= do_ioctl(arg1
, arg2
, arg3
);
5196 case TARGET_NR_fcntl
:
5197 ret
= do_fcntl(arg1
, arg2
, arg3
);
5199 #ifdef TARGET_NR_mpx
5203 case TARGET_NR_setpgid
:
5204 ret
= get_errno(setpgid(arg1
, arg2
));
5206 #ifdef TARGET_NR_ulimit
5207 case TARGET_NR_ulimit
:
5210 #ifdef TARGET_NR_oldolduname
5211 case TARGET_NR_oldolduname
:
5214 case TARGET_NR_umask
:
5215 ret
= get_errno(umask(arg1
));
5217 case TARGET_NR_chroot
:
5218 if (!(p
= lock_user_string(arg1
)))
5220 ret
= get_errno(chroot(p
));
5221 unlock_user(p
, arg1
, 0);
5223 case TARGET_NR_ustat
:
5225 case TARGET_NR_dup2
:
5226 ret
= get_errno(dup2(arg1
, arg2
));
5228 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5229 case TARGET_NR_dup3
:
5230 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5233 #ifdef TARGET_NR_getppid /* not on alpha */
5234 case TARGET_NR_getppid
:
5235 ret
= get_errno(getppid());
5238 case TARGET_NR_getpgrp
:
5239 ret
= get_errno(getpgrp());
5241 case TARGET_NR_setsid
:
5242 ret
= get_errno(setsid());
5244 #ifdef TARGET_NR_sigaction
5245 case TARGET_NR_sigaction
:
5247 #if defined(TARGET_ALPHA)
5248 struct target_sigaction act
, oact
, *pact
= 0;
5249 struct target_old_sigaction
*old_act
;
5251 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5253 act
._sa_handler
= old_act
->_sa_handler
;
5254 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5255 act
.sa_flags
= old_act
->sa_flags
;
5256 act
.sa_restorer
= 0;
5257 unlock_user_struct(old_act
, arg2
, 0);
5260 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5261 if (!is_error(ret
) && arg3
) {
5262 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5264 old_act
->_sa_handler
= oact
._sa_handler
;
5265 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5266 old_act
->sa_flags
= oact
.sa_flags
;
5267 unlock_user_struct(old_act
, arg3
, 1);
5269 #elif defined(TARGET_MIPS)
5270 struct target_sigaction act
, oact
, *pact
, *old_act
;
5273 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5275 act
._sa_handler
= old_act
->_sa_handler
;
5276 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5277 act
.sa_flags
= old_act
->sa_flags
;
5278 unlock_user_struct(old_act
, arg2
, 0);
5284 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5286 if (!is_error(ret
) && arg3
) {
5287 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5289 old_act
->_sa_handler
= oact
._sa_handler
;
5290 old_act
->sa_flags
= oact
.sa_flags
;
5291 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5292 old_act
->sa_mask
.sig
[1] = 0;
5293 old_act
->sa_mask
.sig
[2] = 0;
5294 old_act
->sa_mask
.sig
[3] = 0;
5295 unlock_user_struct(old_act
, arg3
, 1);
5298 struct target_old_sigaction
*old_act
;
5299 struct target_sigaction act
, oact
, *pact
;
5301 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5303 act
._sa_handler
= old_act
->_sa_handler
;
5304 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5305 act
.sa_flags
= old_act
->sa_flags
;
5306 act
.sa_restorer
= old_act
->sa_restorer
;
5307 unlock_user_struct(old_act
, arg2
, 0);
5312 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5313 if (!is_error(ret
) && arg3
) {
5314 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5316 old_act
->_sa_handler
= oact
._sa_handler
;
5317 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5318 old_act
->sa_flags
= oact
.sa_flags
;
5319 old_act
->sa_restorer
= oact
.sa_restorer
;
5320 unlock_user_struct(old_act
, arg3
, 1);
5326 case TARGET_NR_rt_sigaction
:
5328 #if defined(TARGET_ALPHA)
5329 struct target_sigaction act
, oact
, *pact
= 0;
5330 struct target_rt_sigaction
*rt_act
;
5331 /* ??? arg4 == sizeof(sigset_t). */
5333 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5335 act
._sa_handler
= rt_act
->_sa_handler
;
5336 act
.sa_mask
= rt_act
->sa_mask
;
5337 act
.sa_flags
= rt_act
->sa_flags
;
5338 act
.sa_restorer
= arg5
;
5339 unlock_user_struct(rt_act
, arg2
, 0);
5342 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5343 if (!is_error(ret
) && arg3
) {
5344 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5346 rt_act
->_sa_handler
= oact
._sa_handler
;
5347 rt_act
->sa_mask
= oact
.sa_mask
;
5348 rt_act
->sa_flags
= oact
.sa_flags
;
5349 unlock_user_struct(rt_act
, arg3
, 1);
5352 struct target_sigaction
*act
;
5353 struct target_sigaction
*oact
;
5356 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5361 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5362 ret
= -TARGET_EFAULT
;
5363 goto rt_sigaction_fail
;
5367 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5370 unlock_user_struct(act
, arg2
, 0);
5372 unlock_user_struct(oact
, arg3
, 1);
5376 #ifdef TARGET_NR_sgetmask /* not on alpha */
5377 case TARGET_NR_sgetmask
:
5380 abi_ulong target_set
;
5381 sigprocmask(0, NULL
, &cur_set
);
5382 host_to_target_old_sigset(&target_set
, &cur_set
);
5387 #ifdef TARGET_NR_ssetmask /* not on alpha */
5388 case TARGET_NR_ssetmask
:
5390 sigset_t set
, oset
, cur_set
;
5391 abi_ulong target_set
= arg1
;
5392 sigprocmask(0, NULL
, &cur_set
);
5393 target_to_host_old_sigset(&set
, &target_set
);
5394 sigorset(&set
, &set
, &cur_set
);
5395 sigprocmask(SIG_SETMASK
, &set
, &oset
);
5396 host_to_target_old_sigset(&target_set
, &oset
);
5401 #ifdef TARGET_NR_sigprocmask
5402 case TARGET_NR_sigprocmask
:
5404 #if defined(TARGET_ALPHA)
5405 sigset_t set
, oldset
;
5410 case TARGET_SIG_BLOCK
:
5413 case TARGET_SIG_UNBLOCK
:
5416 case TARGET_SIG_SETMASK
:
5420 ret
= -TARGET_EINVAL
;
5424 target_to_host_old_sigset(&set
, &mask
);
5426 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
5428 if (!is_error(ret
)) {
5429 host_to_target_old_sigset(&mask
, &oldset
);
5431 ((CPUAlphaState
*)cpu_env
)->[IR_V0
] = 0; /* force no error */
5434 sigset_t set
, oldset
, *set_ptr
;
5439 case TARGET_SIG_BLOCK
:
5442 case TARGET_SIG_UNBLOCK
:
5445 case TARGET_SIG_SETMASK
:
5449 ret
= -TARGET_EINVAL
;
5452 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5454 target_to_host_old_sigset(&set
, p
);
5455 unlock_user(p
, arg2
, 0);
5461 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5462 if (!is_error(ret
) && arg3
) {
5463 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5465 host_to_target_old_sigset(p
, &oldset
);
5466 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5472 case TARGET_NR_rt_sigprocmask
:
5475 sigset_t set
, oldset
, *set_ptr
;
5479 case TARGET_SIG_BLOCK
:
5482 case TARGET_SIG_UNBLOCK
:
5485 case TARGET_SIG_SETMASK
:
5489 ret
= -TARGET_EINVAL
;
5492 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5494 target_to_host_sigset(&set
, p
);
5495 unlock_user(p
, arg2
, 0);
5501 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5502 if (!is_error(ret
) && arg3
) {
5503 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5505 host_to_target_sigset(p
, &oldset
);
5506 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5510 #ifdef TARGET_NR_sigpending
5511 case TARGET_NR_sigpending
:
5514 ret
= get_errno(sigpending(&set
));
5515 if (!is_error(ret
)) {
5516 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5518 host_to_target_old_sigset(p
, &set
);
5519 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5524 case TARGET_NR_rt_sigpending
:
5527 ret
= get_errno(sigpending(&set
));
5528 if (!is_error(ret
)) {
5529 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5531 host_to_target_sigset(p
, &set
);
5532 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5536 #ifdef TARGET_NR_sigsuspend
5537 case TARGET_NR_sigsuspend
:
5540 #if defined(TARGET_ALPHA)
5541 abi_ulong mask
= arg1
;
5542 target_to_host_old_sigset(&set
, &mask
);
5544 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5546 target_to_host_old_sigset(&set
, p
);
5547 unlock_user(p
, arg1
, 0);
5549 ret
= get_errno(sigsuspend(&set
));
5553 case TARGET_NR_rt_sigsuspend
:
5556 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5558 target_to_host_sigset(&set
, p
);
5559 unlock_user(p
, arg1
, 0);
5560 ret
= get_errno(sigsuspend(&set
));
5563 case TARGET_NR_rt_sigtimedwait
:
5566 struct timespec uts
, *puts
;
5569 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5571 target_to_host_sigset(&set
, p
);
5572 unlock_user(p
, arg1
, 0);
5575 target_to_host_timespec(puts
, arg3
);
5579 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
5580 if (!is_error(ret
) && arg2
) {
5581 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
5583 host_to_target_siginfo(p
, &uinfo
);
5584 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
5588 case TARGET_NR_rt_sigqueueinfo
:
5591 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
5593 target_to_host_siginfo(&uinfo
, p
);
5594 unlock_user(p
, arg1
, 0);
5595 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
5598 #ifdef TARGET_NR_sigreturn
5599 case TARGET_NR_sigreturn
:
5600 /* NOTE: ret is eax, so no transcoding must be done */
5601 ret
= do_sigreturn(cpu_env
);
5604 case TARGET_NR_rt_sigreturn
:
5605 /* NOTE: ret is eax, so no transcoding must be done */
5606 ret
= do_rt_sigreturn(cpu_env
);
5608 case TARGET_NR_sethostname
:
5609 if (!(p
= lock_user_string(arg1
)))
5611 ret
= get_errno(sethostname(p
, arg2
));
5612 unlock_user(p
, arg1
, 0);
5614 case TARGET_NR_setrlimit
:
5616 int resource
= target_to_host_resource(arg1
);
5617 struct target_rlimit
*target_rlim
;
5619 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
5621 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
5622 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
5623 unlock_user_struct(target_rlim
, arg2
, 0);
5624 ret
= get_errno(setrlimit(resource
, &rlim
));
5627 case TARGET_NR_getrlimit
:
5629 int resource
= target_to_host_resource(arg1
);
5630 struct target_rlimit
*target_rlim
;
5633 ret
= get_errno(getrlimit(resource
, &rlim
));
5634 if (!is_error(ret
)) {
5635 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5637 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
5638 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
5639 unlock_user_struct(target_rlim
, arg2
, 1);
5643 case TARGET_NR_getrusage
:
5645 struct rusage rusage
;
5646 ret
= get_errno(getrusage(arg1
, &rusage
));
5647 if (!is_error(ret
)) {
5648 host_to_target_rusage(arg2
, &rusage
);
5652 case TARGET_NR_gettimeofday
:
5655 ret
= get_errno(gettimeofday(&tv
, NULL
));
5656 if (!is_error(ret
)) {
5657 if (copy_to_user_timeval(arg1
, &tv
))
5662 case TARGET_NR_settimeofday
:
5665 if (copy_from_user_timeval(&tv
, arg1
))
5667 ret
= get_errno(settimeofday(&tv
, NULL
));
5670 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5671 case TARGET_NR_select
:
5673 struct target_sel_arg_struct
*sel
;
5674 abi_ulong inp
, outp
, exp
, tvp
;
5677 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
5679 nsel
= tswapl(sel
->n
);
5680 inp
= tswapl(sel
->inp
);
5681 outp
= tswapl(sel
->outp
);
5682 exp
= tswapl(sel
->exp
);
5683 tvp
= tswapl(sel
->tvp
);
5684 unlock_user_struct(sel
, arg1
, 0);
5685 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
5689 #ifdef TARGET_NR_pselect6
5690 case TARGET_NR_pselect6
:
5692 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
5693 fd_set rfds
, wfds
, efds
;
5694 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
5695 struct timespec ts
, *ts_ptr
;
5698 * The 6th arg is actually two args smashed together,
5699 * so we cannot use the C library.
5707 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
5708 target_sigset_t
*target_sigset
;
5716 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
5720 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
5724 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
5730 * This takes a timespec, and not a timeval, so we cannot
5731 * use the do_select() helper ...
5734 if (target_to_host_timespec(&ts
, ts_addr
)) {
5742 /* Extract the two packed args for the sigset */
5745 sig
.size
= _NSIG
/ 8;
5747 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
5751 arg_sigset
= tswapl(arg7
[0]);
5752 arg_sigsize
= tswapl(arg7
[1]);
5753 unlock_user(arg7
, arg6
, 0);
5757 if (arg_sigsize
!= sizeof(*target_sigset
)) {
5758 /* Like the kernel, we enforce correct size sigsets */
5759 ret
= -TARGET_EINVAL
;
5762 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
5763 sizeof(*target_sigset
), 1);
5764 if (!target_sigset
) {
5767 target_to_host_sigset(&set
, target_sigset
);
5768 unlock_user(target_sigset
, arg_sigset
, 0);
5776 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
5779 if (!is_error(ret
)) {
5780 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
5782 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
5784 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
5787 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
5793 case TARGET_NR_symlink
:
5796 p
= lock_user_string(arg1
);
5797 p2
= lock_user_string(arg2
);
5799 ret
= -TARGET_EFAULT
;
5801 ret
= get_errno(symlink(p
, p2
));
5802 unlock_user(p2
, arg2
, 0);
5803 unlock_user(p
, arg1
, 0);
5806 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5807 case TARGET_NR_symlinkat
:
5810 p
= lock_user_string(arg1
);
5811 p2
= lock_user_string(arg3
);
5813 ret
= -TARGET_EFAULT
;
5815 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
5816 unlock_user(p2
, arg3
, 0);
5817 unlock_user(p
, arg1
, 0);
5821 #ifdef TARGET_NR_oldlstat
5822 case TARGET_NR_oldlstat
:
5825 case TARGET_NR_readlink
:
5828 p
= lock_user_string(arg1
);
5829 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
5831 ret
= -TARGET_EFAULT
;
5833 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
5834 char real
[PATH_MAX
];
5835 temp
= realpath(exec_path
,real
);
5836 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
5837 snprintf((char *)p2
, arg3
, "%s", real
);
5840 ret
= get_errno(readlink(path(p
), p2
, arg3
));
5842 unlock_user(p2
, arg2
, ret
);
5843 unlock_user(p
, arg1
, 0);
5846 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5847 case TARGET_NR_readlinkat
:
5850 p
= lock_user_string(arg2
);
5851 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
5853 ret
= -TARGET_EFAULT
;
5855 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
5856 unlock_user(p2
, arg3
, ret
);
5857 unlock_user(p
, arg2
, 0);
5861 #ifdef TARGET_NR_uselib
5862 case TARGET_NR_uselib
:
5865 #ifdef TARGET_NR_swapon
5866 case TARGET_NR_swapon
:
5867 if (!(p
= lock_user_string(arg1
)))
5869 ret
= get_errno(swapon(p
, arg2
));
5870 unlock_user(p
, arg1
, 0);
5873 case TARGET_NR_reboot
:
5875 #ifdef TARGET_NR_readdir
5876 case TARGET_NR_readdir
:
5879 #ifdef TARGET_NR_mmap
5880 case TARGET_NR_mmap
:
5881 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5882 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5883 || defined(TARGET_S390X)
5886 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
5887 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
5895 unlock_user(v
, arg1
, 0);
5896 ret
= get_errno(target_mmap(v1
, v2
, v3
,
5897 target_to_host_bitmask(v4
, mmap_flags_tbl
),
5901 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5902 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5908 #ifdef TARGET_NR_mmap2
5909 case TARGET_NR_mmap2
:
5911 #define MMAP_SHIFT 12
5913 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5914 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5916 arg6
<< MMAP_SHIFT
));
5919 case TARGET_NR_munmap
:
5920 ret
= get_errno(target_munmap(arg1
, arg2
));
5922 case TARGET_NR_mprotect
:
5924 TaskState
*ts
= ((CPUState
*)cpu_env
)->opaque
;
5925 /* Special hack to detect libc making the stack executable. */
5926 if ((arg3
& PROT_GROWSDOWN
)
5927 && arg1
>= ts
->info
->stack_limit
5928 && arg1
<= ts
->info
->start_stack
) {
5929 arg3
&= ~PROT_GROWSDOWN
;
5930 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
5931 arg1
= ts
->info
->stack_limit
;
5934 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
5936 #ifdef TARGET_NR_mremap
5937 case TARGET_NR_mremap
:
5938 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
5941 /* ??? msync/mlock/munlock are broken for softmmu. */
5942 #ifdef TARGET_NR_msync
5943 case TARGET_NR_msync
:
5944 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
5947 #ifdef TARGET_NR_mlock
5948 case TARGET_NR_mlock
:
5949 ret
= get_errno(mlock(g2h(arg1
), arg2
));
5952 #ifdef TARGET_NR_munlock
5953 case TARGET_NR_munlock
:
5954 ret
= get_errno(munlock(g2h(arg1
), arg2
));
5957 #ifdef TARGET_NR_mlockall
5958 case TARGET_NR_mlockall
:
5959 ret
= get_errno(mlockall(arg1
));
5962 #ifdef TARGET_NR_munlockall
5963 case TARGET_NR_munlockall
:
5964 ret
= get_errno(munlockall());
5967 case TARGET_NR_truncate
:
5968 if (!(p
= lock_user_string(arg1
)))
5970 ret
= get_errno(truncate(p
, arg2
));
5971 unlock_user(p
, arg1
, 0);
5973 case TARGET_NR_ftruncate
:
5974 ret
= get_errno(ftruncate(arg1
, arg2
));
5976 case TARGET_NR_fchmod
:
5977 ret
= get_errno(fchmod(arg1
, arg2
));
5979 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5980 case TARGET_NR_fchmodat
:
5981 if (!(p
= lock_user_string(arg2
)))
5983 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
5984 unlock_user(p
, arg2
, 0);
5987 case TARGET_NR_getpriority
:
5988 /* libc does special remapping of the return value of
5989 * sys_getpriority() so it's just easiest to call
5990 * sys_getpriority() directly rather than through libc. */
5991 ret
= get_errno(sys_getpriority(arg1
, arg2
));
5993 case TARGET_NR_setpriority
:
5994 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
5996 #ifdef TARGET_NR_profil
5997 case TARGET_NR_profil
:
6000 case TARGET_NR_statfs
:
6001 if (!(p
= lock_user_string(arg1
)))
6003 ret
= get_errno(statfs(path(p
), &stfs
));
6004 unlock_user(p
, arg1
, 0);
6006 if (!is_error(ret
)) {
6007 struct target_statfs
*target_stfs
;
6009 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6011 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6012 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6013 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6014 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6015 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6016 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6017 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6018 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6019 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6020 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6021 unlock_user_struct(target_stfs
, arg2
, 1);
6024 case TARGET_NR_fstatfs
:
6025 ret
= get_errno(fstatfs(arg1
, &stfs
));
6026 goto convert_statfs
;
6027 #ifdef TARGET_NR_statfs64
6028 case TARGET_NR_statfs64
:
6029 if (!(p
= lock_user_string(arg1
)))
6031 ret
= get_errno(statfs(path(p
), &stfs
));
6032 unlock_user(p
, arg1
, 0);
6034 if (!is_error(ret
)) {
6035 struct target_statfs64
*target_stfs
;
6037 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6039 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6040 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6041 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6042 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6043 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6044 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6045 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6046 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6047 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6048 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6049 unlock_user_struct(target_stfs
, arg3
, 1);
6052 case TARGET_NR_fstatfs64
:
6053 ret
= get_errno(fstatfs(arg1
, &stfs
));
6054 goto convert_statfs64
;
6056 #ifdef TARGET_NR_ioperm
6057 case TARGET_NR_ioperm
:
6060 #ifdef TARGET_NR_socketcall
6061 case TARGET_NR_socketcall
:
6062 ret
= do_socketcall(arg1
, arg2
);
6065 #ifdef TARGET_NR_accept
6066 case TARGET_NR_accept
:
6067 ret
= do_accept(arg1
, arg2
, arg3
);
6070 #ifdef TARGET_NR_bind
6071 case TARGET_NR_bind
:
6072 ret
= do_bind(arg1
, arg2
, arg3
);
6075 #ifdef TARGET_NR_connect
6076 case TARGET_NR_connect
:
6077 ret
= do_connect(arg1
, arg2
, arg3
);
6080 #ifdef TARGET_NR_getpeername
6081 case TARGET_NR_getpeername
:
6082 ret
= do_getpeername(arg1
, arg2
, arg3
);
6085 #ifdef TARGET_NR_getsockname
6086 case TARGET_NR_getsockname
:
6087 ret
= do_getsockname(arg1
, arg2
, arg3
);
6090 #ifdef TARGET_NR_getsockopt
6091 case TARGET_NR_getsockopt
:
6092 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
6095 #ifdef TARGET_NR_listen
6096 case TARGET_NR_listen
:
6097 ret
= get_errno(listen(arg1
, arg2
));
6100 #ifdef TARGET_NR_recv
6101 case TARGET_NR_recv
:
6102 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
6105 #ifdef TARGET_NR_recvfrom
6106 case TARGET_NR_recvfrom
:
6107 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6110 #ifdef TARGET_NR_recvmsg
6111 case TARGET_NR_recvmsg
:
6112 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
6115 #ifdef TARGET_NR_send
6116 case TARGET_NR_send
:
6117 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
6120 #ifdef TARGET_NR_sendmsg
6121 case TARGET_NR_sendmsg
:
6122 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
6125 #ifdef TARGET_NR_sendto
6126 case TARGET_NR_sendto
:
6127 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6130 #ifdef TARGET_NR_shutdown
6131 case TARGET_NR_shutdown
:
6132 ret
= get_errno(shutdown(arg1
, arg2
));
6135 #ifdef TARGET_NR_socket
6136 case TARGET_NR_socket
:
6137 ret
= do_socket(arg1
, arg2
, arg3
);
6140 #ifdef TARGET_NR_socketpair
6141 case TARGET_NR_socketpair
:
6142 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
6145 #ifdef TARGET_NR_setsockopt
6146 case TARGET_NR_setsockopt
:
6147 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
6151 case TARGET_NR_syslog
:
6152 if (!(p
= lock_user_string(arg2
)))
6154 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
6155 unlock_user(p
, arg2
, 0);
6158 case TARGET_NR_setitimer
:
6160 struct itimerval value
, ovalue
, *pvalue
;
6164 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
6165 || copy_from_user_timeval(&pvalue
->it_value
,
6166 arg2
+ sizeof(struct target_timeval
)))
6171 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
6172 if (!is_error(ret
) && arg3
) {
6173 if (copy_to_user_timeval(arg3
,
6174 &ovalue
.it_interval
)
6175 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
6181 case TARGET_NR_getitimer
:
6183 struct itimerval value
;
6185 ret
= get_errno(getitimer(arg1
, &value
));
6186 if (!is_error(ret
) && arg2
) {
6187 if (copy_to_user_timeval(arg2
,
6189 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
6195 case TARGET_NR_stat
:
6196 if (!(p
= lock_user_string(arg1
)))
6198 ret
= get_errno(stat(path(p
), &st
));
6199 unlock_user(p
, arg1
, 0);
6201 case TARGET_NR_lstat
:
6202 if (!(p
= lock_user_string(arg1
)))
6204 ret
= get_errno(lstat(path(p
), &st
));
6205 unlock_user(p
, arg1
, 0);
6207 case TARGET_NR_fstat
:
6209 ret
= get_errno(fstat(arg1
, &st
));
6211 if (!is_error(ret
)) {
6212 struct target_stat
*target_st
;
6214 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
6216 memset(target_st
, 0, sizeof(*target_st
));
6217 __put_user(st
.st_dev
, &target_st
->st_dev
);
6218 __put_user(st
.st_ino
, &target_st
->st_ino
);
6219 __put_user(st
.st_mode
, &target_st
->st_mode
);
6220 __put_user(st
.st_uid
, &target_st
->st_uid
);
6221 __put_user(st
.st_gid
, &target_st
->st_gid
);
6222 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
6223 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
6224 __put_user(st
.st_size
, &target_st
->st_size
);
6225 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
6226 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
6227 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
6228 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
6229 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
6230 unlock_user_struct(target_st
, arg2
, 1);
6234 #ifdef TARGET_NR_olduname
6235 case TARGET_NR_olduname
:
6238 #ifdef TARGET_NR_iopl
6239 case TARGET_NR_iopl
:
6242 case TARGET_NR_vhangup
:
6243 ret
= get_errno(vhangup());
6245 #ifdef TARGET_NR_idle
6246 case TARGET_NR_idle
:
6249 #ifdef TARGET_NR_syscall
6250 case TARGET_NR_syscall
:
6251 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
6252 arg6
, arg7
, arg8
, 0);
6255 case TARGET_NR_wait4
:
6258 abi_long status_ptr
= arg2
;
6259 struct rusage rusage
, *rusage_ptr
;
6260 abi_ulong target_rusage
= arg4
;
6262 rusage_ptr
= &rusage
;
6265 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6266 if (!is_error(ret
)) {
6268 status
= host_to_target_waitstatus(status
);
6269 if (put_user_s32(status
, status_ptr
))
6273 host_to_target_rusage(target_rusage
, &rusage
);
6277 #ifdef TARGET_NR_swapoff
6278 case TARGET_NR_swapoff
:
6279 if (!(p
= lock_user_string(arg1
)))
6281 ret
= get_errno(swapoff(p
));
6282 unlock_user(p
, arg1
, 0);
6285 case TARGET_NR_sysinfo
:
6287 struct target_sysinfo
*target_value
;
6288 struct sysinfo value
;
6289 ret
= get_errno(sysinfo(&value
));
6290 if (!is_error(ret
) && arg1
)
6292 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
6294 __put_user(value
.uptime
, &target_value
->uptime
);
6295 __put_user(value
.loads
[0], &target_value
->loads
[0]);
6296 __put_user(value
.loads
[1], &target_value
->loads
[1]);
6297 __put_user(value
.loads
[2], &target_value
->loads
[2]);
6298 __put_user(value
.totalram
, &target_value
->totalram
);
6299 __put_user(value
.freeram
, &target_value
->freeram
);
6300 __put_user(value
.sharedram
, &target_value
->sharedram
);
6301 __put_user(value
.bufferram
, &target_value
->bufferram
);
6302 __put_user(value
.totalswap
, &target_value
->totalswap
);
6303 __put_user(value
.freeswap
, &target_value
->freeswap
);
6304 __put_user(value
.procs
, &target_value
->procs
);
6305 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
6306 __put_user(value
.freehigh
, &target_value
->freehigh
);
6307 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
6308 unlock_user_struct(target_value
, arg1
, 1);
6312 #ifdef TARGET_NR_ipc
6314 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6317 #ifdef TARGET_NR_semget
6318 case TARGET_NR_semget
:
6319 ret
= get_errno(semget(arg1
, arg2
, arg3
));
6322 #ifdef TARGET_NR_semop
6323 case TARGET_NR_semop
:
6324 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
6327 #ifdef TARGET_NR_semctl
6328 case TARGET_NR_semctl
:
6329 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
6332 #ifdef TARGET_NR_msgctl
6333 case TARGET_NR_msgctl
:
6334 ret
= do_msgctl(arg1
, arg2
, arg3
);
6337 #ifdef TARGET_NR_msgget
6338 case TARGET_NR_msgget
:
6339 ret
= get_errno(msgget(arg1
, arg2
));
6342 #ifdef TARGET_NR_msgrcv
6343 case TARGET_NR_msgrcv
:
6344 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
6347 #ifdef TARGET_NR_msgsnd
6348 case TARGET_NR_msgsnd
:
6349 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
6352 #ifdef TARGET_NR_shmget
6353 case TARGET_NR_shmget
:
6354 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
6357 #ifdef TARGET_NR_shmctl
6358 case TARGET_NR_shmctl
:
6359 ret
= do_shmctl(arg1
, arg2
, arg3
);
6362 #ifdef TARGET_NR_shmat
6363 case TARGET_NR_shmat
:
6364 ret
= do_shmat(arg1
, arg2
, arg3
);
6367 #ifdef TARGET_NR_shmdt
6368 case TARGET_NR_shmdt
:
6369 ret
= do_shmdt(arg1
);
6372 case TARGET_NR_fsync
:
6373 ret
= get_errno(fsync(arg1
));
6375 case TARGET_NR_clone
:
6376 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6377 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
6378 #elif defined(TARGET_CRIS)
6379 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
6380 #elif defined(TARGET_S390X)
6381 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
6383 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
6386 #ifdef __NR_exit_group
6387 /* new thread calls */
6388 case TARGET_NR_exit_group
:
6392 gdb_exit(cpu_env
, arg1
);
6393 ret
= get_errno(exit_group(arg1
));
6396 case TARGET_NR_setdomainname
:
6397 if (!(p
= lock_user_string(arg1
)))
6399 ret
= get_errno(setdomainname(p
, arg2
));
6400 unlock_user(p
, arg1
, 0);
6402 case TARGET_NR_uname
:
6403 /* no need to transcode because we use the linux syscall */
6405 struct new_utsname
* buf
;
6407 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
6409 ret
= get_errno(sys_uname(buf
));
6410 if (!is_error(ret
)) {
6411 /* Overwrite the native machine name with whatever is being
6413 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
6414 /* Allow the user to override the reported release. */
6415 if (qemu_uname_release
&& *qemu_uname_release
)
6416 strcpy (buf
->release
, qemu_uname_release
);
6418 unlock_user_struct(buf
, arg1
, 1);
6422 case TARGET_NR_modify_ldt
:
6423 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
6425 #if !defined(TARGET_X86_64)
6426 case TARGET_NR_vm86old
:
6428 case TARGET_NR_vm86
:
6429 ret
= do_vm86(cpu_env
, arg1
, arg2
);
6433 case TARGET_NR_adjtimex
:
6435 #ifdef TARGET_NR_create_module
6436 case TARGET_NR_create_module
:
6438 case TARGET_NR_init_module
:
6439 case TARGET_NR_delete_module
:
6440 #ifdef TARGET_NR_get_kernel_syms
6441 case TARGET_NR_get_kernel_syms
:
6444 case TARGET_NR_quotactl
:
6446 case TARGET_NR_getpgid
:
6447 ret
= get_errno(getpgid(arg1
));
6449 case TARGET_NR_fchdir
:
6450 ret
= get_errno(fchdir(arg1
));
6452 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6453 case TARGET_NR_bdflush
:
6456 #ifdef TARGET_NR_sysfs
6457 case TARGET_NR_sysfs
:
6460 case TARGET_NR_personality
:
6461 ret
= get_errno(personality(arg1
));
6463 #ifdef TARGET_NR_afs_syscall
6464 case TARGET_NR_afs_syscall
:
6467 #ifdef TARGET_NR__llseek /* Not on alpha */
6468 case TARGET_NR__llseek
:
6471 #if !defined(__NR_llseek)
6472 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
6474 ret
= get_errno(res
);
6479 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
6481 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
6487 case TARGET_NR_getdents
:
6488 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6490 struct target_dirent
*target_dirp
;
6491 struct linux_dirent
*dirp
;
6492 abi_long count
= arg3
;
6494 dirp
= malloc(count
);
6496 ret
= -TARGET_ENOMEM
;
6500 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6501 if (!is_error(ret
)) {
6502 struct linux_dirent
*de
;
6503 struct target_dirent
*tde
;
6505 int reclen
, treclen
;
6506 int count1
, tnamelen
;
6510 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6514 reclen
= de
->d_reclen
;
6515 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
6516 tde
->d_reclen
= tswap16(treclen
);
6517 tde
->d_ino
= tswapl(de
->d_ino
);
6518 tde
->d_off
= tswapl(de
->d_off
);
6519 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
6522 /* XXX: may not be correct */
6523 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
6524 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6526 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
6530 unlock_user(target_dirp
, arg2
, ret
);
6536 struct linux_dirent
*dirp
;
6537 abi_long count
= arg3
;
6539 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6541 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6542 if (!is_error(ret
)) {
6543 struct linux_dirent
*de
;
6548 reclen
= de
->d_reclen
;
6551 de
->d_reclen
= tswap16(reclen
);
6552 tswapls(&de
->d_ino
);
6553 tswapls(&de
->d_off
);
6554 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6558 unlock_user(dirp
, arg2
, ret
);
6562 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6563 case TARGET_NR_getdents64
:
6565 struct linux_dirent64
*dirp
;
6566 abi_long count
= arg3
;
6567 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6569 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
6570 if (!is_error(ret
)) {
6571 struct linux_dirent64
*de
;
6576 reclen
= de
->d_reclen
;
6579 de
->d_reclen
= tswap16(reclen
);
6580 tswap64s((uint64_t *)&de
->d_ino
);
6581 tswap64s((uint64_t *)&de
->d_off
);
6582 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
6586 unlock_user(dirp
, arg2
, ret
);
6589 #endif /* TARGET_NR_getdents64 */
6590 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6592 case TARGET_NR_select
:
6594 case TARGET_NR__newselect
:
6596 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6599 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6600 # ifdef TARGET_NR_poll
6601 case TARGET_NR_poll
:
6603 # ifdef TARGET_NR_ppoll
6604 case TARGET_NR_ppoll
:
6607 struct target_pollfd
*target_pfd
;
6608 unsigned int nfds
= arg2
;
6613 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
6617 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
6618 for(i
= 0; i
< nfds
; i
++) {
6619 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
6620 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
6623 # ifdef TARGET_NR_ppoll
6624 if (num
== TARGET_NR_ppoll
) {
6625 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
6626 target_sigset_t
*target_set
;
6627 sigset_t _set
, *set
= &_set
;
6630 if (target_to_host_timespec(timeout_ts
, arg3
)) {
6631 unlock_user(target_pfd
, arg1
, 0);
6639 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
6641 unlock_user(target_pfd
, arg1
, 0);
6644 target_to_host_sigset(set
, target_set
);
6649 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
6651 if (!is_error(ret
) && arg3
) {
6652 host_to_target_timespec(arg3
, timeout_ts
);
6655 unlock_user(target_set
, arg4
, 0);
6659 ret
= get_errno(poll(pfd
, nfds
, timeout
));
6661 if (!is_error(ret
)) {
6662 for(i
= 0; i
< nfds
; i
++) {
6663 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
6666 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
6670 case TARGET_NR_flock
:
6671 /* NOTE: the flock constant seems to be the same for every
6673 ret
= get_errno(flock(arg1
, arg2
));
6675 case TARGET_NR_readv
:
6680 vec
= alloca(count
* sizeof(struct iovec
));
6681 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
6683 ret
= get_errno(readv(arg1
, vec
, count
));
6684 unlock_iovec(vec
, arg2
, count
, 1);
6687 case TARGET_NR_writev
:
6692 vec
= alloca(count
* sizeof(struct iovec
));
6693 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
6695 ret
= get_errno(writev(arg1
, vec
, count
));
6696 unlock_iovec(vec
, arg2
, count
, 0);
6699 case TARGET_NR_getsid
:
6700 ret
= get_errno(getsid(arg1
));
6702 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6703 case TARGET_NR_fdatasync
:
6704 ret
= get_errno(fdatasync(arg1
));
6707 case TARGET_NR__sysctl
:
6708 /* We don't implement this, but ENOTDIR is always a safe
6710 ret
= -TARGET_ENOTDIR
;
6712 case TARGET_NR_sched_getaffinity
:
6714 unsigned int mask_size
;
6715 unsigned long *mask
;
6718 * sched_getaffinity needs multiples of ulong, so need to take
6719 * care of mismatches between target ulong and host ulong sizes.
6721 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6722 ret
= -TARGET_EINVAL
;
6725 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6727 mask
= alloca(mask_size
);
6728 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
6730 if (!is_error(ret
)) {
6731 if (copy_to_user(arg3
, mask
, ret
)) {
6737 case TARGET_NR_sched_setaffinity
:
6739 unsigned int mask_size
;
6740 unsigned long *mask
;
6743 * sched_setaffinity needs multiples of ulong, so need to take
6744 * care of mismatches between target ulong and host ulong sizes.
6746 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6747 ret
= -TARGET_EINVAL
;
6750 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6752 mask
= alloca(mask_size
);
6753 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
6756 memcpy(mask
, p
, arg2
);
6757 unlock_user_struct(p
, arg2
, 0);
6759 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
6762 case TARGET_NR_sched_setparam
:
6764 struct sched_param
*target_schp
;
6765 struct sched_param schp
;
6767 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
6769 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6770 unlock_user_struct(target_schp
, arg2
, 0);
6771 ret
= get_errno(sched_setparam(arg1
, &schp
));
6774 case TARGET_NR_sched_getparam
:
6776 struct sched_param
*target_schp
;
6777 struct sched_param schp
;
6778 ret
= get_errno(sched_getparam(arg1
, &schp
));
6779 if (!is_error(ret
)) {
6780 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
6782 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
6783 unlock_user_struct(target_schp
, arg2
, 1);
6787 case TARGET_NR_sched_setscheduler
:
6789 struct sched_param
*target_schp
;
6790 struct sched_param schp
;
6791 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
6793 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6794 unlock_user_struct(target_schp
, arg3
, 0);
6795 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
6798 case TARGET_NR_sched_getscheduler
:
6799 ret
= get_errno(sched_getscheduler(arg1
));
6801 case TARGET_NR_sched_yield
:
6802 ret
= get_errno(sched_yield());
6804 case TARGET_NR_sched_get_priority_max
:
6805 ret
= get_errno(sched_get_priority_max(arg1
));
6807 case TARGET_NR_sched_get_priority_min
:
6808 ret
= get_errno(sched_get_priority_min(arg1
));
6810 case TARGET_NR_sched_rr_get_interval
:
6813 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
6814 if (!is_error(ret
)) {
6815 host_to_target_timespec(arg2
, &ts
);
6819 case TARGET_NR_nanosleep
:
6821 struct timespec req
, rem
;
6822 target_to_host_timespec(&req
, arg1
);
6823 ret
= get_errno(nanosleep(&req
, &rem
));
6824 if (is_error(ret
) && arg2
) {
6825 host_to_target_timespec(arg2
, &rem
);
6829 #ifdef TARGET_NR_query_module
6830 case TARGET_NR_query_module
:
6833 #ifdef TARGET_NR_nfsservctl
6834 case TARGET_NR_nfsservctl
:
6837 case TARGET_NR_prctl
:
6840 case PR_GET_PDEATHSIG
:
6843 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
6844 if (!is_error(ret
) && arg2
6845 && put_user_ual(deathsig
, arg2
))
6850 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
6854 #ifdef TARGET_NR_arch_prctl
6855 case TARGET_NR_arch_prctl
:
6856 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6857 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
6863 #ifdef TARGET_NR_pread
6864 case TARGET_NR_pread
:
6865 if (regpairs_aligned(cpu_env
))
6867 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6869 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
6870 unlock_user(p
, arg2
, ret
);
6872 case TARGET_NR_pwrite
:
6873 if (regpairs_aligned(cpu_env
))
6875 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6877 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
6878 unlock_user(p
, arg2
, 0);
6881 #ifdef TARGET_NR_pread64
6882 case TARGET_NR_pread64
:
6883 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6885 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6886 unlock_user(p
, arg2
, ret
);
6888 case TARGET_NR_pwrite64
:
6889 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6891 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6892 unlock_user(p
, arg2
, 0);
6895 case TARGET_NR_getcwd
:
6896 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
6898 ret
= get_errno(sys_getcwd1(p
, arg2
));
6899 unlock_user(p
, arg1
, ret
);
6901 case TARGET_NR_capget
:
6903 case TARGET_NR_capset
:
6905 case TARGET_NR_sigaltstack
:
6906 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6907 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6908 defined(TARGET_M68K) || defined(TARGET_S390X)
6909 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUState
*)cpu_env
));
6914 case TARGET_NR_sendfile
:
6916 #ifdef TARGET_NR_getpmsg
6917 case TARGET_NR_getpmsg
:
6920 #ifdef TARGET_NR_putpmsg
6921 case TARGET_NR_putpmsg
:
6924 #ifdef TARGET_NR_vfork
6925 case TARGET_NR_vfork
:
6926 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
6930 #ifdef TARGET_NR_ugetrlimit
6931 case TARGET_NR_ugetrlimit
:
6934 int resource
= target_to_host_resource(arg1
);
6935 ret
= get_errno(getrlimit(resource
, &rlim
));
6936 if (!is_error(ret
)) {
6937 struct target_rlimit
*target_rlim
;
6938 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6940 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6941 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6942 unlock_user_struct(target_rlim
, arg2
, 1);
6947 #ifdef TARGET_NR_truncate64
6948 case TARGET_NR_truncate64
:
6949 if (!(p
= lock_user_string(arg1
)))
6951 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
6952 unlock_user(p
, arg1
, 0);
6955 #ifdef TARGET_NR_ftruncate64
6956 case TARGET_NR_ftruncate64
:
6957 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
6960 #ifdef TARGET_NR_stat64
6961 case TARGET_NR_stat64
:
6962 if (!(p
= lock_user_string(arg1
)))
6964 ret
= get_errno(stat(path(p
), &st
));
6965 unlock_user(p
, arg1
, 0);
6967 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6970 #ifdef TARGET_NR_lstat64
6971 case TARGET_NR_lstat64
:
6972 if (!(p
= lock_user_string(arg1
)))
6974 ret
= get_errno(lstat(path(p
), &st
));
6975 unlock_user(p
, arg1
, 0);
6977 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6980 #ifdef TARGET_NR_fstat64
6981 case TARGET_NR_fstat64
:
6982 ret
= get_errno(fstat(arg1
, &st
));
6984 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6987 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6988 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6989 #ifdef TARGET_NR_fstatat64
6990 case TARGET_NR_fstatat64
:
6992 #ifdef TARGET_NR_newfstatat
6993 case TARGET_NR_newfstatat
:
6995 if (!(p
= lock_user_string(arg2
)))
6997 #ifdef __NR_fstatat64
6998 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
7000 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
7003 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
7006 case TARGET_NR_lchown
:
7007 if (!(p
= lock_user_string(arg1
)))
7009 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7010 unlock_user(p
, arg1
, 0);
7012 #ifdef TARGET_NR_getuid
7013 case TARGET_NR_getuid
:
7014 ret
= get_errno(high2lowuid(getuid()));
7017 #ifdef TARGET_NR_getgid
7018 case TARGET_NR_getgid
:
7019 ret
= get_errno(high2lowgid(getgid()));
7022 #ifdef TARGET_NR_geteuid
7023 case TARGET_NR_geteuid
:
7024 ret
= get_errno(high2lowuid(geteuid()));
7027 #ifdef TARGET_NR_getegid
7028 case TARGET_NR_getegid
:
7029 ret
= get_errno(high2lowgid(getegid()));
7032 case TARGET_NR_setreuid
:
7033 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
7035 case TARGET_NR_setregid
:
7036 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
7038 case TARGET_NR_getgroups
:
7040 int gidsetsize
= arg1
;
7041 target_id
*target_grouplist
;
7045 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7046 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7047 if (gidsetsize
== 0)
7049 if (!is_error(ret
)) {
7050 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
7051 if (!target_grouplist
)
7053 for(i
= 0;i
< ret
; i
++)
7054 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
7055 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
7059 case TARGET_NR_setgroups
:
7061 int gidsetsize
= arg1
;
7062 target_id
*target_grouplist
;
7066 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7067 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
7068 if (!target_grouplist
) {
7069 ret
= -TARGET_EFAULT
;
7072 for(i
= 0;i
< gidsetsize
; i
++)
7073 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
7074 unlock_user(target_grouplist
, arg2
, 0);
7075 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7078 case TARGET_NR_fchown
:
7079 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
7081 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7082 case TARGET_NR_fchownat
:
7083 if (!(p
= lock_user_string(arg2
)))
7085 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
7086 unlock_user(p
, arg2
, 0);
7089 #ifdef TARGET_NR_setresuid
7090 case TARGET_NR_setresuid
:
7091 ret
= get_errno(setresuid(low2highuid(arg1
),
7093 low2highuid(arg3
)));
7096 #ifdef TARGET_NR_getresuid
7097 case TARGET_NR_getresuid
:
7099 uid_t ruid
, euid
, suid
;
7100 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7101 if (!is_error(ret
)) {
7102 if (put_user_u16(high2lowuid(ruid
), arg1
)
7103 || put_user_u16(high2lowuid(euid
), arg2
)
7104 || put_user_u16(high2lowuid(suid
), arg3
))
7110 #ifdef TARGET_NR_getresgid
7111 case TARGET_NR_setresgid
:
7112 ret
= get_errno(setresgid(low2highgid(arg1
),
7114 low2highgid(arg3
)));
7117 #ifdef TARGET_NR_getresgid
7118 case TARGET_NR_getresgid
:
7120 gid_t rgid
, egid
, sgid
;
7121 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7122 if (!is_error(ret
)) {
7123 if (put_user_u16(high2lowgid(rgid
), arg1
)
7124 || put_user_u16(high2lowgid(egid
), arg2
)
7125 || put_user_u16(high2lowgid(sgid
), arg3
))
7131 case TARGET_NR_chown
:
7132 if (!(p
= lock_user_string(arg1
)))
7134 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7135 unlock_user(p
, arg1
, 0);
7137 case TARGET_NR_setuid
:
7138 ret
= get_errno(setuid(low2highuid(arg1
)));
7140 case TARGET_NR_setgid
:
7141 ret
= get_errno(setgid(low2highgid(arg1
)));
7143 case TARGET_NR_setfsuid
:
7144 ret
= get_errno(setfsuid(arg1
));
7146 case TARGET_NR_setfsgid
:
7147 ret
= get_errno(setfsgid(arg1
));
7150 #ifdef TARGET_NR_lchown32
7151 case TARGET_NR_lchown32
:
7152 if (!(p
= lock_user_string(arg1
)))
7154 ret
= get_errno(lchown(p
, arg2
, arg3
));
7155 unlock_user(p
, arg1
, 0);
7158 #ifdef TARGET_NR_getuid32
7159 case TARGET_NR_getuid32
:
7160 ret
= get_errno(getuid());
7164 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7165 /* Alpha specific */
7166 case TARGET_NR_getxuid
:
7170 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
7172 ret
= get_errno(getuid());
7175 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7176 /* Alpha specific */
7177 case TARGET_NR_getxgid
:
7181 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
7183 ret
= get_errno(getgid());
7186 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7187 /* Alpha specific */
7188 case TARGET_NR_osf_getsysinfo
:
7189 ret
= -TARGET_EOPNOTSUPP
;
7191 case TARGET_GSI_IEEE_FP_CONTROL
:
7193 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
7195 /* Copied from linux ieee_fpcr_to_swcr. */
7196 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
7197 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
7198 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
7199 | SWCR_TRAP_ENABLE_DZE
7200 | SWCR_TRAP_ENABLE_OVF
);
7201 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
7202 | SWCR_TRAP_ENABLE_INE
);
7203 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
7204 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
7206 if (put_user_u64 (swcr
, arg2
))
7212 /* case GSI_IEEE_STATE_AT_SIGNAL:
7213 -- Not implemented in linux kernel.
7215 -- Retrieves current unaligned access state; not much used.
7217 -- Retrieves implver information; surely not used.
7219 -- Grabs a copy of the HWRPB; surely not used.
7224 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7225 /* Alpha specific */
7226 case TARGET_NR_osf_setsysinfo
:
7227 ret
= -TARGET_EOPNOTSUPP
;
7229 case TARGET_SSI_IEEE_FP_CONTROL
:
7230 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
7232 uint64_t swcr
, fpcr
, orig_fpcr
;
7234 if (get_user_u64 (swcr
, arg2
))
7236 orig_fpcr
= cpu_alpha_load_fpcr (cpu_env
);
7237 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
7239 /* Copied from linux ieee_swcr_to_fpcr. */
7240 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
7241 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
7242 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
7243 | SWCR_TRAP_ENABLE_DZE
7244 | SWCR_TRAP_ENABLE_OVF
)) << 48;
7245 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
7246 | SWCR_TRAP_ENABLE_INE
)) << 57;
7247 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
7248 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
7250 cpu_alpha_store_fpcr (cpu_env
, fpcr
);
7253 if (arg1
== TARGET_SSI_IEEE_RAISE_EXCEPTION
) {
7254 /* Old exceptions are not signaled. */
7255 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
7257 /* If any exceptions set by this call, and are unmasked,
7264 /* case SSI_NVPAIRS:
7265 -- Used with SSIN_UACPROC to enable unaligned accesses.
7266 case SSI_IEEE_STATE_AT_SIGNAL:
7267 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7268 -- Not implemented in linux kernel
7273 #ifdef TARGET_NR_osf_sigprocmask
7274 /* Alpha specific. */
7275 case TARGET_NR_osf_sigprocmask
:
7279 sigset_t set
, oldset
;
7282 case TARGET_SIG_BLOCK
:
7285 case TARGET_SIG_UNBLOCK
:
7288 case TARGET_SIG_SETMASK
:
7292 ret
= -TARGET_EINVAL
;
7296 target_to_host_old_sigset(&set
, &mask
);
7297 sigprocmask(how
, &set
, &oldset
);
7298 host_to_target_old_sigset(&mask
, &oldset
);
7304 #ifdef TARGET_NR_getgid32
7305 case TARGET_NR_getgid32
:
7306 ret
= get_errno(getgid());
7309 #ifdef TARGET_NR_geteuid32
7310 case TARGET_NR_geteuid32
:
7311 ret
= get_errno(geteuid());
7314 #ifdef TARGET_NR_getegid32
7315 case TARGET_NR_getegid32
:
7316 ret
= get_errno(getegid());
7319 #ifdef TARGET_NR_setreuid32
7320 case TARGET_NR_setreuid32
:
7321 ret
= get_errno(setreuid(arg1
, arg2
));
7324 #ifdef TARGET_NR_setregid32
7325 case TARGET_NR_setregid32
:
7326 ret
= get_errno(setregid(arg1
, arg2
));
7329 #ifdef TARGET_NR_getgroups32
7330 case TARGET_NR_getgroups32
:
7332 int gidsetsize
= arg1
;
7333 uint32_t *target_grouplist
;
7337 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7338 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7339 if (gidsetsize
== 0)
7341 if (!is_error(ret
)) {
7342 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
7343 if (!target_grouplist
) {
7344 ret
= -TARGET_EFAULT
;
7347 for(i
= 0;i
< ret
; i
++)
7348 target_grouplist
[i
] = tswap32(grouplist
[i
]);
7349 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
7354 #ifdef TARGET_NR_setgroups32
7355 case TARGET_NR_setgroups32
:
7357 int gidsetsize
= arg1
;
7358 uint32_t *target_grouplist
;
7362 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7363 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
7364 if (!target_grouplist
) {
7365 ret
= -TARGET_EFAULT
;
7368 for(i
= 0;i
< gidsetsize
; i
++)
7369 grouplist
[i
] = tswap32(target_grouplist
[i
]);
7370 unlock_user(target_grouplist
, arg2
, 0);
7371 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7375 #ifdef TARGET_NR_fchown32
7376 case TARGET_NR_fchown32
:
7377 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
7380 #ifdef TARGET_NR_setresuid32
7381 case TARGET_NR_setresuid32
:
7382 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
7385 #ifdef TARGET_NR_getresuid32
7386 case TARGET_NR_getresuid32
:
7388 uid_t ruid
, euid
, suid
;
7389 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7390 if (!is_error(ret
)) {
7391 if (put_user_u32(ruid
, arg1
)
7392 || put_user_u32(euid
, arg2
)
7393 || put_user_u32(suid
, arg3
))
7399 #ifdef TARGET_NR_setresgid32
7400 case TARGET_NR_setresgid32
:
7401 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
7404 #ifdef TARGET_NR_getresgid32
7405 case TARGET_NR_getresgid32
:
7407 gid_t rgid
, egid
, sgid
;
7408 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7409 if (!is_error(ret
)) {
7410 if (put_user_u32(rgid
, arg1
)
7411 || put_user_u32(egid
, arg2
)
7412 || put_user_u32(sgid
, arg3
))
7418 #ifdef TARGET_NR_chown32
7419 case TARGET_NR_chown32
:
7420 if (!(p
= lock_user_string(arg1
)))
7422 ret
= get_errno(chown(p
, arg2
, arg3
));
7423 unlock_user(p
, arg1
, 0);
7426 #ifdef TARGET_NR_setuid32
7427 case TARGET_NR_setuid32
:
7428 ret
= get_errno(setuid(arg1
));
7431 #ifdef TARGET_NR_setgid32
7432 case TARGET_NR_setgid32
:
7433 ret
= get_errno(setgid(arg1
));
7436 #ifdef TARGET_NR_setfsuid32
7437 case TARGET_NR_setfsuid32
:
7438 ret
= get_errno(setfsuid(arg1
));
7441 #ifdef TARGET_NR_setfsgid32
7442 case TARGET_NR_setfsgid32
:
7443 ret
= get_errno(setfsgid(arg1
));
7447 case TARGET_NR_pivot_root
:
7449 #ifdef TARGET_NR_mincore
7450 case TARGET_NR_mincore
:
7453 ret
= -TARGET_EFAULT
;
7454 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
7456 if (!(p
= lock_user_string(arg3
)))
7458 ret
= get_errno(mincore(a
, arg2
, p
));
7459 unlock_user(p
, arg3
, ret
);
7461 unlock_user(a
, arg1
, 0);
7465 #ifdef TARGET_NR_arm_fadvise64_64
7466 case TARGET_NR_arm_fadvise64_64
:
7469 * arm_fadvise64_64 looks like fadvise64_64 but
7470 * with different argument order
7478 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7479 #ifdef TARGET_NR_fadvise64_64
7480 case TARGET_NR_fadvise64_64
:
7482 #ifdef TARGET_NR_fadvise64
7483 case TARGET_NR_fadvise64
:
7487 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
7488 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
7489 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
7490 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
7494 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
7497 #ifdef TARGET_NR_madvise
7498 case TARGET_NR_madvise
:
7499 /* A straight passthrough may not be safe because qemu sometimes
7500 turns private file-backed mappings into anonymous mappings.
7501 This will break MADV_DONTNEED.
7502 This is a hint, so ignoring and returning success is ok. */
7506 #if TARGET_ABI_BITS == 32
7507 case TARGET_NR_fcntl64
:
7511 struct target_flock64
*target_fl
;
7513 struct target_eabi_flock64
*target_efl
;
7516 cmd
= target_to_host_fcntl_cmd(arg2
);
7517 if (cmd
== -TARGET_EINVAL
)
7521 case TARGET_F_GETLK64
:
7523 if (((CPUARMState
*)cpu_env
)->eabi
) {
7524 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
7526 fl
.l_type
= tswap16(target_efl
->l_type
);
7527 fl
.l_whence
= tswap16(target_efl
->l_whence
);
7528 fl
.l_start
= tswap64(target_efl
->l_start
);
7529 fl
.l_len
= tswap64(target_efl
->l_len
);
7530 fl
.l_pid
= tswap32(target_efl
->l_pid
);
7531 unlock_user_struct(target_efl
, arg3
, 0);
7535 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
7537 fl
.l_type
= tswap16(target_fl
->l_type
);
7538 fl
.l_whence
= tswap16(target_fl
->l_whence
);
7539 fl
.l_start
= tswap64(target_fl
->l_start
);
7540 fl
.l_len
= tswap64(target_fl
->l_len
);
7541 fl
.l_pid
= tswap32(target_fl
->l_pid
);
7542 unlock_user_struct(target_fl
, arg3
, 0);
7544 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
7547 if (((CPUARMState
*)cpu_env
)->eabi
) {
7548 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
7550 target_efl
->l_type
= tswap16(fl
.l_type
);
7551 target_efl
->l_whence
= tswap16(fl
.l_whence
);
7552 target_efl
->l_start
= tswap64(fl
.l_start
);
7553 target_efl
->l_len
= tswap64(fl
.l_len
);
7554 target_efl
->l_pid
= tswap32(fl
.l_pid
);
7555 unlock_user_struct(target_efl
, arg3
, 1);
7559 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
7561 target_fl
->l_type
= tswap16(fl
.l_type
);
7562 target_fl
->l_whence
= tswap16(fl
.l_whence
);
7563 target_fl
->l_start
= tswap64(fl
.l_start
);
7564 target_fl
->l_len
= tswap64(fl
.l_len
);
7565 target_fl
->l_pid
= tswap32(fl
.l_pid
);
7566 unlock_user_struct(target_fl
, arg3
, 1);
7571 case TARGET_F_SETLK64
:
7572 case TARGET_F_SETLKW64
:
7574 if (((CPUARMState
*)cpu_env
)->eabi
) {
7575 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
7577 fl
.l_type
= tswap16(target_efl
->l_type
);
7578 fl
.l_whence
= tswap16(target_efl
->l_whence
);
7579 fl
.l_start
= tswap64(target_efl
->l_start
);
7580 fl
.l_len
= tswap64(target_efl
->l_len
);
7581 fl
.l_pid
= tswap32(target_efl
->l_pid
);
7582 unlock_user_struct(target_efl
, arg3
, 0);
7586 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
7588 fl
.l_type
= tswap16(target_fl
->l_type
);
7589 fl
.l_whence
= tswap16(target_fl
->l_whence
);
7590 fl
.l_start
= tswap64(target_fl
->l_start
);
7591 fl
.l_len
= tswap64(target_fl
->l_len
);
7592 fl
.l_pid
= tswap32(target_fl
->l_pid
);
7593 unlock_user_struct(target_fl
, arg3
, 0);
7595 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
7598 ret
= do_fcntl(arg1
, arg2
, arg3
);
7604 #ifdef TARGET_NR_cacheflush
7605 case TARGET_NR_cacheflush
:
7606 /* self-modifying code is handled automatically, so nothing needed */
7610 #ifdef TARGET_NR_security
7611 case TARGET_NR_security
:
7614 #ifdef TARGET_NR_getpagesize
7615 case TARGET_NR_getpagesize
:
7616 ret
= TARGET_PAGE_SIZE
;
7619 case TARGET_NR_gettid
:
7620 ret
= get_errno(gettid());
7622 #ifdef TARGET_NR_readahead
7623 case TARGET_NR_readahead
:
7624 #if TARGET_ABI_BITS == 32
7625 if (regpairs_aligned(cpu_env
)) {
7630 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
7632 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
7636 #ifdef TARGET_NR_setxattr
7637 case TARGET_NR_setxattr
:
7638 case TARGET_NR_lsetxattr
:
7639 case TARGET_NR_fsetxattr
:
7640 case TARGET_NR_getxattr
:
7641 case TARGET_NR_lgetxattr
:
7642 case TARGET_NR_fgetxattr
:
7643 case TARGET_NR_listxattr
:
7644 case TARGET_NR_llistxattr
:
7645 case TARGET_NR_flistxattr
:
7646 case TARGET_NR_removexattr
:
7647 case TARGET_NR_lremovexattr
:
7648 case TARGET_NR_fremovexattr
:
7649 ret
= -TARGET_EOPNOTSUPP
;
7652 #ifdef TARGET_NR_set_thread_area
7653 case TARGET_NR_set_thread_area
:
7654 #if defined(TARGET_MIPS)
7655 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
7658 #elif defined(TARGET_CRIS)
7660 ret
= -TARGET_EINVAL
;
7662 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
7666 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7667 ret
= do_set_thread_area(cpu_env
, arg1
);
7670 goto unimplemented_nowarn
;
7673 #ifdef TARGET_NR_get_thread_area
7674 case TARGET_NR_get_thread_area
:
7675 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7676 ret
= do_get_thread_area(cpu_env
, arg1
);
7678 goto unimplemented_nowarn
;
7681 #ifdef TARGET_NR_getdomainname
7682 case TARGET_NR_getdomainname
:
7683 goto unimplemented_nowarn
;
7686 #ifdef TARGET_NR_clock_gettime
7687 case TARGET_NR_clock_gettime
:
7690 ret
= get_errno(clock_gettime(arg1
, &ts
));
7691 if (!is_error(ret
)) {
7692 host_to_target_timespec(arg2
, &ts
);
7697 #ifdef TARGET_NR_clock_getres
7698 case TARGET_NR_clock_getres
:
7701 ret
= get_errno(clock_getres(arg1
, &ts
));
7702 if (!is_error(ret
)) {
7703 host_to_target_timespec(arg2
, &ts
);
7708 #ifdef TARGET_NR_clock_nanosleep
7709 case TARGET_NR_clock_nanosleep
:
7712 target_to_host_timespec(&ts
, arg3
);
7713 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
7715 host_to_target_timespec(arg4
, &ts
);
7720 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7721 case TARGET_NR_set_tid_address
:
7722 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
7726 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7727 case TARGET_NR_tkill
:
7728 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
7732 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7733 case TARGET_NR_tgkill
:
7734 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
7735 target_to_host_signal(arg3
)));
7739 #ifdef TARGET_NR_set_robust_list
7740 case TARGET_NR_set_robust_list
:
7741 goto unimplemented_nowarn
;
7744 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7745 case TARGET_NR_utimensat
:
7747 struct timespec
*tsp
, ts
[2];
7751 target_to_host_timespec(ts
, arg3
);
7752 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
7756 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
7758 if (!(p
= lock_user_string(arg2
))) {
7759 ret
= -TARGET_EFAULT
;
7762 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
7763 unlock_user(p
, arg2
, 0);
7768 #if defined(CONFIG_USE_NPTL)
7769 case TARGET_NR_futex
:
7770 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7773 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7774 case TARGET_NR_inotify_init
:
7775 ret
= get_errno(sys_inotify_init());
7778 #ifdef CONFIG_INOTIFY1
7779 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7780 case TARGET_NR_inotify_init1
:
7781 ret
= get_errno(sys_inotify_init1(arg1
));
7785 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7786 case TARGET_NR_inotify_add_watch
:
7787 p
= lock_user_string(arg2
);
7788 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
7789 unlock_user(p
, arg2
, 0);
7792 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7793 case TARGET_NR_inotify_rm_watch
:
7794 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
7798 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7799 case TARGET_NR_mq_open
:
7801 struct mq_attr posix_mq_attr
;
7803 p
= lock_user_string(arg1
- 1);
7805 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
7806 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
7807 unlock_user (p
, arg1
, 0);
7811 case TARGET_NR_mq_unlink
:
7812 p
= lock_user_string(arg1
- 1);
7813 ret
= get_errno(mq_unlink(p
));
7814 unlock_user (p
, arg1
, 0);
7817 case TARGET_NR_mq_timedsend
:
7821 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
7823 target_to_host_timespec(&ts
, arg5
);
7824 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
7825 host_to_target_timespec(arg5
, &ts
);
7828 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
7829 unlock_user (p
, arg2
, arg3
);
7833 case TARGET_NR_mq_timedreceive
:
7838 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
7840 target_to_host_timespec(&ts
, arg5
);
7841 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
7842 host_to_target_timespec(arg5
, &ts
);
7845 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
7846 unlock_user (p
, arg2
, arg3
);
7848 put_user_u32(prio
, arg4
);
7852 /* Not implemented for now... */
7853 /* case TARGET_NR_mq_notify: */
7856 case TARGET_NR_mq_getsetattr
:
7858 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
7861 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
7862 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
7865 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
7866 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
7873 #ifdef CONFIG_SPLICE
7874 #ifdef TARGET_NR_tee
7877 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
7881 #ifdef TARGET_NR_splice
7882 case TARGET_NR_splice
:
7884 loff_t loff_in
, loff_out
;
7885 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
7887 get_user_u64(loff_in
, arg2
);
7888 ploff_in
= &loff_in
;
7891 get_user_u64(loff_out
, arg2
);
7892 ploff_out
= &loff_out
;
7894 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
7898 #ifdef TARGET_NR_vmsplice
7899 case TARGET_NR_vmsplice
:
7904 vec
= alloca(count
* sizeof(struct iovec
));
7905 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
7907 ret
= get_errno(vmsplice(arg1
, vec
, count
, arg4
));
7908 unlock_iovec(vec
, arg2
, count
, 0);
7912 #endif /* CONFIG_SPLICE */
7913 #ifdef CONFIG_EVENTFD
7914 #if defined(TARGET_NR_eventfd)
7915 case TARGET_NR_eventfd
:
7916 ret
= get_errno(eventfd(arg1
, 0));
7919 #if defined(TARGET_NR_eventfd2)
7920 case TARGET_NR_eventfd2
:
7921 ret
= get_errno(eventfd(arg1
, arg2
));
7924 #endif /* CONFIG_EVENTFD */
7925 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7926 case TARGET_NR_fallocate
:
7927 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
7930 #if defined(CONFIG_SYNC_FILE_RANGE)
7931 #if defined(TARGET_NR_sync_file_range)
7932 case TARGET_NR_sync_file_range
:
7933 #if TARGET_ABI_BITS == 32
7934 #if defined(TARGET_MIPS)
7935 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
7936 target_offset64(arg5
, arg6
), arg7
));
7938 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
7939 target_offset64(arg4
, arg5
), arg6
));
7940 #endif /* !TARGET_MIPS */
7942 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
7946 #if defined(TARGET_NR_sync_file_range2)
7947 case TARGET_NR_sync_file_range2
:
7948 /* This is like sync_file_range but the arguments are reordered */
7949 #if TARGET_ABI_BITS == 32
7950 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
7951 target_offset64(arg5
, arg6
), arg2
));
7953 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
7958 #if defined(CONFIG_EPOLL)
7959 #if defined(TARGET_NR_epoll_create)
7960 case TARGET_NR_epoll_create
:
7961 ret
= get_errno(epoll_create(arg1
));
7964 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7965 case TARGET_NR_epoll_create1
:
7966 ret
= get_errno(epoll_create1(arg1
));
7969 #if defined(TARGET_NR_epoll_ctl)
7970 case TARGET_NR_epoll_ctl
:
7972 struct epoll_event ep
;
7973 struct epoll_event
*epp
= 0;
7975 struct target_epoll_event
*target_ep
;
7976 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
7979 ep
.events
= tswap32(target_ep
->events
);
7980 /* The epoll_data_t union is just opaque data to the kernel,
7981 * so we transfer all 64 bits across and need not worry what
7982 * actual data type it is.
7984 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
7985 unlock_user_struct(target_ep
, arg4
, 0);
7988 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
7993 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7994 #define IMPLEMENT_EPOLL_PWAIT
7996 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7997 #if defined(TARGET_NR_epoll_wait)
7998 case TARGET_NR_epoll_wait
:
8000 #if defined(IMPLEMENT_EPOLL_PWAIT)
8001 case TARGET_NR_epoll_pwait
:
8004 struct target_epoll_event
*target_ep
;
8005 struct epoll_event
*ep
;
8007 int maxevents
= arg3
;
8010 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
8011 maxevents
* sizeof(struct target_epoll_event
), 1);
8016 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
8019 #if defined(IMPLEMENT_EPOLL_PWAIT)
8020 case TARGET_NR_epoll_pwait
:
8022 target_sigset_t
*target_set
;
8023 sigset_t _set
, *set
= &_set
;
8026 target_set
= lock_user(VERIFY_READ
, arg5
,
8027 sizeof(target_sigset_t
), 1);
8029 unlock_user(target_ep
, arg2
, 0);
8032 target_to_host_sigset(set
, target_set
);
8033 unlock_user(target_set
, arg5
, 0);
8038 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
8042 #if defined(TARGET_NR_epoll_wait)
8043 case TARGET_NR_epoll_wait
:
8044 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
8048 ret
= -TARGET_ENOSYS
;
8050 if (!is_error(ret
)) {
8052 for (i
= 0; i
< ret
; i
++) {
8053 target_ep
[i
].events
= tswap32(ep
[i
].events
);
8054 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
8057 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
8062 #ifdef TARGET_NR_prlimit64
8063 case TARGET_NR_prlimit64
:
8065 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8066 struct target_rlimit64
*target_rnew
, *target_rold
;
8067 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
8069 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
8072 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
8073 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
8074 unlock_user_struct(target_rnew
, arg3
, 0);
8078 ret
= get_errno(sys_prlimit64(arg1
, arg2
, rnewp
, arg4
? &rold
: 0));
8079 if (!is_error(ret
) && arg4
) {
8080 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
8083 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
8084 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
8085 unlock_user_struct(target_rold
, arg4
, 1);
8092 gemu_log("qemu: Unsupported syscall: %d\n", num
);
8093 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8094 unimplemented_nowarn
:
8096 ret
= -TARGET_ENOSYS
;
8101 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
8104 print_syscall_ret(num
, ret
);
8107 ret
= -TARGET_EFAULT
;