4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
31 #include <sys/types.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
45 int __clone2(int (*fn
)(void *), void *child_stack_base
,
46 size_t stack_size
, int flags
, void *arg
, ...);
48 #include <sys/socket.h>
52 #include <sys/times.h>
55 #include <sys/statfs.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include <qemu-common.h>
68 #include <sys/eventfd.h>
71 #include <sys/epoll.h>
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/utsname.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
88 #include <linux/mtio.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
95 #include "linux_loop.h"
96 #include "cpu-uname.h"
99 #include "qemu-common.h"
101 #if defined(CONFIG_USE_NPTL)
102 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
103 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
105 /* XXX: Hardcode the above values. */
106 #define CLONE_NPTL_FLAGS2 0
111 //#include <linux/msdos_fs.h>
112 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
113 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
124 #define _syscall0(type,name) \
125 static type name (void) \
127 return syscall(__NR_##name); \
130 #define _syscall1(type,name,type1,arg1) \
131 static type name (type1 arg1) \
133 return syscall(__NR_##name, arg1); \
136 #define _syscall2(type,name,type1,arg1,type2,arg2) \
137 static type name (type1 arg1,type2 arg2) \
139 return syscall(__NR_##name, arg1, arg2); \
142 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
143 static type name (type1 arg1,type2 arg2,type3 arg3) \
145 return syscall(__NR_##name, arg1, arg2, arg3); \
148 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
149 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
151 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
154 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
156 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
158 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
162 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5,type6,arg6) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
171 #define __NR_sys_uname __NR_uname
172 #define __NR_sys_faccessat __NR_faccessat
173 #define __NR_sys_fchmodat __NR_fchmodat
174 #define __NR_sys_fchownat __NR_fchownat
175 #define __NR_sys_fstatat64 __NR_fstatat64
176 #define __NR_sys_futimesat __NR_futimesat
177 #define __NR_sys_getcwd1 __NR_getcwd
178 #define __NR_sys_getdents __NR_getdents
179 #define __NR_sys_getdents64 __NR_getdents64
180 #define __NR_sys_getpriority __NR_getpriority
181 #define __NR_sys_linkat __NR_linkat
182 #define __NR_sys_mkdirat __NR_mkdirat
183 #define __NR_sys_mknodat __NR_mknodat
184 #define __NR_sys_newfstatat __NR_newfstatat
185 #define __NR_sys_openat __NR_openat
186 #define __NR_sys_readlinkat __NR_readlinkat
187 #define __NR_sys_renameat __NR_renameat
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_symlinkat __NR_symlinkat
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_unlinkat __NR_unlinkat
194 #define __NR_sys_utimensat __NR_utimensat
195 #define __NR_sys_futex __NR_futex
196 #define __NR_sys_inotify_init __NR_inotify_init
197 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
198 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
200 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
202 #define __NR__llseek __NR_lseek
206 _syscall0(int, gettid
)
208 /* This is a replacement for the host gettid() and must return a host
210 static int gettid(void) {
214 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
215 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
216 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
218 _syscall2(int, sys_getpriority
, int, which
, int, who
);
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
221 loff_t
*, res
, uint
, wh
);
223 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
224 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
225 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
226 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
228 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
229 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group
,int,error_code
)
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address
,int *,tidptr
)
237 #if defined(CONFIG_USE_NPTL)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
240 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
245 unsigned long *, user_mask_ptr
);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
248 unsigned long *, user_mask_ptr
);
250 static bitmask_transtbl fcntl_flags_tbl
[] = {
251 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
252 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
253 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
254 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
255 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
256 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
257 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
258 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
259 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
260 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
261 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
262 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
263 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
264 #if defined(O_DIRECT)
265 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
270 #define COPY_UTSNAME_FIELD(dest, src) \
272 /* __NEW_UTS_LEN doesn't include terminating null */ \
273 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
274 (dest)[__NEW_UTS_LEN] = '\0'; \
277 static int sys_uname(struct new_utsname
*buf
)
279 struct utsname uts_buf
;
281 if (uname(&uts_buf
) < 0)
285 * Just in case these have some differences, we
286 * translate utsname to new_utsname (which is the
287 * struct linux kernel uses).
290 memset(buf
, 0, sizeof(*buf
));
291 COPY_UTSNAME_FIELD(buf
->sysname
, uts_buf
.sysname
);
292 COPY_UTSNAME_FIELD(buf
->nodename
, uts_buf
.nodename
);
293 COPY_UTSNAME_FIELD(buf
->release
, uts_buf
.release
);
294 COPY_UTSNAME_FIELD(buf
->version
, uts_buf
.version
);
295 COPY_UTSNAME_FIELD(buf
->machine
, uts_buf
.machine
);
297 COPY_UTSNAME_FIELD(buf
->domainname
, uts_buf
.domainname
);
301 #undef COPY_UTSNAME_FIELD
/*
 * Emulated getcwd syscall.  Unlike libc getcwd(), the kernel syscall
 * returns the number of bytes placed in 'buf' (path plus NUL) on
 * success, so reproduce that convention here.
 */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) != NULL) {
      /* Kernel convention: length of the path including the NUL. */
      return strlen(buf) + 1;
  }
  /* getcwd() already set errno for us. */
  return -1;
}
315 * Host system seems to have atfile syscall stubs available. We
316 * now enable them one by one as specified by target syscall_nr.h.
319 #ifdef TARGET_NR_faccessat
/* Uniform sys_* entry point: forward to host faccessat(2), no AT_* flags. */
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
  return faccessat(dirfd, pathname, mode, 0);
}
325 #ifdef TARGET_NR_fchmodat
/* Uniform sys_* entry point: forward to host fchmodat(2), no AT_* flags. */
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
  return fchmodat(dirfd, pathname, mode, 0);
}
331 #if defined(TARGET_NR_fchownat)
/* Uniform sys_* entry point: forward all arguments to host fchownat(2). */
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
  return fchownat(dirfd, pathname, owner, group, flags);
}
338 #ifdef __NR_fstatat64
/* Uniform sys_* entry point: forward to the host's fstatat(2). */
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
  return fstatat(dirfd, pathname, buf, flags);
}
345 #ifdef __NR_newfstatat
/* Same as sys_fstatat64 but for the __NR_newfstatat spelling: host fstatat(2). */
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
  return fstatat(dirfd, pathname, buf, flags);
}
352 #ifdef TARGET_NR_futimesat
/* Uniform sys_* entry point: forward to host futimesat(2). */
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
  return futimesat(dirfd, pathname, times);
}
359 #ifdef TARGET_NR_linkat
/* Uniform sys_* entry point: forward all arguments to host linkat(2). */
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
  return linkat(olddirfd, oldpath, newdirfd, newpath, flags);
}
366 #ifdef TARGET_NR_mkdirat
/* Uniform sys_* entry point: forward to host mkdirat(2). */
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
  return mkdirat(dirfd, pathname, mode);
}
372 #ifdef TARGET_NR_mknodat
/* Uniform sys_* entry point: forward to host mknodat(2). */
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
  return mknodat(dirfd, pathname, mode, dev);
}
379 #ifdef TARGET_NR_openat
380 static int sys_openat(int dirfd
, const char *pathname
, int flags
, ...)
383 * open(2) has extra parameter 'mode' when called with
386 if ((flags
& O_CREAT
) != 0) {
391 * Get the 'mode' parameter and translate it to
395 mode
= va_arg(ap
, mode_t
);
396 mode
= target_to_host_bitmask(mode
, fcntl_flags_tbl
);
399 return (openat(dirfd
, pathname
, flags
, mode
));
401 return (openat(dirfd
, pathname
, flags
));
404 #ifdef TARGET_NR_readlinkat
/* Uniform sys_* entry point: forward to host readlinkat(2). */
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
  return readlinkat(dirfd, pathname, buf, bufsiz);
}
410 #ifdef TARGET_NR_renameat
411 static int sys_renameat(int olddirfd
, const char *oldpath
,
412 int newdirfd
, const char *newpath
)
414 return (renameat(olddirfd
, oldpath
, newdirfd
, newpath
));
417 #ifdef TARGET_NR_symlinkat
/* Uniform sys_* entry point: forward to host symlinkat(2). */
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
  return symlinkat(oldpath, newdirfd, newpath);
}
423 #ifdef TARGET_NR_unlinkat
/* Uniform sys_* entry point: forward to host unlinkat(2). */
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
  return unlinkat(dirfd, pathname, flags);
}
429 #else /* !CONFIG_ATFILE */
432 * Try direct syscalls instead
434 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
435 _syscall3(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
)
437 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
438 _syscall3(int,sys_fchmodat
,int,dirfd
,const char *,pathname
, mode_t
,mode
)
440 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
441 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
442 uid_t
,owner
,gid_t
,group
,int,flags
)
444 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
445 defined(__NR_fstatat64)
446 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
447 struct stat
*,buf
,int,flags
)
449 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
450 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
451 const struct timeval
*,times
)
453 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
454 defined(__NR_newfstatat)
455 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
456 struct stat
*,buf
,int,flags
)
458 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
459 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
460 int,newdirfd
,const char *,newpath
,int,flags
)
462 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
463 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
465 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
466 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
467 mode_t
,mode
,dev_t
,dev
)
469 #if defined(TARGET_NR_openat) && defined(__NR_openat)
470 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
472 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
473 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
474 char *,buf
,size_t,bufsize
)
476 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
477 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
478 int,newdirfd
,const char *,newpath
)
480 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
481 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
482 int,newdirfd
,const char *,newpath
)
484 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
485 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
488 #endif /* CONFIG_ATFILE */
490 #ifdef CONFIG_UTIMENSAT
/*
 * Emulated utimensat(2).  The kernel interface allows a NULL pathname,
 * meaning "operate on the fd itself"; the libc has no such form, so we
 * dispatch to futimens(3) in that case.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
  if (pathname == NULL) {
      return futimens(dirfd, times);
  }
  return utimensat(dirfd, pathname, times, flags);
}
500 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
501 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
502 const struct timespec
*,tsp
,int,flags
)
504 #endif /* CONFIG_UTIMENSAT */
506 #ifdef CONFIG_INOTIFY
507 #include <sys/inotify.h>
509 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Uniform sys_* entry point: forward to host inotify_init(2). */
static int sys_inotify_init(void)
{
  return inotify_init();
}
515 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Uniform sys_* entry point: forward to host inotify_add_watch(2). */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
  return inotify_add_watch(fd, pathname, mask);
}
521 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Uniform sys_* entry point: forward to host inotify_rm_watch(2). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return inotify_rm_watch(fd, wd);
}
527 #ifdef CONFIG_INOTIFY1
528 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
529 static int sys_inotify_init1(int flags
)
531 return (inotify_init1(flags
));
536 /* Userspace can usually survive runtime without inotify */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY */
543 #if defined(TARGET_NR_ppoll)
545 # define __NR_ppoll -1
547 #define __NR_sys_ppoll __NR_ppoll
548 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
549 struct timespec
*, timeout
, const __sigset_t
*, sigmask
,
553 #if defined(TARGET_NR_pselect6)
554 #ifndef __NR_pselect6
555 # define __NR_pselect6 -1
557 #define __NR_sys_pselect6 __NR_pselect6
558 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
559 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
562 #if defined(TARGET_NR_prlimit64)
563 #ifndef __NR_prlimit64
564 # define __NR_prlimit64 -1
566 #define __NR_sys_prlimit64 __NR_prlimit64
567 /* The glibc rlimit structure may not be that used by the underlying syscall */
568 struct host_rlimit64
{
572 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
573 const struct host_rlimit64
*, new_limit
,
574 struct host_rlimit64
*, old_limit
)
577 extern int personality(int);
578 extern int flock(int, int);
579 extern int setfsuid(int);
580 extern int setfsgid(int);
581 extern int setgroups(int, gid_t
*);
583 #define ERRNO_TABLE_SIZE 1200
585 /* target_to_host_errno_table[] is initialized from
586 * host_to_target_errno_table[] in syscall_init(). */
587 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
591 * This list is the union of errno values overridden in asm-<arch>/errno.h
592 * minus the errnos that are not actually generic to all archs.
594 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
595 [EIDRM
] = TARGET_EIDRM
,
596 [ECHRNG
] = TARGET_ECHRNG
,
597 [EL2NSYNC
] = TARGET_EL2NSYNC
,
598 [EL3HLT
] = TARGET_EL3HLT
,
599 [EL3RST
] = TARGET_EL3RST
,
600 [ELNRNG
] = TARGET_ELNRNG
,
601 [EUNATCH
] = TARGET_EUNATCH
,
602 [ENOCSI
] = TARGET_ENOCSI
,
603 [EL2HLT
] = TARGET_EL2HLT
,
604 [EDEADLK
] = TARGET_EDEADLK
,
605 [ENOLCK
] = TARGET_ENOLCK
,
606 [EBADE
] = TARGET_EBADE
,
607 [EBADR
] = TARGET_EBADR
,
608 [EXFULL
] = TARGET_EXFULL
,
609 [ENOANO
] = TARGET_ENOANO
,
610 [EBADRQC
] = TARGET_EBADRQC
,
611 [EBADSLT
] = TARGET_EBADSLT
,
612 [EBFONT
] = TARGET_EBFONT
,
613 [ENOSTR
] = TARGET_ENOSTR
,
614 [ENODATA
] = TARGET_ENODATA
,
615 [ETIME
] = TARGET_ETIME
,
616 [ENOSR
] = TARGET_ENOSR
,
617 [ENONET
] = TARGET_ENONET
,
618 [ENOPKG
] = TARGET_ENOPKG
,
619 [EREMOTE
] = TARGET_EREMOTE
,
620 [ENOLINK
] = TARGET_ENOLINK
,
621 [EADV
] = TARGET_EADV
,
622 [ESRMNT
] = TARGET_ESRMNT
,
623 [ECOMM
] = TARGET_ECOMM
,
624 [EPROTO
] = TARGET_EPROTO
,
625 [EDOTDOT
] = TARGET_EDOTDOT
,
626 [EMULTIHOP
] = TARGET_EMULTIHOP
,
627 [EBADMSG
] = TARGET_EBADMSG
,
628 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
629 [EOVERFLOW
] = TARGET_EOVERFLOW
,
630 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
631 [EBADFD
] = TARGET_EBADFD
,
632 [EREMCHG
] = TARGET_EREMCHG
,
633 [ELIBACC
] = TARGET_ELIBACC
,
634 [ELIBBAD
] = TARGET_ELIBBAD
,
635 [ELIBSCN
] = TARGET_ELIBSCN
,
636 [ELIBMAX
] = TARGET_ELIBMAX
,
637 [ELIBEXEC
] = TARGET_ELIBEXEC
,
638 [EILSEQ
] = TARGET_EILSEQ
,
639 [ENOSYS
] = TARGET_ENOSYS
,
640 [ELOOP
] = TARGET_ELOOP
,
641 [ERESTART
] = TARGET_ERESTART
,
642 [ESTRPIPE
] = TARGET_ESTRPIPE
,
643 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
644 [EUSERS
] = TARGET_EUSERS
,
645 [ENOTSOCK
] = TARGET_ENOTSOCK
,
646 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
647 [EMSGSIZE
] = TARGET_EMSGSIZE
,
648 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
649 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
650 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
651 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
652 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
653 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
654 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
655 [EADDRINUSE
] = TARGET_EADDRINUSE
,
656 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
657 [ENETDOWN
] = TARGET_ENETDOWN
,
658 [ENETUNREACH
] = TARGET_ENETUNREACH
,
659 [ENETRESET
] = TARGET_ENETRESET
,
660 [ECONNABORTED
] = TARGET_ECONNABORTED
,
661 [ECONNRESET
] = TARGET_ECONNRESET
,
662 [ENOBUFS
] = TARGET_ENOBUFS
,
663 [EISCONN
] = TARGET_EISCONN
,
664 [ENOTCONN
] = TARGET_ENOTCONN
,
665 [EUCLEAN
] = TARGET_EUCLEAN
,
666 [ENOTNAM
] = TARGET_ENOTNAM
,
667 [ENAVAIL
] = TARGET_ENAVAIL
,
668 [EISNAM
] = TARGET_EISNAM
,
669 [EREMOTEIO
] = TARGET_EREMOTEIO
,
670 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
671 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
672 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
673 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
674 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
675 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
676 [EALREADY
] = TARGET_EALREADY
,
677 [EINPROGRESS
] = TARGET_EINPROGRESS
,
678 [ESTALE
] = TARGET_ESTALE
,
679 [ECANCELED
] = TARGET_ECANCELED
,
680 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
681 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
683 [ENOKEY
] = TARGET_ENOKEY
,
686 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
689 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
692 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
695 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
697 #ifdef ENOTRECOVERABLE
698 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
702 static inline int host_to_target_errno(int err
)
704 if(host_to_target_errno_table
[err
])
705 return host_to_target_errno_table
[err
];
709 static inline int target_to_host_errno(int err
)
711 if (target_to_host_errno_table
[err
])
712 return target_to_host_errno_table
[err
];
716 static inline abi_long
get_errno(abi_long ret
)
719 return -host_to_target_errno(errno
);
724 static inline int is_error(abi_long ret
)
726 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
/* Return the host's message text for a target errno value. */
char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
/* Current program break of the emulated target, in target addresses. */
static abi_ulong target_brk;
/* Break value set at load time; do_brk() never goes below this. */
static abi_ulong target_original_brk;
/* First address past the pages already mapped for the target heap. */
static abi_ulong brk_page;
738 void target_set_brk(abi_ulong new_brk
)
740 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
741 brk_page
= HOST_PAGE_ALIGN(target_brk
);
744 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
745 #define DEBUGF_BRK(message, args...)
747 /* do_brk() must return target values and target errnos. */
748 abi_long
do_brk(abi_ulong new_brk
)
750 abi_long mapped_addr
;
753 DEBUGF_BRK("do_brk(%#010x) -> ", new_brk
);
756 DEBUGF_BRK("%#010x (!new_brk)\n", target_brk
);
759 if (new_brk
< target_original_brk
) {
760 DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk
);
764 /* If the new brk is less than the highest page reserved to the
765 * target heap allocation, set it and we're almost done... */
766 if (new_brk
<= brk_page
) {
767 /* Heap contents are initialized to zero, as for anonymous
769 if (new_brk
> target_brk
) {
770 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
772 target_brk
= new_brk
;
773 DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk
);
777 /* We need to allocate more memory after the brk... Note that
778 * we don't use MAP_FIXED because that will map over the top of
779 * any existing mapping (like the one with the host libc or qemu
780 * itself); instead we treat "mapped but at wrong address" as
781 * a failure and unmap again.
783 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
784 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
785 PROT_READ
|PROT_WRITE
,
786 MAP_ANON
|MAP_PRIVATE
, 0, 0));
788 if (mapped_addr
== brk_page
) {
789 target_brk
= new_brk
;
790 brk_page
= HOST_PAGE_ALIGN(target_brk
);
791 DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk
);
793 } else if (mapped_addr
!= -1) {
794 /* Mapped but at wrong address, meaning there wasn't actually
795 * enough space for this brk.
797 target_munmap(mapped_addr
, new_alloc_size
);
799 DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk
);
802 DEBUGF_BRK("%#010x (otherwise)\n", target_brk
);
805 #if defined(TARGET_ALPHA)
806 /* We (partially) emulate OSF/1 on Alpha, which requires we
807 return a proper errno, not an unchanged brk value. */
808 return -TARGET_ENOMEM
;
810 /* For everything else, return the previous break. */
814 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
815 abi_ulong target_fds_addr
,
819 abi_ulong b
, *target_fds
;
821 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
822 if (!(target_fds
= lock_user(VERIFY_READ
,
824 sizeof(abi_ulong
) * nw
,
826 return -TARGET_EFAULT
;
830 for (i
= 0; i
< nw
; i
++) {
831 /* grab the abi_ulong */
832 __get_user(b
, &target_fds
[i
]);
833 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
834 /* check the bit inside the abi_ulong */
841 unlock_user(target_fds
, target_fds_addr
, 0);
846 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
847 abi_ulong target_fds_addr
,
850 if (target_fds_addr
) {
851 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
852 return -TARGET_EFAULT
;
860 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
866 abi_ulong
*target_fds
;
868 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
869 if (!(target_fds
= lock_user(VERIFY_WRITE
,
871 sizeof(abi_ulong
) * nw
,
873 return -TARGET_EFAULT
;
876 for (i
= 0; i
< nw
; i
++) {
878 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
879 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
882 __put_user(v
, &target_fds
[i
]);
885 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
890 #if defined(__alpha__)
896 static inline abi_long
host_to_target_clock_t(long ticks
)
898 #if HOST_HZ == TARGET_HZ
901 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
905 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
906 const struct rusage
*rusage
)
908 struct target_rusage
*target_rusage
;
910 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
911 return -TARGET_EFAULT
;
912 target_rusage
->ru_utime
.tv_sec
= tswapl(rusage
->ru_utime
.tv_sec
);
913 target_rusage
->ru_utime
.tv_usec
= tswapl(rusage
->ru_utime
.tv_usec
);
914 target_rusage
->ru_stime
.tv_sec
= tswapl(rusage
->ru_stime
.tv_sec
);
915 target_rusage
->ru_stime
.tv_usec
= tswapl(rusage
->ru_stime
.tv_usec
);
916 target_rusage
->ru_maxrss
= tswapl(rusage
->ru_maxrss
);
917 target_rusage
->ru_ixrss
= tswapl(rusage
->ru_ixrss
);
918 target_rusage
->ru_idrss
= tswapl(rusage
->ru_idrss
);
919 target_rusage
->ru_isrss
= tswapl(rusage
->ru_isrss
);
920 target_rusage
->ru_minflt
= tswapl(rusage
->ru_minflt
);
921 target_rusage
->ru_majflt
= tswapl(rusage
->ru_majflt
);
922 target_rusage
->ru_nswap
= tswapl(rusage
->ru_nswap
);
923 target_rusage
->ru_inblock
= tswapl(rusage
->ru_inblock
);
924 target_rusage
->ru_oublock
= tswapl(rusage
->ru_oublock
);
925 target_rusage
->ru_msgsnd
= tswapl(rusage
->ru_msgsnd
);
926 target_rusage
->ru_msgrcv
= tswapl(rusage
->ru_msgrcv
);
927 target_rusage
->ru_nsignals
= tswapl(rusage
->ru_nsignals
);
928 target_rusage
->ru_nvcsw
= tswapl(rusage
->ru_nvcsw
);
929 target_rusage
->ru_nivcsw
= tswapl(rusage
->ru_nivcsw
);
930 unlock_user_struct(target_rusage
, target_addr
, 1);
935 static inline rlim_t
target_to_host_rlim(target_ulong target_rlim
)
937 if (target_rlim
== TARGET_RLIM_INFINITY
)
938 return RLIM_INFINITY
;
940 return tswapl(target_rlim
);
943 static inline target_ulong
host_to_target_rlim(rlim_t rlim
)
945 if (rlim
== RLIM_INFINITY
|| rlim
!= (target_long
)rlim
)
946 return TARGET_RLIM_INFINITY
;
951 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
952 abi_ulong target_tv_addr
)
954 struct target_timeval
*target_tv
;
956 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
957 return -TARGET_EFAULT
;
959 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
960 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
962 unlock_user_struct(target_tv
, target_tv_addr
, 0);
967 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
968 const struct timeval
*tv
)
970 struct target_timeval
*target_tv
;
972 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
973 return -TARGET_EFAULT
;
975 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
976 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
978 unlock_user_struct(target_tv
, target_tv_addr
, 1);
983 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
986 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
987 abi_ulong target_mq_attr_addr
)
989 struct target_mq_attr
*target_mq_attr
;
991 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
992 target_mq_attr_addr
, 1))
993 return -TARGET_EFAULT
;
995 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
996 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
997 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
998 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1000 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1005 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1006 const struct mq_attr
*attr
)
1008 struct target_mq_attr
*target_mq_attr
;
1010 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1011 target_mq_attr_addr
, 0))
1012 return -TARGET_EFAULT
;
1014 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1015 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1016 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1017 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1019 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1025 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1026 /* do_select() must return target values and target errnos. */
1027 static abi_long
do_select(int n
,
1028 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1029 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1031 fd_set rfds
, wfds
, efds
;
1032 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1033 struct timeval tv
, *tv_ptr
;
1036 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1040 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1044 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1049 if (target_tv_addr
) {
1050 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1051 return -TARGET_EFAULT
;
1057 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1059 if (!is_error(ret
)) {
1060 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1061 return -TARGET_EFAULT
;
1062 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1063 return -TARGET_EFAULT
;
1064 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1065 return -TARGET_EFAULT
;
1067 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1068 return -TARGET_EFAULT
;
1075 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1078 return pipe2(host_pipe
, flags
);
1084 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1085 int flags
, int is_pipe2
)
1089 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1092 return get_errno(ret
);
1094 /* Several targets have special calling conventions for the original
1095 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1097 #if defined(TARGET_ALPHA)
1098 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1099 return host_pipe
[0];
1100 #elif defined(TARGET_MIPS)
1101 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1102 return host_pipe
[0];
1103 #elif defined(TARGET_SH4)
1104 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1105 return host_pipe
[0];
1109 if (put_user_s32(host_pipe
[0], pipedes
)
1110 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1111 return -TARGET_EFAULT
;
1112 return get_errno(ret
);
1115 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1116 abi_ulong target_addr
,
1119 struct target_ip_mreqn
*target_smreqn
;
1121 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1123 return -TARGET_EFAULT
;
1124 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1125 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1126 if (len
== sizeof(struct target_ip_mreqn
))
1127 mreqn
->imr_ifindex
= tswapl(target_smreqn
->imr_ifindex
);
1128 unlock_user(target_smreqn
, target_addr
, 0);
1133 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1134 abi_ulong target_addr
,
1137 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1138 sa_family_t sa_family
;
1139 struct target_sockaddr
*target_saddr
;
1141 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1143 return -TARGET_EFAULT
;
1145 sa_family
= tswap16(target_saddr
->sa_family
);
1147 /* Oops. The caller might send a incomplete sun_path; sun_path
1148 * must be terminated by \0 (see the manual page), but
1149 * unfortunately it is quite common to specify sockaddr_un
1150 * length as "strlen(x->sun_path)" while it should be
1151 * "strlen(...) + 1". We'll fix that here if needed.
1152 * Linux kernel has a similar feature.
1155 if (sa_family
== AF_UNIX
) {
1156 if (len
< unix_maxlen
&& len
> 0) {
1157 char *cp
= (char*)target_saddr
;
1159 if ( cp
[len
-1] && !cp
[len
] )
1162 if (len
> unix_maxlen
)
1166 memcpy(addr
, target_saddr
, len
);
1167 addr
->sa_family
= sa_family
;
1168 unlock_user(target_saddr
, target_addr
, 0);
1173 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1174 struct sockaddr
*addr
,
1177 struct target_sockaddr
*target_saddr
;
1179 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1181 return -TARGET_EFAULT
;
1182 memcpy(target_saddr
, addr
, len
);
1183 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1184 unlock_user(target_saddr
, target_addr
, len
);
1189 /* ??? Should this also swap msgh->name? */
1190 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1191 struct target_msghdr
*target_msgh
)
1193 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1194 abi_long msg_controllen
;
1195 abi_ulong target_cmsg_addr
;
1196 struct target_cmsghdr
*target_cmsg
;
1197 socklen_t space
= 0;
1199 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1200 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1202 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1203 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1205 return -TARGET_EFAULT
;
1207 while (cmsg
&& target_cmsg
) {
1208 void *data
= CMSG_DATA(cmsg
);
1209 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1211 int len
= tswapl(target_cmsg
->cmsg_len
)
1212 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1214 space
+= CMSG_SPACE(len
);
1215 if (space
> msgh
->msg_controllen
) {
1216 space
-= CMSG_SPACE(len
);
1217 gemu_log("Host cmsg overflow\n");
1221 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1222 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1223 cmsg
->cmsg_len
= CMSG_LEN(len
);
1225 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1226 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1227 memcpy(data
, target_data
, len
);
1229 int *fd
= (int *)data
;
1230 int *target_fd
= (int *)target_data
;
1231 int i
, numfds
= len
/ sizeof(int);
1233 for (i
= 0; i
< numfds
; i
++)
1234 fd
[i
] = tswap32(target_fd
[i
]);
1237 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1238 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1240 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1242 msgh
->msg_controllen
= space
;
1246 /* ??? Should this also swap msgh->name? */
1247 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1248 struct msghdr
*msgh
)
1250 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1251 abi_long msg_controllen
;
1252 abi_ulong target_cmsg_addr
;
1253 struct target_cmsghdr
*target_cmsg
;
1254 socklen_t space
= 0;
1256 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1257 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1259 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1260 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1262 return -TARGET_EFAULT
;
1264 while (cmsg
&& target_cmsg
) {
1265 void *data
= CMSG_DATA(cmsg
);
1266 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1268 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1270 space
+= TARGET_CMSG_SPACE(len
);
1271 if (space
> msg_controllen
) {
1272 space
-= TARGET_CMSG_SPACE(len
);
1273 gemu_log("Target cmsg overflow\n");
1277 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1278 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1279 target_cmsg
->cmsg_len
= tswapl(TARGET_CMSG_LEN(len
));
1281 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1282 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1283 memcpy(target_data
, data
, len
);
1285 int *fd
= (int *)data
;
1286 int *target_fd
= (int *)target_data
;
1287 int i
, numfds
= len
/ sizeof(int);
1289 for (i
= 0; i
< numfds
; i
++)
1290 target_fd
[i
] = tswap32(fd
[i
]);
1293 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1294 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1296 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1298 target_msgh
->msg_controllen
= tswapl(space
);
1302 /* do_setsockopt() Must return target values and target errnos. */
1303 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1304 abi_ulong optval_addr
, socklen_t optlen
)
1308 struct ip_mreqn
*ip_mreq
;
1309 struct ip_mreq_source
*ip_mreq_source
;
1313 /* TCP options all take an 'int' value. */
1314 if (optlen
< sizeof(uint32_t))
1315 return -TARGET_EINVAL
;
1317 if (get_user_u32(val
, optval_addr
))
1318 return -TARGET_EFAULT
;
1319 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1326 case IP_ROUTER_ALERT
:
1330 case IP_MTU_DISCOVER
:
1336 case IP_MULTICAST_TTL
:
1337 case IP_MULTICAST_LOOP
:
1339 if (optlen
>= sizeof(uint32_t)) {
1340 if (get_user_u32(val
, optval_addr
))
1341 return -TARGET_EFAULT
;
1342 } else if (optlen
>= 1) {
1343 if (get_user_u8(val
, optval_addr
))
1344 return -TARGET_EFAULT
;
1346 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1348 case IP_ADD_MEMBERSHIP
:
1349 case IP_DROP_MEMBERSHIP
:
1350 if (optlen
< sizeof (struct target_ip_mreq
) ||
1351 optlen
> sizeof (struct target_ip_mreqn
))
1352 return -TARGET_EINVAL
;
1354 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1355 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1356 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1359 case IP_BLOCK_SOURCE
:
1360 case IP_UNBLOCK_SOURCE
:
1361 case IP_ADD_SOURCE_MEMBERSHIP
:
1362 case IP_DROP_SOURCE_MEMBERSHIP
:
1363 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1364 return -TARGET_EINVAL
;
1366 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1367 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1368 unlock_user (ip_mreq_source
, optval_addr
, 0);
1375 case TARGET_SOL_SOCKET
:
1377 /* Options with 'int' argument. */
1378 case TARGET_SO_DEBUG
:
1381 case TARGET_SO_REUSEADDR
:
1382 optname
= SO_REUSEADDR
;
1384 case TARGET_SO_TYPE
:
1387 case TARGET_SO_ERROR
:
1390 case TARGET_SO_DONTROUTE
:
1391 optname
= SO_DONTROUTE
;
1393 case TARGET_SO_BROADCAST
:
1394 optname
= SO_BROADCAST
;
1396 case TARGET_SO_SNDBUF
:
1397 optname
= SO_SNDBUF
;
1399 case TARGET_SO_RCVBUF
:
1400 optname
= SO_RCVBUF
;
1402 case TARGET_SO_KEEPALIVE
:
1403 optname
= SO_KEEPALIVE
;
1405 case TARGET_SO_OOBINLINE
:
1406 optname
= SO_OOBINLINE
;
1408 case TARGET_SO_NO_CHECK
:
1409 optname
= SO_NO_CHECK
;
1411 case TARGET_SO_PRIORITY
:
1412 optname
= SO_PRIORITY
;
1415 case TARGET_SO_BSDCOMPAT
:
1416 optname
= SO_BSDCOMPAT
;
1419 case TARGET_SO_PASSCRED
:
1420 optname
= SO_PASSCRED
;
1422 case TARGET_SO_TIMESTAMP
:
1423 optname
= SO_TIMESTAMP
;
1425 case TARGET_SO_RCVLOWAT
:
1426 optname
= SO_RCVLOWAT
;
1428 case TARGET_SO_RCVTIMEO
:
1429 optname
= SO_RCVTIMEO
;
1431 case TARGET_SO_SNDTIMEO
:
1432 optname
= SO_SNDTIMEO
;
1438 if (optlen
< sizeof(uint32_t))
1439 return -TARGET_EINVAL
;
1441 if (get_user_u32(val
, optval_addr
))
1442 return -TARGET_EFAULT
;
1443 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1447 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level
, optname
);
1448 ret
= -TARGET_ENOPROTOOPT
;
1453 /* do_getsockopt() Must return target values and target errnos. */
1454 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1455 abi_ulong optval_addr
, abi_ulong optlen
)
1462 case TARGET_SOL_SOCKET
:
1465 /* These don't just return a single integer */
1466 case TARGET_SO_LINGER
:
1467 case TARGET_SO_RCVTIMEO
:
1468 case TARGET_SO_SNDTIMEO
:
1469 case TARGET_SO_PEERCRED
:
1470 case TARGET_SO_PEERNAME
:
1472 /* Options with 'int' argument. */
1473 case TARGET_SO_DEBUG
:
1476 case TARGET_SO_REUSEADDR
:
1477 optname
= SO_REUSEADDR
;
1479 case TARGET_SO_TYPE
:
1482 case TARGET_SO_ERROR
:
1485 case TARGET_SO_DONTROUTE
:
1486 optname
= SO_DONTROUTE
;
1488 case TARGET_SO_BROADCAST
:
1489 optname
= SO_BROADCAST
;
1491 case TARGET_SO_SNDBUF
:
1492 optname
= SO_SNDBUF
;
1494 case TARGET_SO_RCVBUF
:
1495 optname
= SO_RCVBUF
;
1497 case TARGET_SO_KEEPALIVE
:
1498 optname
= SO_KEEPALIVE
;
1500 case TARGET_SO_OOBINLINE
:
1501 optname
= SO_OOBINLINE
;
1503 case TARGET_SO_NO_CHECK
:
1504 optname
= SO_NO_CHECK
;
1506 case TARGET_SO_PRIORITY
:
1507 optname
= SO_PRIORITY
;
1510 case TARGET_SO_BSDCOMPAT
:
1511 optname
= SO_BSDCOMPAT
;
1514 case TARGET_SO_PASSCRED
:
1515 optname
= SO_PASSCRED
;
1517 case TARGET_SO_TIMESTAMP
:
1518 optname
= SO_TIMESTAMP
;
1520 case TARGET_SO_RCVLOWAT
:
1521 optname
= SO_RCVLOWAT
;
1528 /* TCP options all take an 'int' value. */
1530 if (get_user_u32(len
, optlen
))
1531 return -TARGET_EFAULT
;
1533 return -TARGET_EINVAL
;
1535 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1541 if (put_user_u32(val
, optval_addr
))
1542 return -TARGET_EFAULT
;
1544 if (put_user_u8(val
, optval_addr
))
1545 return -TARGET_EFAULT
;
1547 if (put_user_u32(len
, optlen
))
1548 return -TARGET_EFAULT
;
1555 case IP_ROUTER_ALERT
:
1559 case IP_MTU_DISCOVER
:
1565 case IP_MULTICAST_TTL
:
1566 case IP_MULTICAST_LOOP
:
1567 if (get_user_u32(len
, optlen
))
1568 return -TARGET_EFAULT
;
1570 return -TARGET_EINVAL
;
1572 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1575 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1577 if (put_user_u32(len
, optlen
)
1578 || put_user_u8(val
, optval_addr
))
1579 return -TARGET_EFAULT
;
1581 if (len
> sizeof(int))
1583 if (put_user_u32(len
, optlen
)
1584 || put_user_u32(val
, optval_addr
))
1585 return -TARGET_EFAULT
;
1589 ret
= -TARGET_ENOPROTOOPT
;
1595 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1597 ret
= -TARGET_EOPNOTSUPP
;
1604 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1605 * other lock functions have a return code of 0 for failure.
1607 static abi_long
lock_iovec(int type
, struct iovec
*vec
, abi_ulong target_addr
,
1608 int count
, int copy
)
1610 struct target_iovec
*target_vec
;
1614 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1616 return -TARGET_EFAULT
;
1617 for(i
= 0;i
< count
; i
++) {
1618 base
= tswapl(target_vec
[i
].iov_base
);
1619 vec
[i
].iov_len
= tswapl(target_vec
[i
].iov_len
);
1620 if (vec
[i
].iov_len
!= 0) {
1621 vec
[i
].iov_base
= lock_user(type
, base
, vec
[i
].iov_len
, copy
);
1622 /* Don't check lock_user return value. We must call writev even
1623 if a element has invalid base address. */
1625 /* zero length pointer is ignored */
1626 vec
[i
].iov_base
= NULL
;
1629 unlock_user (target_vec
, target_addr
, 0);
1633 static abi_long
unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1634 int count
, int copy
)
1636 struct target_iovec
*target_vec
;
1640 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1642 return -TARGET_EFAULT
;
1643 for(i
= 0;i
< count
; i
++) {
1644 if (target_vec
[i
].iov_base
) {
1645 base
= tswapl(target_vec
[i
].iov_base
);
1646 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1649 unlock_user (target_vec
, target_addr
, 0);
1654 /* do_socket() Must return target values and target errnos. */
1655 static abi_long
do_socket(int domain
, int type
, int protocol
)
1657 #if defined(TARGET_MIPS)
1659 case TARGET_SOCK_DGRAM
:
1662 case TARGET_SOCK_STREAM
:
1665 case TARGET_SOCK_RAW
:
1668 case TARGET_SOCK_RDM
:
1671 case TARGET_SOCK_SEQPACKET
:
1672 type
= SOCK_SEQPACKET
;
1674 case TARGET_SOCK_PACKET
:
1679 if (domain
== PF_NETLINK
)
1680 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1681 return get_errno(socket(domain
, type
, protocol
));
1684 /* do_bind() Must return target values and target errnos. */
1685 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1691 if ((int)addrlen
< 0) {
1692 return -TARGET_EINVAL
;
1695 addr
= alloca(addrlen
+1);
1697 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1701 return get_errno(bind(sockfd
, addr
, addrlen
));
1704 /* do_connect() Must return target values and target errnos. */
1705 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1711 if ((int)addrlen
< 0) {
1712 return -TARGET_EINVAL
;
1715 addr
= alloca(addrlen
);
1717 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1721 return get_errno(connect(sockfd
, addr
, addrlen
));
1724 /* do_sendrecvmsg() Must return target values and target errnos. */
1725 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1726 int flags
, int send
)
1729 struct target_msghdr
*msgp
;
1733 abi_ulong target_vec
;
1736 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1740 return -TARGET_EFAULT
;
1741 if (msgp
->msg_name
) {
1742 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1743 msg
.msg_name
= alloca(msg
.msg_namelen
);
1744 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapl(msgp
->msg_name
),
1747 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1751 msg
.msg_name
= NULL
;
1752 msg
.msg_namelen
= 0;
1754 msg
.msg_controllen
= 2 * tswapl(msgp
->msg_controllen
);
1755 msg
.msg_control
= alloca(msg
.msg_controllen
);
1756 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1758 count
= tswapl(msgp
->msg_iovlen
);
1759 vec
= alloca(count
* sizeof(struct iovec
));
1760 target_vec
= tswapl(msgp
->msg_iov
);
1761 lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
, vec
, target_vec
, count
, send
);
1762 msg
.msg_iovlen
= count
;
1766 ret
= target_to_host_cmsg(&msg
, msgp
);
1768 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1770 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1771 if (!is_error(ret
)) {
1773 ret
= host_to_target_cmsg(msgp
, &msg
);
1778 unlock_iovec(vec
, target_vec
, count
, !send
);
1779 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1783 /* do_accept() Must return target values and target errnos. */
1784 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
1785 abi_ulong target_addrlen_addr
)
1791 if (target_addr
== 0)
1792 return get_errno(accept(fd
, NULL
, NULL
));
1794 /* linux returns EINVAL if addrlen pointer is invalid */
1795 if (get_user_u32(addrlen
, target_addrlen_addr
))
1796 return -TARGET_EINVAL
;
1798 if ((int)addrlen
< 0) {
1799 return -TARGET_EINVAL
;
1802 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1803 return -TARGET_EINVAL
;
1805 addr
= alloca(addrlen
);
1807 ret
= get_errno(accept(fd
, addr
, &addrlen
));
1808 if (!is_error(ret
)) {
1809 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1810 if (put_user_u32(addrlen
, target_addrlen_addr
))
1811 ret
= -TARGET_EFAULT
;
1816 /* do_getpeername() Must return target values and target errnos. */
1817 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
1818 abi_ulong target_addrlen_addr
)
1824 if (get_user_u32(addrlen
, target_addrlen_addr
))
1825 return -TARGET_EFAULT
;
1827 if ((int)addrlen
< 0) {
1828 return -TARGET_EINVAL
;
1831 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1832 return -TARGET_EFAULT
;
1834 addr
= alloca(addrlen
);
1836 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
1837 if (!is_error(ret
)) {
1838 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1839 if (put_user_u32(addrlen
, target_addrlen_addr
))
1840 ret
= -TARGET_EFAULT
;
1845 /* do_getsockname() Must return target values and target errnos. */
1846 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
1847 abi_ulong target_addrlen_addr
)
1853 if (get_user_u32(addrlen
, target_addrlen_addr
))
1854 return -TARGET_EFAULT
;
1856 if ((int)addrlen
< 0) {
1857 return -TARGET_EINVAL
;
1860 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1861 return -TARGET_EFAULT
;
1863 addr
= alloca(addrlen
);
1865 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
1866 if (!is_error(ret
)) {
1867 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1868 if (put_user_u32(addrlen
, target_addrlen_addr
))
1869 ret
= -TARGET_EFAULT
;
1874 /* do_socketpair() Must return target values and target errnos. */
1875 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
1876 abi_ulong target_tab_addr
)
1881 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
1882 if (!is_error(ret
)) {
1883 if (put_user_s32(tab
[0], target_tab_addr
)
1884 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
1885 ret
= -TARGET_EFAULT
;
1890 /* do_sendto() Must return target values and target errnos. */
1891 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
1892 abi_ulong target_addr
, socklen_t addrlen
)
1898 if ((int)addrlen
< 0) {
1899 return -TARGET_EINVAL
;
1902 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
1904 return -TARGET_EFAULT
;
1906 addr
= alloca(addrlen
);
1907 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1909 unlock_user(host_msg
, msg
, 0);
1912 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
1914 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
1916 unlock_user(host_msg
, msg
, 0);
1920 /* do_recvfrom() Must return target values and target errnos. */
1921 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
1922 abi_ulong target_addr
,
1923 abi_ulong target_addrlen
)
1930 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
1932 return -TARGET_EFAULT
;
1934 if (get_user_u32(addrlen
, target_addrlen
)) {
1935 ret
= -TARGET_EFAULT
;
1938 if ((int)addrlen
< 0) {
1939 ret
= -TARGET_EINVAL
;
1942 addr
= alloca(addrlen
);
1943 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
1945 addr
= NULL
; /* To keep compiler quiet. */
1946 ret
= get_errno(recv(fd
, host_msg
, len
, flags
));
1948 if (!is_error(ret
)) {
1950 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1951 if (put_user_u32(addrlen
, target_addrlen
)) {
1952 ret
= -TARGET_EFAULT
;
1956 unlock_user(host_msg
, msg
, len
);
1959 unlock_user(host_msg
, msg
, 0);
1964 #ifdef TARGET_NR_socketcall
1965 /* do_socketcall() Must return target values and target errnos. */
1966 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
1969 const int n
= sizeof(abi_ulong
);
1974 abi_ulong domain
, type
, protocol
;
1976 if (get_user_ual(domain
, vptr
)
1977 || get_user_ual(type
, vptr
+ n
)
1978 || get_user_ual(protocol
, vptr
+ 2 * n
))
1979 return -TARGET_EFAULT
;
1981 ret
= do_socket(domain
, type
, protocol
);
1987 abi_ulong target_addr
;
1990 if (get_user_ual(sockfd
, vptr
)
1991 || get_user_ual(target_addr
, vptr
+ n
)
1992 || get_user_ual(addrlen
, vptr
+ 2 * n
))
1993 return -TARGET_EFAULT
;
1995 ret
= do_bind(sockfd
, target_addr
, addrlen
);
1998 case SOCKOP_connect
:
2001 abi_ulong target_addr
;
2004 if (get_user_ual(sockfd
, vptr
)
2005 || get_user_ual(target_addr
, vptr
+ n
)
2006 || get_user_ual(addrlen
, vptr
+ 2 * n
))
2007 return -TARGET_EFAULT
;
2009 ret
= do_connect(sockfd
, target_addr
, addrlen
);
2014 abi_ulong sockfd
, backlog
;
2016 if (get_user_ual(sockfd
, vptr
)
2017 || get_user_ual(backlog
, vptr
+ n
))
2018 return -TARGET_EFAULT
;
2020 ret
= get_errno(listen(sockfd
, backlog
));
2026 abi_ulong target_addr
, target_addrlen
;
2028 if (get_user_ual(sockfd
, vptr
)
2029 || get_user_ual(target_addr
, vptr
+ n
)
2030 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2031 return -TARGET_EFAULT
;
2033 ret
= do_accept(sockfd
, target_addr
, target_addrlen
);
2036 case SOCKOP_getsockname
:
2039 abi_ulong target_addr
, target_addrlen
;
2041 if (get_user_ual(sockfd
, vptr
)
2042 || get_user_ual(target_addr
, vptr
+ n
)
2043 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2044 return -TARGET_EFAULT
;
2046 ret
= do_getsockname(sockfd
, target_addr
, target_addrlen
);
2049 case SOCKOP_getpeername
:
2052 abi_ulong target_addr
, target_addrlen
;
2054 if (get_user_ual(sockfd
, vptr
)
2055 || get_user_ual(target_addr
, vptr
+ n
)
2056 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2057 return -TARGET_EFAULT
;
2059 ret
= do_getpeername(sockfd
, target_addr
, target_addrlen
);
2062 case SOCKOP_socketpair
:
2064 abi_ulong domain
, type
, protocol
;
2067 if (get_user_ual(domain
, vptr
)
2068 || get_user_ual(type
, vptr
+ n
)
2069 || get_user_ual(protocol
, vptr
+ 2 * n
)
2070 || get_user_ual(tab
, vptr
+ 3 * n
))
2071 return -TARGET_EFAULT
;
2073 ret
= do_socketpair(domain
, type
, protocol
, tab
);
2083 if (get_user_ual(sockfd
, vptr
)
2084 || get_user_ual(msg
, vptr
+ n
)
2085 || get_user_ual(len
, vptr
+ 2 * n
)
2086 || get_user_ual(flags
, vptr
+ 3 * n
))
2087 return -TARGET_EFAULT
;
2089 ret
= do_sendto(sockfd
, msg
, len
, flags
, 0, 0);
2099 if (get_user_ual(sockfd
, vptr
)
2100 || get_user_ual(msg
, vptr
+ n
)
2101 || get_user_ual(len
, vptr
+ 2 * n
)
2102 || get_user_ual(flags
, vptr
+ 3 * n
))
2103 return -TARGET_EFAULT
;
2105 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, 0, 0);
2117 if (get_user_ual(sockfd
, vptr
)
2118 || get_user_ual(msg
, vptr
+ n
)
2119 || get_user_ual(len
, vptr
+ 2 * n
)
2120 || get_user_ual(flags
, vptr
+ 3 * n
)
2121 || get_user_ual(addr
, vptr
+ 4 * n
)
2122 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2123 return -TARGET_EFAULT
;
2125 ret
= do_sendto(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2128 case SOCKOP_recvfrom
:
2137 if (get_user_ual(sockfd
, vptr
)
2138 || get_user_ual(msg
, vptr
+ n
)
2139 || get_user_ual(len
, vptr
+ 2 * n
)
2140 || get_user_ual(flags
, vptr
+ 3 * n
)
2141 || get_user_ual(addr
, vptr
+ 4 * n
)
2142 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2143 return -TARGET_EFAULT
;
2145 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2148 case SOCKOP_shutdown
:
2150 abi_ulong sockfd
, how
;
2152 if (get_user_ual(sockfd
, vptr
)
2153 || get_user_ual(how
, vptr
+ n
))
2154 return -TARGET_EFAULT
;
2156 ret
= get_errno(shutdown(sockfd
, how
));
2159 case SOCKOP_sendmsg
:
2160 case SOCKOP_recvmsg
:
2163 abi_ulong target_msg
;
2166 if (get_user_ual(fd
, vptr
)
2167 || get_user_ual(target_msg
, vptr
+ n
)
2168 || get_user_ual(flags
, vptr
+ 2 * n
))
2169 return -TARGET_EFAULT
;
2171 ret
= do_sendrecvmsg(fd
, target_msg
, flags
,
2172 (num
== SOCKOP_sendmsg
));
2175 case SOCKOP_setsockopt
:
2183 if (get_user_ual(sockfd
, vptr
)
2184 || get_user_ual(level
, vptr
+ n
)
2185 || get_user_ual(optname
, vptr
+ 2 * n
)
2186 || get_user_ual(optval
, vptr
+ 3 * n
)
2187 || get_user_ual(optlen
, vptr
+ 4 * n
))
2188 return -TARGET_EFAULT
;
2190 ret
= do_setsockopt(sockfd
, level
, optname
, optval
, optlen
);
2193 case SOCKOP_getsockopt
:
2201 if (get_user_ual(sockfd
, vptr
)
2202 || get_user_ual(level
, vptr
+ n
)
2203 || get_user_ual(optname
, vptr
+ 2 * n
)
2204 || get_user_ual(optval
, vptr
+ 3 * n
)
2205 || get_user_ual(optlen
, vptr
+ 4 * n
))
2206 return -TARGET_EFAULT
;
2208 ret
= do_getsockopt(sockfd
, level
, optname
, optval
, optlen
);
2212 gemu_log("Unsupported socketcall: %d\n", num
);
2213 ret
= -TARGET_ENOSYS
;
2220 #define N_SHM_REGIONS 32
2222 static struct shm_region
{
2225 } shm_regions
[N_SHM_REGIONS
];
2227 struct target_ipc_perm
2234 unsigned short int mode
;
2235 unsigned short int __pad1
;
2236 unsigned short int __seq
;
2237 unsigned short int __pad2
;
2238 abi_ulong __unused1
;
2239 abi_ulong __unused2
;
2242 struct target_semid_ds
2244 struct target_ipc_perm sem_perm
;
2245 abi_ulong sem_otime
;
2246 abi_ulong __unused1
;
2247 abi_ulong sem_ctime
;
2248 abi_ulong __unused2
;
2249 abi_ulong sem_nsems
;
2250 abi_ulong __unused3
;
2251 abi_ulong __unused4
;
2254 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2255 abi_ulong target_addr
)
2257 struct target_ipc_perm
*target_ip
;
2258 struct target_semid_ds
*target_sd
;
2260 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2261 return -TARGET_EFAULT
;
2262 target_ip
= &(target_sd
->sem_perm
);
2263 host_ip
->__key
= tswapl(target_ip
->__key
);
2264 host_ip
->uid
= tswapl(target_ip
->uid
);
2265 host_ip
->gid
= tswapl(target_ip
->gid
);
2266 host_ip
->cuid
= tswapl(target_ip
->cuid
);
2267 host_ip
->cgid
= tswapl(target_ip
->cgid
);
2268 host_ip
->mode
= tswapl(target_ip
->mode
);
2269 unlock_user_struct(target_sd
, target_addr
, 0);
2273 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2274 struct ipc_perm
*host_ip
)
2276 struct target_ipc_perm
*target_ip
;
2277 struct target_semid_ds
*target_sd
;
2279 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2280 return -TARGET_EFAULT
;
2281 target_ip
= &(target_sd
->sem_perm
);
2282 target_ip
->__key
= tswapl(host_ip
->__key
);
2283 target_ip
->uid
= tswapl(host_ip
->uid
);
2284 target_ip
->gid
= tswapl(host_ip
->gid
);
2285 target_ip
->cuid
= tswapl(host_ip
->cuid
);
2286 target_ip
->cgid
= tswapl(host_ip
->cgid
);
2287 target_ip
->mode
= tswapl(host_ip
->mode
);
2288 unlock_user_struct(target_sd
, target_addr
, 1);
2292 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2293 abi_ulong target_addr
)
2295 struct target_semid_ds
*target_sd
;
2297 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2298 return -TARGET_EFAULT
;
2299 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2300 return -TARGET_EFAULT
;
2301 host_sd
->sem_nsems
= tswapl(target_sd
->sem_nsems
);
2302 host_sd
->sem_otime
= tswapl(target_sd
->sem_otime
);
2303 host_sd
->sem_ctime
= tswapl(target_sd
->sem_ctime
);
2304 unlock_user_struct(target_sd
, target_addr
, 0);
2308 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2309 struct semid_ds
*host_sd
)
2311 struct target_semid_ds
*target_sd
;
2313 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2314 return -TARGET_EFAULT
;
2315 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2316 return -TARGET_EFAULT
;;
2317 target_sd
->sem_nsems
= tswapl(host_sd
->sem_nsems
);
2318 target_sd
->sem_otime
= tswapl(host_sd
->sem_otime
);
2319 target_sd
->sem_ctime
= tswapl(host_sd
->sem_ctime
);
2320 unlock_user_struct(target_sd
, target_addr
, 1);
2324 struct target_seminfo
{
2337 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2338 struct seminfo
*host_seminfo
)
2340 struct target_seminfo
*target_seminfo
;
2341 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2342 return -TARGET_EFAULT
;
2343 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2344 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2345 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2346 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2347 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2348 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2349 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2350 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2351 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2352 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2353 unlock_user_struct(target_seminfo
, target_addr
, 1);
2359 struct semid_ds
*buf
;
2360 unsigned short *array
;
2361 struct seminfo
*__buf
;
2364 union target_semun
{
2371 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2372 abi_ulong target_addr
)
2375 unsigned short *array
;
2377 struct semid_ds semid_ds
;
2380 semun
.buf
= &semid_ds
;
2382 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2384 return get_errno(ret
);
2386 nsems
= semid_ds
.sem_nsems
;
2388 *host_array
= malloc(nsems
*sizeof(unsigned short));
2389 array
= lock_user(VERIFY_READ
, target_addr
,
2390 nsems
*sizeof(unsigned short), 1);
2392 return -TARGET_EFAULT
;
2394 for(i
=0; i
<nsems
; i
++) {
2395 __get_user((*host_array
)[i
], &array
[i
]);
2397 unlock_user(array
, target_addr
, 0);
2402 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2403 unsigned short **host_array
)
2406 unsigned short *array
;
2408 struct semid_ds semid_ds
;
2411 semun
.buf
= &semid_ds
;
2413 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2415 return get_errno(ret
);
2417 nsems
= semid_ds
.sem_nsems
;
2419 array
= lock_user(VERIFY_WRITE
, target_addr
,
2420 nsems
*sizeof(unsigned short), 0);
2422 return -TARGET_EFAULT
;
2424 for(i
=0; i
<nsems
; i
++) {
2425 __put_user((*host_array
)[i
], &array
[i
]);
2428 unlock_user(array
, target_addr
, 1);
2433 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2434 union target_semun target_su
)
2437 struct semid_ds dsarg
;
2438 unsigned short *array
= NULL
;
2439 struct seminfo seminfo
;
2440 abi_long ret
= -TARGET_EINVAL
;
2447 arg
.val
= tswapl(target_su
.val
);
2448 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2449 target_su
.val
= tswapl(arg
.val
);
2453 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2457 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2458 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2465 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2469 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2470 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2476 arg
.__buf
= &seminfo
;
2477 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2478 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2486 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
2493 struct target_sembuf
{
2494 unsigned short sem_num
;
2499 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2500 abi_ulong target_addr
,
2503 struct target_sembuf
*target_sembuf
;
2506 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2507 nsops
*sizeof(struct target_sembuf
), 1);
2509 return -TARGET_EFAULT
;
2511 for(i
=0; i
<nsops
; i
++) {
2512 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2513 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2514 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2517 unlock_user(target_sembuf
, target_addr
, 0);
2522 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2524 struct sembuf sops
[nsops
];
2526 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2527 return -TARGET_EFAULT
;
2529 return semop(semid
, sops
, nsops
);
2532 struct target_msqid_ds
2534 struct target_ipc_perm msg_perm
;
2535 abi_ulong msg_stime
;
2536 #if TARGET_ABI_BITS == 32
2537 abi_ulong __unused1
;
2539 abi_ulong msg_rtime
;
2540 #if TARGET_ABI_BITS == 32
2541 abi_ulong __unused2
;
2543 abi_ulong msg_ctime
;
2544 #if TARGET_ABI_BITS == 32
2545 abi_ulong __unused3
;
2547 abi_ulong __msg_cbytes
;
2549 abi_ulong msg_qbytes
;
2550 abi_ulong msg_lspid
;
2551 abi_ulong msg_lrpid
;
2552 abi_ulong __unused4
;
2553 abi_ulong __unused5
;
2556 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2557 abi_ulong target_addr
)
2559 struct target_msqid_ds
*target_md
;
2561 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2562 return -TARGET_EFAULT
;
2563 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2564 return -TARGET_EFAULT
;
2565 host_md
->msg_stime
= tswapl(target_md
->msg_stime
);
2566 host_md
->msg_rtime
= tswapl(target_md
->msg_rtime
);
2567 host_md
->msg_ctime
= tswapl(target_md
->msg_ctime
);
2568 host_md
->__msg_cbytes
= tswapl(target_md
->__msg_cbytes
);
2569 host_md
->msg_qnum
= tswapl(target_md
->msg_qnum
);
2570 host_md
->msg_qbytes
= tswapl(target_md
->msg_qbytes
);
2571 host_md
->msg_lspid
= tswapl(target_md
->msg_lspid
);
2572 host_md
->msg_lrpid
= tswapl(target_md
->msg_lrpid
);
2573 unlock_user_struct(target_md
, target_addr
, 0);
2577 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2578 struct msqid_ds
*host_md
)
2580 struct target_msqid_ds
*target_md
;
2582 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2583 return -TARGET_EFAULT
;
2584 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2585 return -TARGET_EFAULT
;
2586 target_md
->msg_stime
= tswapl(host_md
->msg_stime
);
2587 target_md
->msg_rtime
= tswapl(host_md
->msg_rtime
);
2588 target_md
->msg_ctime
= tswapl(host_md
->msg_ctime
);
2589 target_md
->__msg_cbytes
= tswapl(host_md
->__msg_cbytes
);
2590 target_md
->msg_qnum
= tswapl(host_md
->msg_qnum
);
2591 target_md
->msg_qbytes
= tswapl(host_md
->msg_qbytes
);
2592 target_md
->msg_lspid
= tswapl(host_md
->msg_lspid
);
2593 target_md
->msg_lrpid
= tswapl(host_md
->msg_lrpid
);
2594 unlock_user_struct(target_md
, target_addr
, 1);
2598 struct target_msginfo
{
2606 unsigned short int msgseg
;
2609 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2610 struct msginfo
*host_msginfo
)
2612 struct target_msginfo
*target_msginfo
;
2613 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2614 return -TARGET_EFAULT
;
2615 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2616 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2617 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2618 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2619 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2620 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2621 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2622 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2623 unlock_user_struct(target_msginfo
, target_addr
, 1);
2627 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2629 struct msqid_ds dsarg
;
2630 struct msginfo msginfo
;
2631 abi_long ret
= -TARGET_EINVAL
;
2639 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2640 return -TARGET_EFAULT
;
2641 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2642 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2643 return -TARGET_EFAULT
;
2646 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2650 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2651 if (host_to_target_msginfo(ptr
, &msginfo
))
2652 return -TARGET_EFAULT
;
2659 struct target_msgbuf
{
2664 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2665 unsigned int msgsz
, int msgflg
)
2667 struct target_msgbuf
*target_mb
;
2668 struct msgbuf
*host_mb
;
2671 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2672 return -TARGET_EFAULT
;
2673 host_mb
= malloc(msgsz
+sizeof(long));
2674 host_mb
->mtype
= (abi_long
) tswapl(target_mb
->mtype
);
2675 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2676 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2678 unlock_user_struct(target_mb
, msgp
, 0);
2683 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2684 unsigned int msgsz
, abi_long msgtyp
,
2687 struct target_msgbuf
*target_mb
;
2689 struct msgbuf
*host_mb
;
2692 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2693 return -TARGET_EFAULT
;
2695 host_mb
= malloc(msgsz
+sizeof(long));
2696 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, tswapl(msgtyp
), msgflg
));
2699 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2700 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2701 if (!target_mtext
) {
2702 ret
= -TARGET_EFAULT
;
2705 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2706 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2709 target_mb
->mtype
= tswapl(host_mb
->mtype
);
2714 unlock_user_struct(target_mb
, msgp
, 1);
2718 struct target_shmid_ds
2720 struct target_ipc_perm shm_perm
;
2721 abi_ulong shm_segsz
;
2722 abi_ulong shm_atime
;
2723 #if TARGET_ABI_BITS == 32
2724 abi_ulong __unused1
;
2726 abi_ulong shm_dtime
;
2727 #if TARGET_ABI_BITS == 32
2728 abi_ulong __unused2
;
2730 abi_ulong shm_ctime
;
2731 #if TARGET_ABI_BITS == 32
2732 abi_ulong __unused3
;
2736 abi_ulong shm_nattch
;
2737 unsigned long int __unused4
;
2738 unsigned long int __unused5
;
2741 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2742 abi_ulong target_addr
)
2744 struct target_shmid_ds
*target_sd
;
2746 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2747 return -TARGET_EFAULT
;
2748 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2749 return -TARGET_EFAULT
;
2750 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2751 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2752 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2753 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2754 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2755 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2756 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2757 unlock_user_struct(target_sd
, target_addr
, 0);
2761 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2762 struct shmid_ds
*host_sd
)
2764 struct target_shmid_ds
*target_sd
;
2766 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2767 return -TARGET_EFAULT
;
2768 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2769 return -TARGET_EFAULT
;
2770 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2771 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2772 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2773 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2774 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2775 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2776 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2777 unlock_user_struct(target_sd
, target_addr
, 1);
2781 struct target_shminfo
{
2789 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
2790 struct shminfo
*host_shminfo
)
2792 struct target_shminfo
*target_shminfo
;
2793 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
2794 return -TARGET_EFAULT
;
2795 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
2796 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
2797 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
2798 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
2799 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
2800 unlock_user_struct(target_shminfo
, target_addr
, 1);
2804 struct target_shm_info
{
2809 abi_ulong swap_attempts
;
2810 abi_ulong swap_successes
;
2813 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
2814 struct shm_info
*host_shm_info
)
2816 struct target_shm_info
*target_shm_info
;
2817 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
2818 return -TARGET_EFAULT
;
2819 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
2820 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
2821 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
2822 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
2823 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
2824 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
2825 unlock_user_struct(target_shm_info
, target_addr
, 1);
2829 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
2831 struct shmid_ds dsarg
;
2832 struct shminfo shminfo
;
2833 struct shm_info shm_info
;
2834 abi_long ret
= -TARGET_EINVAL
;
2842 if (target_to_host_shmid_ds(&dsarg
, buf
))
2843 return -TARGET_EFAULT
;
2844 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
2845 if (host_to_target_shmid_ds(buf
, &dsarg
))
2846 return -TARGET_EFAULT
;
2849 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
2850 if (host_to_target_shminfo(buf
, &shminfo
))
2851 return -TARGET_EFAULT
;
2854 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
2855 if (host_to_target_shm_info(buf
, &shm_info
))
2856 return -TARGET_EFAULT
;
2861 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
2868 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
2872 struct shmid_ds shm_info
;
2875 /* find out the length of the shared memory segment */
2876 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
2877 if (is_error(ret
)) {
2878 /* can't get length, bail out */
2885 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
2887 abi_ulong mmap_start
;
2889 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
2891 if (mmap_start
== -1) {
2893 host_raddr
= (void *)-1;
2895 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
2898 if (host_raddr
== (void *)-1) {
2900 return get_errno((long)host_raddr
);
2902 raddr
=h2g((unsigned long)host_raddr
);
2904 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
2905 PAGE_VALID
| PAGE_READ
|
2906 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
2908 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
2909 if (shm_regions
[i
].start
== 0) {
2910 shm_regions
[i
].start
= raddr
;
2911 shm_regions
[i
].size
= shm_info
.shm_segsz
;
2921 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
2925 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
2926 if (shm_regions
[i
].start
== shmaddr
) {
2927 shm_regions
[i
].start
= 0;
2928 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
2933 return get_errno(shmdt(g2h(shmaddr
)));
2936 #ifdef TARGET_NR_ipc
2937 /* ??? This only works with linear mappings. */
2938 /* do_ipc() must return target values and target errnos. */
2939 static abi_long
do_ipc(unsigned int call
, int first
,
2940 int second
, int third
,
2941 abi_long ptr
, abi_long fifth
)
2946 version
= call
>> 16;
2951 ret
= do_semop(first
, ptr
, second
);
2955 ret
= get_errno(semget(first
, second
, third
));
2959 ret
= do_semctl(first
, second
, third
, (union target_semun
)(abi_ulong
) ptr
);
2963 ret
= get_errno(msgget(first
, second
));
2967 ret
= do_msgsnd(first
, ptr
, second
, third
);
2971 ret
= do_msgctl(first
, second
, ptr
);
2978 struct target_ipc_kludge
{
2983 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
2984 ret
= -TARGET_EFAULT
;
2988 ret
= do_msgrcv(first
, tmp
->msgp
, second
, tmp
->msgtyp
, third
);
2990 unlock_user_struct(tmp
, ptr
, 0);
2994 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
3003 raddr
= do_shmat(first
, ptr
, second
);
3004 if (is_error(raddr
))
3005 return get_errno(raddr
);
3006 if (put_user_ual(raddr
, third
))
3007 return -TARGET_EFAULT
;
3011 ret
= -TARGET_EINVAL
;
3016 ret
= do_shmdt(ptr
);
3020 /* IPC_* flag values are the same on all linux platforms */
3021 ret
= get_errno(shmget(first
, second
, third
));
3024 /* IPC_* and SHM_* command values are the same on all linux platforms */
3026 ret
= do_shmctl(first
, second
, third
);
3029 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
3030 ret
= -TARGET_ENOSYS
;
3037 /* kernel structure types definitions */
3039 #define STRUCT(name, ...) STRUCT_ ## name,
3040 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3042 #include "syscall_types.h"
3045 #undef STRUCT_SPECIAL
3047 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3048 #define STRUCT_SPECIAL(name)
3049 #include "syscall_types.h"
3051 #undef STRUCT_SPECIAL
3053 typedef struct IOCTLEntry IOCTLEntry
;
3055 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3056 int fd
, abi_long cmd
, abi_long arg
);
3059 unsigned int target_cmd
;
3060 unsigned int host_cmd
;
3063 do_ioctl_fn
*do_ioctl
;
3064 const argtype arg_type
[5];
3067 #define IOC_R 0x0001
3068 #define IOC_W 0x0002
3069 #define IOC_RW (IOC_R | IOC_W)
3071 #define MAX_STRUCT_SIZE 4096
3073 #ifdef CONFIG_FIEMAP
3074 /* So fiemap access checks don't overflow on 32 bit systems.
3075 * This is very slightly smaller than the limit imposed by
3076 * the underlying kernel.
3078 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3079 / sizeof(struct fiemap_extent))
3081 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3082 int fd
, abi_long cmd
, abi_long arg
)
3084 /* The parameter for this ioctl is a struct fiemap followed
3085 * by an array of struct fiemap_extent whose size is set
3086 * in fiemap->fm_extent_count. The array is filled in by the
3089 int target_size_in
, target_size_out
;
3091 const argtype
*arg_type
= ie
->arg_type
;
3092 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3095 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3099 assert(arg_type
[0] == TYPE_PTR
);
3100 assert(ie
->access
== IOC_RW
);
3102 target_size_in
= thunk_type_size(arg_type
, 0);
3103 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3105 return -TARGET_EFAULT
;
3107 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3108 unlock_user(argptr
, arg
, 0);
3109 fm
= (struct fiemap
*)buf_temp
;
3110 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3111 return -TARGET_EINVAL
;
3114 outbufsz
= sizeof (*fm
) +
3115 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3117 if (outbufsz
> MAX_STRUCT_SIZE
) {
3118 /* We can't fit all the extents into the fixed size buffer.
3119 * Allocate one that is large enough and use it instead.
3121 fm
= malloc(outbufsz
);
3123 return -TARGET_ENOMEM
;
3125 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3128 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3129 if (!is_error(ret
)) {
3130 target_size_out
= target_size_in
;
3131 /* An extent_count of 0 means we were only counting the extents
3132 * so there are no structs to copy
3134 if (fm
->fm_extent_count
!= 0) {
3135 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3137 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3139 ret
= -TARGET_EFAULT
;
3141 /* Convert the struct fiemap */
3142 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3143 if (fm
->fm_extent_count
!= 0) {
3144 p
= argptr
+ target_size_in
;
3145 /* ...and then all the struct fiemap_extents */
3146 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3147 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3152 unlock_user(argptr
, arg
, target_size_out
);
3162 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3163 int fd
, abi_long cmd
, abi_long arg
)
3165 const argtype
*arg_type
= ie
->arg_type
;
3169 struct ifconf
*host_ifconf
;
3171 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3172 int target_ifreq_size
;
3177 abi_long target_ifc_buf
;
3181 assert(arg_type
[0] == TYPE_PTR
);
3182 assert(ie
->access
== IOC_RW
);
3185 target_size
= thunk_type_size(arg_type
, 0);
3187 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3189 return -TARGET_EFAULT
;
3190 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3191 unlock_user(argptr
, arg
, 0);
3193 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3194 target_ifc_len
= host_ifconf
->ifc_len
;
3195 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3197 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3198 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3199 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3201 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3202 if (outbufsz
> MAX_STRUCT_SIZE
) {
3203 /* We can't fit all the extents into the fixed size buffer.
3204 * Allocate one that is large enough and use it instead.
3206 host_ifconf
= malloc(outbufsz
);
3208 return -TARGET_ENOMEM
;
3210 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3213 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3215 host_ifconf
->ifc_len
= host_ifc_len
;
3216 host_ifconf
->ifc_buf
= host_ifc_buf
;
3218 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3219 if (!is_error(ret
)) {
3220 /* convert host ifc_len to target ifc_len */
3222 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3223 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3224 host_ifconf
->ifc_len
= target_ifc_len
;
3226 /* restore target ifc_buf */
3228 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3230 /* copy struct ifconf to target user */
3232 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3234 return -TARGET_EFAULT
;
3235 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3236 unlock_user(argptr
, arg
, target_size
);
3238 /* copy ifreq[] to target user */
3240 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3241 for (i
= 0; i
< nb_ifreq
; i
++) {
3242 thunk_convert(argptr
+ i
* target_ifreq_size
,
3243 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3244 ifreq_arg_type
, THUNK_TARGET
);
3246 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3256 static IOCTLEntry ioctl_entries
[] = {
3257 #define IOCTL(cmd, access, ...) \
3258 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3259 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3260 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3265 /* ??? Implement proper locking for ioctls. */
3266 /* do_ioctl() Must return target values and target errnos. */
3267 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3269 const IOCTLEntry
*ie
;
3270 const argtype
*arg_type
;
3272 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3278 if (ie
->target_cmd
== 0) {
3279 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3280 return -TARGET_ENOSYS
;
3282 if (ie
->target_cmd
== cmd
)
3286 arg_type
= ie
->arg_type
;
3288 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3291 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3294 switch(arg_type
[0]) {
3297 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3302 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3306 target_size
= thunk_type_size(arg_type
, 0);
3307 switch(ie
->access
) {
3309 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3310 if (!is_error(ret
)) {
3311 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3313 return -TARGET_EFAULT
;
3314 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3315 unlock_user(argptr
, arg
, target_size
);
3319 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3321 return -TARGET_EFAULT
;
3322 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3323 unlock_user(argptr
, arg
, 0);
3324 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3328 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3330 return -TARGET_EFAULT
;
3331 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3332 unlock_user(argptr
, arg
, 0);
3333 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3334 if (!is_error(ret
)) {
3335 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3337 return -TARGET_EFAULT
;
3338 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3339 unlock_user(argptr
, arg
, target_size
);
3345 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3346 (long)cmd
, arg_type
[0]);
3347 ret
= -TARGET_ENOSYS
;
3353 static const bitmask_transtbl iflag_tbl
[] = {
3354 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3355 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3356 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3357 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3358 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3359 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3360 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3361 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3362 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3363 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3364 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3365 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3366 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3367 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3371 static const bitmask_transtbl oflag_tbl
[] = {
3372 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3373 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3374 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3375 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3376 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3377 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3378 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3379 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3380 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3381 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3382 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3383 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3384 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3385 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3386 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3387 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3388 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3389 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3390 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3391 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3392 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3393 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3394 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3395 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3399 static const bitmask_transtbl cflag_tbl
[] = {
3400 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3401 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3402 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3403 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3404 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3405 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3406 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3407 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3408 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3409 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3410 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3411 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3412 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3413 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3414 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3415 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3416 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3417 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3418 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3419 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3420 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3421 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3422 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3423 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3424 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3425 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3426 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3427 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3428 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3429 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3430 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3434 static const bitmask_transtbl lflag_tbl
[] = {
3435 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3436 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3437 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3438 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3439 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3440 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3441 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3442 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3443 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3444 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3445 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3446 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3447 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3448 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3449 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3453 static void target_to_host_termios (void *dst
, const void *src
)
3455 struct host_termios
*host
= dst
;
3456 const struct target_termios
*target
= src
;
3459 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3461 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3463 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3465 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3466 host
->c_line
= target
->c_line
;
3468 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3469 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3470 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3471 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3472 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3473 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3474 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3475 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3476 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3477 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3478 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3479 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3480 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3481 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3482 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3483 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3484 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3485 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3488 static void host_to_target_termios (void *dst
, const void *src
)
3490 struct target_termios
*target
= dst
;
3491 const struct host_termios
*host
= src
;
3494 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3496 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3498 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3500 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3501 target
->c_line
= host
->c_line
;
3503 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3504 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3505 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3506 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3507 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3508 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3509 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3510 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3511 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3512 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3513 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3514 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3515 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3516 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3517 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3518 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3519 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3520 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3523 static const StructEntry struct_termios_def
= {
3524 .convert
= { host_to_target_termios
, target_to_host_termios
},
3525 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3526 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3529 static bitmask_transtbl mmap_flags_tbl
[] = {
3530 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3531 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3532 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3533 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3534 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3535 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3536 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3537 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3541 #if defined(TARGET_I386)
3543 /* NOTE: there is really one LDT for all the threads */
3544 static uint8_t *ldt_table
;
3546 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3553 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3554 if (size
> bytecount
)
3556 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3558 return -TARGET_EFAULT
;
3559 /* ??? Should this by byteswapped? */
3560 memcpy(p
, ldt_table
, size
);
3561 unlock_user(p
, ptr
, size
);
3565 /* XXX: add locking support */
3566 static abi_long
write_ldt(CPUX86State
*env
,
3567 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3569 struct target_modify_ldt_ldt_s ldt_info
;
3570 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3571 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3572 int seg_not_present
, useable
, lm
;
3573 uint32_t *lp
, entry_1
, entry_2
;
3575 if (bytecount
!= sizeof(ldt_info
))
3576 return -TARGET_EINVAL
;
3577 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3578 return -TARGET_EFAULT
;
3579 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3580 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3581 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3582 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3583 unlock_user_struct(target_ldt_info
, ptr
, 0);
3585 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3586 return -TARGET_EINVAL
;
3587 seg_32bit
= ldt_info
.flags
& 1;
3588 contents
= (ldt_info
.flags
>> 1) & 3;
3589 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3590 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3591 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3592 useable
= (ldt_info
.flags
>> 6) & 1;
3596 lm
= (ldt_info
.flags
>> 7) & 1;
3598 if (contents
== 3) {
3600 return -TARGET_EINVAL
;
3601 if (seg_not_present
== 0)
3602 return -TARGET_EINVAL
;
3604 /* allocate the LDT */
3606 env
->ldt
.base
= target_mmap(0,
3607 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3608 PROT_READ
|PROT_WRITE
,
3609 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3610 if (env
->ldt
.base
== -1)
3611 return -TARGET_ENOMEM
;
3612 memset(g2h(env
->ldt
.base
), 0,
3613 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3614 env
->ldt
.limit
= 0xffff;
3615 ldt_table
= g2h(env
->ldt
.base
);
3618 /* NOTE: same code as Linux kernel */
3619 /* Allow LDTs to be cleared by the user. */
3620 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3623 read_exec_only
== 1 &&
3625 limit_in_pages
== 0 &&
3626 seg_not_present
== 1 &&
3634 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3635 (ldt_info
.limit
& 0x0ffff);
3636 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3637 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3638 (ldt_info
.limit
& 0xf0000) |
3639 ((read_exec_only
^ 1) << 9) |
3641 ((seg_not_present
^ 1) << 15) |
3643 (limit_in_pages
<< 23) |
3647 entry_2
|= (useable
<< 20);
3649 /* Install the new entry ... */
3651 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3652 lp
[0] = tswap32(entry_1
);
3653 lp
[1] = tswap32(entry_2
);
3657 /* specific and weird i386 syscalls */
3658 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3659 unsigned long bytecount
)
3665 ret
= read_ldt(ptr
, bytecount
);
3668 ret
= write_ldt(env
, ptr
, bytecount
, 1);
3671 ret
= write_ldt(env
, ptr
, bytecount
, 0);
3674 ret
= -TARGET_ENOSYS
;
3680 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3681 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3683 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3684 struct target_modify_ldt_ldt_s ldt_info
;
3685 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3686 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3687 int seg_not_present
, useable
, lm
;
3688 uint32_t *lp
, entry_1
, entry_2
;
3691 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3692 if (!target_ldt_info
)
3693 return -TARGET_EFAULT
;
3694 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3695 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3696 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3697 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3698 if (ldt_info
.entry_number
== -1) {
3699 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
3700 if (gdt_table
[i
] == 0) {
3701 ldt_info
.entry_number
= i
;
3702 target_ldt_info
->entry_number
= tswap32(i
);
3707 unlock_user_struct(target_ldt_info
, ptr
, 1);
3709 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
3710 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
3711 return -TARGET_EINVAL
;
3712 seg_32bit
= ldt_info
.flags
& 1;
3713 contents
= (ldt_info
.flags
>> 1) & 3;
3714 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3715 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3716 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3717 useable
= (ldt_info
.flags
>> 6) & 1;
3721 lm
= (ldt_info
.flags
>> 7) & 1;
3724 if (contents
== 3) {
3725 if (seg_not_present
== 0)
3726 return -TARGET_EINVAL
;
3729 /* NOTE: same code as Linux kernel */
3730 /* Allow LDTs to be cleared by the user. */
3731 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3732 if ((contents
== 0 &&
3733 read_exec_only
== 1 &&
3735 limit_in_pages
== 0 &&
3736 seg_not_present
== 1 &&
3744 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3745 (ldt_info
.limit
& 0x0ffff);
3746 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3747 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3748 (ldt_info
.limit
& 0xf0000) |
3749 ((read_exec_only
^ 1) << 9) |
3751 ((seg_not_present
^ 1) << 15) |
3753 (limit_in_pages
<< 23) |
3758 /* Install the new entry ... */
3760 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
3761 lp
[0] = tswap32(entry_1
);
3762 lp
[1] = tswap32(entry_2
);
3766 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3768 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3769 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3770 uint32_t base_addr
, limit
, flags
;
3771 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
3772 int seg_not_present
, useable
, lm
;
3773 uint32_t *lp
, entry_1
, entry_2
;
3775 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3776 if (!target_ldt_info
)
3777 return -TARGET_EFAULT
;
3778 idx
= tswap32(target_ldt_info
->entry_number
);
3779 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
3780 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
3781 unlock_user_struct(target_ldt_info
, ptr
, 1);
3782 return -TARGET_EINVAL
;
3784 lp
= (uint32_t *)(gdt_table
+ idx
);
3785 entry_1
= tswap32(lp
[0]);
3786 entry_2
= tswap32(lp
[1]);
3788 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
3789 contents
= (entry_2
>> 10) & 3;
3790 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
3791 seg_32bit
= (entry_2
>> 22) & 1;
3792 limit_in_pages
= (entry_2
>> 23) & 1;
3793 useable
= (entry_2
>> 20) & 1;
3797 lm
= (entry_2
>> 21) & 1;
3799 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
3800 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
3801 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
3802 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
3803 base_addr
= (entry_1
>> 16) |
3804 (entry_2
& 0xff000000) |
3805 ((entry_2
& 0xff) << 16);
3806 target_ldt_info
->base_addr
= tswapl(base_addr
);
3807 target_ldt_info
->limit
= tswap32(limit
);
3808 target_ldt_info
->flags
= tswap32(flags
);
3809 unlock_user_struct(target_ldt_info
, ptr
, 1);
3812 #endif /* TARGET_I386 && TARGET_ABI32 */
3814 #ifndef TARGET_ABI32
3815 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
3822 case TARGET_ARCH_SET_GS
:
3823 case TARGET_ARCH_SET_FS
:
3824 if (code
== TARGET_ARCH_SET_GS
)
3828 cpu_x86_load_seg(env
, idx
, 0);
3829 env
->segs
[idx
].base
= addr
;
3831 case TARGET_ARCH_GET_GS
:
3832 case TARGET_ARCH_GET_FS
:
3833 if (code
== TARGET_ARCH_GET_GS
)
3837 val
= env
->segs
[idx
].base
;
3838 if (put_user(val
, addr
, abi_ulong
))
3839 ret
= -TARGET_EFAULT
;
3842 ret
= -TARGET_EINVAL
;
3849 #endif /* defined(TARGET_I386) */
3851 #define NEW_STACK_SIZE 0x40000
3853 #if defined(CONFIG_USE_NPTL)
3855 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
3858 pthread_mutex_t mutex
;
3859 pthread_cond_t cond
;
3862 abi_ulong child_tidptr
;
3863 abi_ulong parent_tidptr
;
3867 static void *clone_func(void *arg
)
3869 new_thread_info
*info
= arg
;
3875 ts
= (TaskState
*)thread_env
->opaque
;
3876 info
->tid
= gettid();
3877 env
->host_tid
= info
->tid
;
3879 if (info
->child_tidptr
)
3880 put_user_u32(info
->tid
, info
->child_tidptr
);
3881 if (info
->parent_tidptr
)
3882 put_user_u32(info
->tid
, info
->parent_tidptr
);
3883 /* Enable signals. */
3884 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
3885 /* Signal to the parent that we're ready. */
3886 pthread_mutex_lock(&info
->mutex
);
3887 pthread_cond_broadcast(&info
->cond
);
3888 pthread_mutex_unlock(&info
->mutex
);
3889 /* Wait until the parent has finshed initializing the tls state. */
3890 pthread_mutex_lock(&clone_lock
);
3891 pthread_mutex_unlock(&clone_lock
);
3898 static int clone_func(void *arg
)
3900 CPUState
*env
= arg
;
3907 /* do_fork() Must return host values and target errnos (unlike most
3908 do_*() functions). */
3909 static int do_fork(CPUState
*env
, unsigned int flags
, abi_ulong newsp
,
3910 abi_ulong parent_tidptr
, target_ulong newtls
,
3911 abi_ulong child_tidptr
)
3916 #if defined(CONFIG_USE_NPTL)
3917 unsigned int nptl_flags
;
3923 /* Emulate vfork() with fork() */
3924 if (flags
& CLONE_VFORK
)
3925 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
3927 if (flags
& CLONE_VM
) {
3928 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
3929 #if defined(CONFIG_USE_NPTL)
3930 new_thread_info info
;
3931 pthread_attr_t attr
;
3933 ts
= qemu_mallocz(sizeof(TaskState
));
3934 init_task_state(ts
);
3935 /* we create a new CPU instance. */
3936 new_env
= cpu_copy(env
);
3937 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3940 /* Init regs that differ from the parent. */
3941 cpu_clone_regs(new_env
, newsp
);
3942 new_env
->opaque
= ts
;
3943 ts
->bprm
= parent_ts
->bprm
;
3944 ts
->info
= parent_ts
->info
;
3945 #if defined(CONFIG_USE_NPTL)
3947 flags
&= ~CLONE_NPTL_FLAGS2
;
3949 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
3950 ts
->child_tidptr
= child_tidptr
;
3953 if (nptl_flags
& CLONE_SETTLS
)
3954 cpu_set_tls (new_env
, newtls
);
3956 /* Grab a mutex so that thread setup appears atomic. */
3957 pthread_mutex_lock(&clone_lock
);
3959 memset(&info
, 0, sizeof(info
));
3960 pthread_mutex_init(&info
.mutex
, NULL
);
3961 pthread_mutex_lock(&info
.mutex
);
3962 pthread_cond_init(&info
.cond
, NULL
);
3964 if (nptl_flags
& CLONE_CHILD_SETTID
)
3965 info
.child_tidptr
= child_tidptr
;
3966 if (nptl_flags
& CLONE_PARENT_SETTID
)
3967 info
.parent_tidptr
= parent_tidptr
;
3969 ret
= pthread_attr_init(&attr
);
3970 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
3971 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
3972 /* It is not safe to deliver signals until the child has finished
3973 initializing, so temporarily block all signals. */
3974 sigfillset(&sigmask
);
3975 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
3977 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
3978 /* TODO: Free new CPU state if thread creation failed. */
3980 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
3981 pthread_attr_destroy(&attr
);
3983 /* Wait for the child to initialize. */
3984 pthread_cond_wait(&info
.cond
, &info
.mutex
);
3986 if (flags
& CLONE_PARENT_SETTID
)
3987 put_user_u32(ret
, parent_tidptr
);
3991 pthread_mutex_unlock(&info
.mutex
);
3992 pthread_cond_destroy(&info
.cond
);
3993 pthread_mutex_destroy(&info
.mutex
);
3994 pthread_mutex_unlock(&clone_lock
);
3996 if (flags
& CLONE_NPTL_FLAGS2
)
3998 /* This is probably going to die very quickly, but do it anyway. */
3999 new_stack
= qemu_mallocz (NEW_STACK_SIZE
);
4001 ret
= __clone2(clone_func
, new_stack
, NEW_STACK_SIZE
, flags
, new_env
);
4003 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
4007 /* if no CLONE_VM, we consider it is a fork */
4008 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4013 /* Child Process. */
4014 cpu_clone_regs(env
, newsp
);
4016 #if defined(CONFIG_USE_NPTL)
4017 /* There is a race condition here. The parent process could
4018 theoretically read the TID in the child process before the child
4019 tid is set. This would require using either ptrace
4020 (not implemented) or having *_tidptr to point at a shared memory
4021 mapping. We can't repeat the spinlock hack used above because
4022 the child process gets its own copy of the lock. */
4023 if (flags
& CLONE_CHILD_SETTID
)
4024 put_user_u32(gettid(), child_tidptr
);
4025 if (flags
& CLONE_PARENT_SETTID
)
4026 put_user_u32(gettid(), parent_tidptr
);
4027 ts
= (TaskState
*)env
->opaque
;
4028 if (flags
& CLONE_SETTLS
)
4029 cpu_set_tls (env
, newtls
);
4030 if (flags
& CLONE_CHILD_CLEARTID
)
4031 ts
->child_tidptr
= child_tidptr
;
4040 /* warning : doesn't handle linux specific flags... */
4041 static int target_to_host_fcntl_cmd(int cmd
)
4044 case TARGET_F_DUPFD
:
4045 case TARGET_F_GETFD
:
4046 case TARGET_F_SETFD
:
4047 case TARGET_F_GETFL
:
4048 case TARGET_F_SETFL
:
4050 case TARGET_F_GETLK
:
4052 case TARGET_F_SETLK
:
4054 case TARGET_F_SETLKW
:
4056 case TARGET_F_GETOWN
:
4058 case TARGET_F_SETOWN
:
4060 case TARGET_F_GETSIG
:
4062 case TARGET_F_SETSIG
:
4064 #if TARGET_ABI_BITS == 32
4065 case TARGET_F_GETLK64
:
4067 case TARGET_F_SETLK64
:
4069 case TARGET_F_SETLKW64
:
4072 case TARGET_F_SETLEASE
:
4074 case TARGET_F_GETLEASE
:
4076 #ifdef F_DUPFD_CLOEXEC
4077 case TARGET_F_DUPFD_CLOEXEC
:
4078 return F_DUPFD_CLOEXEC
;
4080 case TARGET_F_NOTIFY
:
4083 return -TARGET_EINVAL
;
4085 return -TARGET_EINVAL
;
4088 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4091 struct target_flock
*target_fl
;
4092 struct flock64 fl64
;
4093 struct target_flock64
*target_fl64
;
4095 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4097 if (host_cmd
== -TARGET_EINVAL
)
4101 case TARGET_F_GETLK
:
4102 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4103 return -TARGET_EFAULT
;
4104 fl
.l_type
= tswap16(target_fl
->l_type
);
4105 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4106 fl
.l_start
= tswapl(target_fl
->l_start
);
4107 fl
.l_len
= tswapl(target_fl
->l_len
);
4108 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4109 unlock_user_struct(target_fl
, arg
, 0);
4110 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4112 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4113 return -TARGET_EFAULT
;
4114 target_fl
->l_type
= tswap16(fl
.l_type
);
4115 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4116 target_fl
->l_start
= tswapl(fl
.l_start
);
4117 target_fl
->l_len
= tswapl(fl
.l_len
);
4118 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4119 unlock_user_struct(target_fl
, arg
, 1);
4123 case TARGET_F_SETLK
:
4124 case TARGET_F_SETLKW
:
4125 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4126 return -TARGET_EFAULT
;
4127 fl
.l_type
= tswap16(target_fl
->l_type
);
4128 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4129 fl
.l_start
= tswapl(target_fl
->l_start
);
4130 fl
.l_len
= tswapl(target_fl
->l_len
);
4131 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4132 unlock_user_struct(target_fl
, arg
, 0);
4133 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4136 case TARGET_F_GETLK64
:
4137 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4138 return -TARGET_EFAULT
;
4139 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4140 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4141 fl64
.l_start
= tswapl(target_fl64
->l_start
);
4142 fl64
.l_len
= tswapl(target_fl64
->l_len
);
4143 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4144 unlock_user_struct(target_fl64
, arg
, 0);
4145 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4147 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4148 return -TARGET_EFAULT
;
4149 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
4150 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4151 target_fl64
->l_start
= tswapl(fl64
.l_start
);
4152 target_fl64
->l_len
= tswapl(fl64
.l_len
);
4153 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4154 unlock_user_struct(target_fl64
, arg
, 1);
4157 case TARGET_F_SETLK64
:
4158 case TARGET_F_SETLKW64
:
4159 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4160 return -TARGET_EFAULT
;
4161 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4162 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4163 fl64
.l_start
= tswapl(target_fl64
->l_start
);
4164 fl64
.l_len
= tswapl(target_fl64
->l_len
);
4165 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4166 unlock_user_struct(target_fl64
, arg
, 0);
4167 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4170 case TARGET_F_GETFL
:
4171 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4173 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4177 case TARGET_F_SETFL
:
4178 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4181 case TARGET_F_SETOWN
:
4182 case TARGET_F_GETOWN
:
4183 case TARGET_F_SETSIG
:
4184 case TARGET_F_GETSIG
:
4185 case TARGET_F_SETLEASE
:
4186 case TARGET_F_GETLEASE
:
4187 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4191 ret
= get_errno(fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* 16-bit UID/GID conversion helpers for targets whose legacy syscalls
   carry uids/gids in 16 bits.  Values that do not fit are clamped to
   65534 (the kernel's "overflow" id); 16-bit -1 maps to full-width -1. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}
#else /* !USE_UID16 */
/* Full-width ids: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return id;
}
#endif /* USE_UID16 */
4257 void syscall_init(void)
4260 const argtype
*arg_type
;
4264 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4265 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4266 #include "syscall_types.h"
4268 #undef STRUCT_SPECIAL
4270 /* we patch the ioctl size if necessary. We rely on the fact that
4271 no ioctl has all the bits at '1' in the size field */
4273 while (ie
->target_cmd
!= 0) {
4274 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4275 TARGET_IOC_SIZEMASK
) {
4276 arg_type
= ie
->arg_type
;
4277 if (arg_type
[0] != TYPE_PTR
) {
4278 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4283 size
= thunk_type_size(arg_type
, 0);
4284 ie
->target_cmd
= (ie
->target_cmd
&
4285 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4286 (size
<< TARGET_IOC_SIZESHIFT
);
4289 /* Build target_to_host_errno_table[] table from
4290 * host_to_target_errno_table[]. */
4291 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
4292 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4294 /* automatic consistency check if same arch */
4295 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4296 (defined(__x86_64__) && defined(TARGET_X86_64))
4297 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4298 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4299 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset from the two 32-bit halves a 32-bit ABI
   passes in separate syscall arguments; argument order depends on the
   target's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already arrives whole in the first argument. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64(2): combine the split 64-bit length and call the host.
   ARM EABI inserts an alignment pad argument, so the halves shift up one. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        /* EABI aligns the 64-bit pair; skip the padding argument. */
        arg2 = arg3;
        arg3 = arg4;
    }
#endif
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64(2): same argument reshuffling as target_truncate64, but the
   first argument is a file descriptor. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        /* EABI aligns the 64-bit pair; skip the padding argument. */
        arg2 = arg3;
        arg3 = arg4;
    }
#endif
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
4356 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4357 abi_ulong target_addr
)
4359 struct target_timespec
*target_ts
;
4361 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4362 return -TARGET_EFAULT
;
4363 host_ts
->tv_sec
= tswapl(target_ts
->tv_sec
);
4364 host_ts
->tv_nsec
= tswapl(target_ts
->tv_nsec
);
4365 unlock_user_struct(target_ts
, target_addr
, 0);
4369 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4370 struct timespec
*host_ts
)
4372 struct target_timespec
*target_ts
;
4374 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4375 return -TARGET_EFAULT
;
4376 target_ts
->tv_sec
= tswapl(host_ts
->tv_sec
);
4377 target_ts
->tv_nsec
= tswapl(host_ts
->tv_nsec
);
4378 unlock_user_struct(target_ts
, target_addr
, 1);
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Copy a host struct stat into the guest's stat64 layout at `target_addr`.
   ARM EABI guests use a distinct padded layout; other targets use
   target_stat (64-bit ABIs, except Alpha) or target_stat64.
   Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
        struct target_stat *target_st;
#else
        struct target_stat64 *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                                   pts, NULL, 0));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits carry the terminating signal; translate it. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
4514 int get_osversion(void)
4516 static int osversion
;
4517 struct new_utsname buf
;
4522 if (qemu_uname_release
&& *qemu_uname_release
) {
4523 s
= qemu_uname_release
;
4525 if (sys_uname(&buf
))
4530 for (i
= 0; i
< 3; i
++) {
4532 while (*s
>= '0' && *s
<= '9') {
4537 tmp
= (tmp
<< 8) + n
;
4545 /* do_syscall() should always have a single exit point at the end so
4546 that actions, such as logging of syscall results, can be performed.
4547 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4548 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
4549 abi_long arg2
, abi_long arg3
, abi_long arg4
,
4550 abi_long arg5
, abi_long arg6
, abi_long arg7
,
4559 gemu_log("syscall %d", num
);
4562 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
4565 case TARGET_NR_exit
:
4566 #ifdef CONFIG_USE_NPTL
4567 /* In old applications this may be used to implement _exit(2).
4568 However in threaded applictions it is used for thread termination,
4569 and _exit_group is used for application termination.
4570 Do thread termination if we have more then one thread. */
4571 /* FIXME: This probably breaks if a signal arrives. We should probably
4572 be disabling signals. */
4573 if (first_cpu
->next_cpu
) {
4581 while (p
&& p
!= (CPUState
*)cpu_env
) {
4582 lastp
= &p
->next_cpu
;
4585 /* If we didn't find the CPU for this thread then something is
4589 /* Remove the CPU from the list. */
4590 *lastp
= p
->next_cpu
;
4592 ts
= ((CPUState
*)cpu_env
)->opaque
;
4593 if (ts
->child_tidptr
) {
4594 put_user_u32(0, ts
->child_tidptr
);
4595 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
4607 gdb_exit(cpu_env
, arg1
);
4609 ret
= 0; /* avoid warning */
4611 case TARGET_NR_read
:
4615 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
4617 ret
= get_errno(read(arg1
, p
, arg3
));
4618 unlock_user(p
, arg2
, ret
);
4621 case TARGET_NR_write
:
4622 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
4624 ret
= get_errno(write(arg1
, p
, arg3
));
4625 unlock_user(p
, arg2
, 0);
4627 case TARGET_NR_open
:
4628 if (!(p
= lock_user_string(arg1
)))
4630 ret
= get_errno(open(path(p
),
4631 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
4633 unlock_user(p
, arg1
, 0);
4635 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4636 case TARGET_NR_openat
:
4637 if (!(p
= lock_user_string(arg2
)))
4639 ret
= get_errno(sys_openat(arg1
,
4641 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
4643 unlock_user(p
, arg2
, 0);
4646 case TARGET_NR_close
:
4647 ret
= get_errno(close(arg1
));
4652 case TARGET_NR_fork
:
4653 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
4655 #ifdef TARGET_NR_waitpid
4656 case TARGET_NR_waitpid
:
4659 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
4660 if (!is_error(ret
) && arg2
4661 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
4666 #ifdef TARGET_NR_waitid
4667 case TARGET_NR_waitid
:
4671 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
4672 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
4673 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
4675 host_to_target_siginfo(p
, &info
);
4676 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
4681 #ifdef TARGET_NR_creat /* not on alpha */
4682 case TARGET_NR_creat
:
4683 if (!(p
= lock_user_string(arg1
)))
4685 ret
= get_errno(creat(p
, arg2
));
4686 unlock_user(p
, arg1
, 0);
4689 case TARGET_NR_link
:
4692 p
= lock_user_string(arg1
);
4693 p2
= lock_user_string(arg2
);
4695 ret
= -TARGET_EFAULT
;
4697 ret
= get_errno(link(p
, p2
));
4698 unlock_user(p2
, arg2
, 0);
4699 unlock_user(p
, arg1
, 0);
4702 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4703 case TARGET_NR_linkat
:
4708 p
= lock_user_string(arg2
);
4709 p2
= lock_user_string(arg4
);
4711 ret
= -TARGET_EFAULT
;
4713 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
4714 unlock_user(p
, arg2
, 0);
4715 unlock_user(p2
, arg4
, 0);
4719 case TARGET_NR_unlink
:
4720 if (!(p
= lock_user_string(arg1
)))
4722 ret
= get_errno(unlink(p
));
4723 unlock_user(p
, arg1
, 0);
4725 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4726 case TARGET_NR_unlinkat
:
4727 if (!(p
= lock_user_string(arg2
)))
4729 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
4730 unlock_user(p
, arg2
, 0);
4733 case TARGET_NR_execve
:
4735 char **argp
, **envp
;
4738 abi_ulong guest_argp
;
4739 abi_ulong guest_envp
;
4745 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
4746 if (get_user_ual(addr
, gp
))
4754 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
4755 if (get_user_ual(addr
, gp
))
4762 argp
= alloca((argc
+ 1) * sizeof(void *));
4763 envp
= alloca((envc
+ 1) * sizeof(void *));
4765 for (gp
= guest_argp
, q
= argp
; gp
;
4766 gp
+= sizeof(abi_ulong
), q
++) {
4767 if (get_user_ual(addr
, gp
))
4771 if (!(*q
= lock_user_string(addr
)))
4776 for (gp
= guest_envp
, q
= envp
; gp
;
4777 gp
+= sizeof(abi_ulong
), q
++) {
4778 if (get_user_ual(addr
, gp
))
4782 if (!(*q
= lock_user_string(addr
)))
4787 if (!(p
= lock_user_string(arg1
)))
4789 ret
= get_errno(execve(p
, argp
, envp
));
4790 unlock_user(p
, arg1
, 0);
4795 ret
= -TARGET_EFAULT
;
4798 for (gp
= guest_argp
, q
= argp
; *q
;
4799 gp
+= sizeof(abi_ulong
), q
++) {
4800 if (get_user_ual(addr
, gp
)
4803 unlock_user(*q
, addr
, 0);
4805 for (gp
= guest_envp
, q
= envp
; *q
;
4806 gp
+= sizeof(abi_ulong
), q
++) {
4807 if (get_user_ual(addr
, gp
)
4810 unlock_user(*q
, addr
, 0);
4814 case TARGET_NR_chdir
:
4815 if (!(p
= lock_user_string(arg1
)))
4817 ret
= get_errno(chdir(p
));
4818 unlock_user(p
, arg1
, 0);
4820 #ifdef TARGET_NR_time
4821 case TARGET_NR_time
:
4824 ret
= get_errno(time(&host_time
));
4827 && put_user_sal(host_time
, arg1
))
4832 case TARGET_NR_mknod
:
4833 if (!(p
= lock_user_string(arg1
)))
4835 ret
= get_errno(mknod(p
, arg2
, arg3
));
4836 unlock_user(p
, arg1
, 0);
4838 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4839 case TARGET_NR_mknodat
:
4840 if (!(p
= lock_user_string(arg2
)))
4842 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
4843 unlock_user(p
, arg2
, 0);
4846 case TARGET_NR_chmod
:
4847 if (!(p
= lock_user_string(arg1
)))
4849 ret
= get_errno(chmod(p
, arg2
));
4850 unlock_user(p
, arg1
, 0);
4852 #ifdef TARGET_NR_break
4853 case TARGET_NR_break
:
4856 #ifdef TARGET_NR_oldstat
4857 case TARGET_NR_oldstat
:
4860 case TARGET_NR_lseek
:
4861 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
4863 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4864 /* Alpha specific */
4865 case TARGET_NR_getxpid
:
4866 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
4867 ret
= get_errno(getpid());
4870 #ifdef TARGET_NR_getpid
4871 case TARGET_NR_getpid
:
4872 ret
= get_errno(getpid());
4875 case TARGET_NR_mount
:
4877 /* need to look at the data field */
4879 p
= lock_user_string(arg1
);
4880 p2
= lock_user_string(arg2
);
4881 p3
= lock_user_string(arg3
);
4882 if (!p
|| !p2
|| !p3
)
4883 ret
= -TARGET_EFAULT
;
4885 /* FIXME - arg5 should be locked, but it isn't clear how to
4886 * do that since it's not guaranteed to be a NULL-terminated
4890 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
4892 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
4894 unlock_user(p
, arg1
, 0);
4895 unlock_user(p2
, arg2
, 0);
4896 unlock_user(p3
, arg3
, 0);
4899 #ifdef TARGET_NR_umount
4900 case TARGET_NR_umount
:
4901 if (!(p
= lock_user_string(arg1
)))
4903 ret
= get_errno(umount(p
));
4904 unlock_user(p
, arg1
, 0);
4907 #ifdef TARGET_NR_stime /* not on alpha */
4908 case TARGET_NR_stime
:
4911 if (get_user_sal(host_time
, arg1
))
4913 ret
= get_errno(stime(&host_time
));
4917 case TARGET_NR_ptrace
:
4919 #ifdef TARGET_NR_alarm /* not on alpha */
4920 case TARGET_NR_alarm
:
4924 #ifdef TARGET_NR_oldfstat
4925 case TARGET_NR_oldfstat
:
4928 #ifdef TARGET_NR_pause /* not on alpha */
4929 case TARGET_NR_pause
:
4930 ret
= get_errno(pause());
4933 #ifdef TARGET_NR_utime
4934 case TARGET_NR_utime
:
4936 struct utimbuf tbuf
, *host_tbuf
;
4937 struct target_utimbuf
*target_tbuf
;
4939 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
4941 tbuf
.actime
= tswapl(target_tbuf
->actime
);
4942 tbuf
.modtime
= tswapl(target_tbuf
->modtime
);
4943 unlock_user_struct(target_tbuf
, arg2
, 0);
4948 if (!(p
= lock_user_string(arg1
)))
4950 ret
= get_errno(utime(p
, host_tbuf
));
4951 unlock_user(p
, arg1
, 0);
4955 case TARGET_NR_utimes
:
4957 struct timeval
*tvp
, tv
[2];
4959 if (copy_from_user_timeval(&tv
[0], arg2
)
4960 || copy_from_user_timeval(&tv
[1],
4961 arg2
+ sizeof(struct target_timeval
)))
4967 if (!(p
= lock_user_string(arg1
)))
4969 ret
= get_errno(utimes(p
, tvp
));
4970 unlock_user(p
, arg1
, 0);
4973 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4974 case TARGET_NR_futimesat
:
4976 struct timeval
*tvp
, tv
[2];
4978 if (copy_from_user_timeval(&tv
[0], arg3
)
4979 || copy_from_user_timeval(&tv
[1],
4980 arg3
+ sizeof(struct target_timeval
)))
4986 if (!(p
= lock_user_string(arg2
)))
4988 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
4989 unlock_user(p
, arg2
, 0);
4993 #ifdef TARGET_NR_stty
4994 case TARGET_NR_stty
:
4997 #ifdef TARGET_NR_gtty
4998 case TARGET_NR_gtty
:
5001 case TARGET_NR_access
:
5002 if (!(p
= lock_user_string(arg1
)))
5004 ret
= get_errno(access(path(p
), arg2
));
5005 unlock_user(p
, arg1
, 0);
5007 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5008 case TARGET_NR_faccessat
:
5009 if (!(p
= lock_user_string(arg2
)))
5011 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
5012 unlock_user(p
, arg2
, 0);
5015 #ifdef TARGET_NR_nice /* not on alpha */
5016 case TARGET_NR_nice
:
5017 ret
= get_errno(nice(arg1
));
5020 #ifdef TARGET_NR_ftime
5021 case TARGET_NR_ftime
:
5024 case TARGET_NR_sync
:
5028 case TARGET_NR_kill
:
5029 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5031 case TARGET_NR_rename
:
5034 p
= lock_user_string(arg1
);
5035 p2
= lock_user_string(arg2
);
5037 ret
= -TARGET_EFAULT
;
5039 ret
= get_errno(rename(p
, p2
));
5040 unlock_user(p2
, arg2
, 0);
5041 unlock_user(p
, arg1
, 0);
5044 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5045 case TARGET_NR_renameat
:
5048 p
= lock_user_string(arg2
);
5049 p2
= lock_user_string(arg4
);
5051 ret
= -TARGET_EFAULT
;
5053 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
5054 unlock_user(p2
, arg4
, 0);
5055 unlock_user(p
, arg2
, 0);
5059 case TARGET_NR_mkdir
:
5060 if (!(p
= lock_user_string(arg1
)))
5062 ret
= get_errno(mkdir(p
, arg2
));
5063 unlock_user(p
, arg1
, 0);
5065 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5066 case TARGET_NR_mkdirat
:
5067 if (!(p
= lock_user_string(arg2
)))
5069 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
5070 unlock_user(p
, arg2
, 0);
5073 case TARGET_NR_rmdir
:
5074 if (!(p
= lock_user_string(arg1
)))
5076 ret
= get_errno(rmdir(p
));
5077 unlock_user(p
, arg1
, 0);
5080 ret
= get_errno(dup(arg1
));
5082 case TARGET_NR_pipe
:
5083 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5085 #ifdef TARGET_NR_pipe2
5086 case TARGET_NR_pipe2
:
5087 ret
= do_pipe(cpu_env
, arg1
, arg2
, 1);
5090 case TARGET_NR_times
:
5092 struct target_tms
*tmsp
;
5094 ret
= get_errno(times(&tms
));
5096 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5099 tmsp
->tms_utime
= tswapl(host_to_target_clock_t(tms
.tms_utime
));
5100 tmsp
->tms_stime
= tswapl(host_to_target_clock_t(tms
.tms_stime
));
5101 tmsp
->tms_cutime
= tswapl(host_to_target_clock_t(tms
.tms_cutime
));
5102 tmsp
->tms_cstime
= tswapl(host_to_target_clock_t(tms
.tms_cstime
));
5105 ret
= host_to_target_clock_t(ret
);
5108 #ifdef TARGET_NR_prof
5109 case TARGET_NR_prof
:
5112 #ifdef TARGET_NR_signal
5113 case TARGET_NR_signal
:
5116 case TARGET_NR_acct
:
5118 ret
= get_errno(acct(NULL
));
5120 if (!(p
= lock_user_string(arg1
)))
5122 ret
= get_errno(acct(path(p
)));
5123 unlock_user(p
, arg1
, 0);
5126 #ifdef TARGET_NR_umount2 /* not on alpha */
5127 case TARGET_NR_umount2
:
5128 if (!(p
= lock_user_string(arg1
)))
5130 ret
= get_errno(umount2(p
, arg2
));
5131 unlock_user(p
, arg1
, 0);
5134 #ifdef TARGET_NR_lock
5135 case TARGET_NR_lock
:
5138 case TARGET_NR_ioctl
:
5139 ret
= do_ioctl(arg1
, arg2
, arg3
);
5141 case TARGET_NR_fcntl
:
5142 ret
= do_fcntl(arg1
, arg2
, arg3
);
5144 #ifdef TARGET_NR_mpx
5148 case TARGET_NR_setpgid
:
5149 ret
= get_errno(setpgid(arg1
, arg2
));
5151 #ifdef TARGET_NR_ulimit
5152 case TARGET_NR_ulimit
:
5155 #ifdef TARGET_NR_oldolduname
5156 case TARGET_NR_oldolduname
:
5159 case TARGET_NR_umask
:
5160 ret
= get_errno(umask(arg1
));
5162 case TARGET_NR_chroot
:
5163 if (!(p
= lock_user_string(arg1
)))
5165 ret
= get_errno(chroot(p
));
5166 unlock_user(p
, arg1
, 0);
5168 case TARGET_NR_ustat
:
5170 case TARGET_NR_dup2
:
5171 ret
= get_errno(dup2(arg1
, arg2
));
5173 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5174 case TARGET_NR_dup3
:
5175 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5178 #ifdef TARGET_NR_getppid /* not on alpha */
5179 case TARGET_NR_getppid
:
5180 ret
= get_errno(getppid());
5183 case TARGET_NR_getpgrp
:
5184 ret
= get_errno(getpgrp());
5186 case TARGET_NR_setsid
:
5187 ret
= get_errno(setsid());
5189 #ifdef TARGET_NR_sigaction
5190 case TARGET_NR_sigaction
:
5192 #if defined(TARGET_ALPHA)
5193 struct target_sigaction act
, oact
, *pact
= 0;
5194 struct target_old_sigaction
*old_act
;
5196 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5198 act
._sa_handler
= old_act
->_sa_handler
;
5199 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5200 act
.sa_flags
= old_act
->sa_flags
;
5201 act
.sa_restorer
= 0;
5202 unlock_user_struct(old_act
, arg2
, 0);
5205 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5206 if (!is_error(ret
) && arg3
) {
5207 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5209 old_act
->_sa_handler
= oact
._sa_handler
;
5210 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5211 old_act
->sa_flags
= oact
.sa_flags
;
5212 unlock_user_struct(old_act
, arg3
, 1);
5214 #elif defined(TARGET_MIPS)
5215 struct target_sigaction act
, oact
, *pact
, *old_act
;
5218 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5220 act
._sa_handler
= old_act
->_sa_handler
;
5221 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5222 act
.sa_flags
= old_act
->sa_flags
;
5223 unlock_user_struct(old_act
, arg2
, 0);
5229 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5231 if (!is_error(ret
) && arg3
) {
5232 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5234 old_act
->_sa_handler
= oact
._sa_handler
;
5235 old_act
->sa_flags
= oact
.sa_flags
;
5236 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5237 old_act
->sa_mask
.sig
[1] = 0;
5238 old_act
->sa_mask
.sig
[2] = 0;
5239 old_act
->sa_mask
.sig
[3] = 0;
5240 unlock_user_struct(old_act
, arg3
, 1);
5243 struct target_old_sigaction
*old_act
;
5244 struct target_sigaction act
, oact
, *pact
;
5246 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5248 act
._sa_handler
= old_act
->_sa_handler
;
5249 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5250 act
.sa_flags
= old_act
->sa_flags
;
5251 act
.sa_restorer
= old_act
->sa_restorer
;
5252 unlock_user_struct(old_act
, arg2
, 0);
5257 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5258 if (!is_error(ret
) && arg3
) {
5259 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5261 old_act
->_sa_handler
= oact
._sa_handler
;
5262 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5263 old_act
->sa_flags
= oact
.sa_flags
;
5264 old_act
->sa_restorer
= oact
.sa_restorer
;
5265 unlock_user_struct(old_act
, arg3
, 1);
5271 case TARGET_NR_rt_sigaction
:
5273 #if defined(TARGET_ALPHA)
5274 struct target_sigaction act
, oact
, *pact
= 0;
5275 struct target_rt_sigaction
*rt_act
;
5276 /* ??? arg4 == sizeof(sigset_t). */
5278 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5280 act
._sa_handler
= rt_act
->_sa_handler
;
5281 act
.sa_mask
= rt_act
->sa_mask
;
5282 act
.sa_flags
= rt_act
->sa_flags
;
5283 act
.sa_restorer
= arg5
;
5284 unlock_user_struct(rt_act
, arg2
, 0);
5287 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5288 if (!is_error(ret
) && arg3
) {
5289 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5291 rt_act
->_sa_handler
= oact
._sa_handler
;
5292 rt_act
->sa_mask
= oact
.sa_mask
;
5293 rt_act
->sa_flags
= oact
.sa_flags
;
5294 unlock_user_struct(rt_act
, arg3
, 1);
5297 struct target_sigaction
*act
;
5298 struct target_sigaction
*oact
;
5301 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5306 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5307 ret
= -TARGET_EFAULT
;
5308 goto rt_sigaction_fail
;
5312 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5315 unlock_user_struct(act
, arg2
, 0);
5317 unlock_user_struct(oact
, arg3
, 1);
5321 #ifdef TARGET_NR_sgetmask /* not on alpha */
5322 case TARGET_NR_sgetmask
:
5325 abi_ulong target_set
;
5326 sigprocmask(0, NULL
, &cur_set
);
5327 host_to_target_old_sigset(&target_set
, &cur_set
);
5332 #ifdef TARGET_NR_ssetmask /* not on alpha */
5333 case TARGET_NR_ssetmask
:
5335 sigset_t set
, oset
, cur_set
;
5336 abi_ulong target_set
= arg1
;
5337 sigprocmask(0, NULL
, &cur_set
);
5338 target_to_host_old_sigset(&set
, &target_set
);
5339 sigorset(&set
, &set
, &cur_set
);
5340 sigprocmask(SIG_SETMASK
, &set
, &oset
);
5341 host_to_target_old_sigset(&target_set
, &oset
);
5346 #ifdef TARGET_NR_sigprocmask
5347 case TARGET_NR_sigprocmask
:
5349 #if defined(TARGET_ALPHA)
5350 sigset_t set
, oldset
;
5355 case TARGET_SIG_BLOCK
:
5358 case TARGET_SIG_UNBLOCK
:
5361 case TARGET_SIG_SETMASK
:
5365 ret
= -TARGET_EINVAL
;
5369 target_to_host_old_sigset(&set
, &mask
);
5371 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
5373 if (!is_error(ret
)) {
5374 host_to_target_old_sigset(&mask
, &oldset
);
5376 ((CPUAlphaState
*)cpu_env
)->[IR_V0
] = 0; /* force no error */
5379 sigset_t set
, oldset
, *set_ptr
;
5384 case TARGET_SIG_BLOCK
:
5387 case TARGET_SIG_UNBLOCK
:
5390 case TARGET_SIG_SETMASK
:
5394 ret
= -TARGET_EINVAL
;
5397 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5399 target_to_host_old_sigset(&set
, p
);
5400 unlock_user(p
, arg2
, 0);
5406 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5407 if (!is_error(ret
) && arg3
) {
5408 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5410 host_to_target_old_sigset(p
, &oldset
);
5411 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5417 case TARGET_NR_rt_sigprocmask
:
5420 sigset_t set
, oldset
, *set_ptr
;
5424 case TARGET_SIG_BLOCK
:
5427 case TARGET_SIG_UNBLOCK
:
5430 case TARGET_SIG_SETMASK
:
5434 ret
= -TARGET_EINVAL
;
5437 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5439 target_to_host_sigset(&set
, p
);
5440 unlock_user(p
, arg2
, 0);
5446 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5447 if (!is_error(ret
) && arg3
) {
5448 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5450 host_to_target_sigset(p
, &oldset
);
5451 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5455 #ifdef TARGET_NR_sigpending
5456 case TARGET_NR_sigpending
:
5459 ret
= get_errno(sigpending(&set
));
5460 if (!is_error(ret
)) {
5461 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5463 host_to_target_old_sigset(p
, &set
);
5464 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5469 case TARGET_NR_rt_sigpending
:
5472 ret
= get_errno(sigpending(&set
));
5473 if (!is_error(ret
)) {
5474 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5476 host_to_target_sigset(p
, &set
);
5477 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5481 #ifdef TARGET_NR_sigsuspend
5482 case TARGET_NR_sigsuspend
:
5485 #if defined(TARGET_ALPHA)
5486 abi_ulong mask
= arg1
;
5487 target_to_host_old_sigset(&set
, &mask
);
5489 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5491 target_to_host_old_sigset(&set
, p
);
5492 unlock_user(p
, arg1
, 0);
5494 ret
= get_errno(sigsuspend(&set
));
5498 case TARGET_NR_rt_sigsuspend
:
5501 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5503 target_to_host_sigset(&set
, p
);
5504 unlock_user(p
, arg1
, 0);
5505 ret
= get_errno(sigsuspend(&set
));
5508 case TARGET_NR_rt_sigtimedwait
:
5511 struct timespec uts
, *puts
;
5514 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5516 target_to_host_sigset(&set
, p
);
5517 unlock_user(p
, arg1
, 0);
5520 target_to_host_timespec(puts
, arg3
);
5524 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
5525 if (!is_error(ret
) && arg2
) {
5526 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
5528 host_to_target_siginfo(p
, &uinfo
);
5529 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
5533 case TARGET_NR_rt_sigqueueinfo
:
5536 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
5538 target_to_host_siginfo(&uinfo
, p
);
5539 unlock_user(p
, arg1
, 0);
5540 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
5543 #ifdef TARGET_NR_sigreturn
5544 case TARGET_NR_sigreturn
:
5545 /* NOTE: ret is eax, so no transcoding needs to be done */
5546 ret
= do_sigreturn(cpu_env
);
5549 case TARGET_NR_rt_sigreturn
:
5551 /* NOTE: ret is eax, so no transcoding needs to be done */
5551 ret
= do_rt_sigreturn(cpu_env
);
5553 case TARGET_NR_sethostname
:
5554 if (!(p
= lock_user_string(arg1
)))
5556 ret
= get_errno(sethostname(p
, arg2
));
5557 unlock_user(p
, arg1
, 0);
5559 case TARGET_NR_setrlimit
:
5561 int resource
= arg1
;
5562 struct target_rlimit
*target_rlim
;
5564 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
5566 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
5567 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
5568 unlock_user_struct(target_rlim
, arg2
, 0);
5569 ret
= get_errno(setrlimit(resource
, &rlim
));
5572 case TARGET_NR_getrlimit
:
5574 int resource
= arg1
;
5575 struct target_rlimit
*target_rlim
;
5578 ret
= get_errno(getrlimit(resource
, &rlim
));
5579 if (!is_error(ret
)) {
5580 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5582 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
5583 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
5584 unlock_user_struct(target_rlim
, arg2
, 1);
5588 case TARGET_NR_getrusage
:
5590 struct rusage rusage
;
5591 ret
= get_errno(getrusage(arg1
, &rusage
));
5592 if (!is_error(ret
)) {
5593 host_to_target_rusage(arg2
, &rusage
);
5597 case TARGET_NR_gettimeofday
:
5600 ret
= get_errno(gettimeofday(&tv
, NULL
));
5601 if (!is_error(ret
)) {
5602 if (copy_to_user_timeval(arg1
, &tv
))
5607 case TARGET_NR_settimeofday
:
5610 if (copy_from_user_timeval(&tv
, arg1
))
5612 ret
= get_errno(settimeofday(&tv
, NULL
));
5615 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5616 case TARGET_NR_select
:
5618 struct target_sel_arg_struct
*sel
;
5619 abi_ulong inp
, outp
, exp
, tvp
;
5622 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
5624 nsel
= tswapl(sel
->n
);
5625 inp
= tswapl(sel
->inp
);
5626 outp
= tswapl(sel
->outp
);
5627 exp
= tswapl(sel
->exp
);
5628 tvp
= tswapl(sel
->tvp
);
5629 unlock_user_struct(sel
, arg1
, 0);
5630 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
5634 #ifdef TARGET_NR_pselect6
5635 case TARGET_NR_pselect6
:
5637 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
5638 fd_set rfds
, wfds
, efds
;
5639 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
5640 struct timespec ts
, *ts_ptr
;
5643 * The 6th arg is actually two args smashed together,
5644 * so we cannot use the C library.
5652 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
5653 target_sigset_t
*target_sigset
;
5661 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
5665 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
5669 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
5675 * This takes a timespec, and not a timeval, so we cannot
5676 * use the do_select() helper ...
5679 if (target_to_host_timespec(&ts
, ts_addr
)) {
5687 /* Extract the two packed args for the sigset */
5690 sig
.size
= _NSIG
/ 8;
5692 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
5696 arg_sigset
= tswapl(arg7
[0]);
5697 arg_sigsize
= tswapl(arg7
[1]);
5698 unlock_user(arg7
, arg6
, 0);
5702 if (arg_sigsize
!= sizeof(*target_sigset
)) {
5703 /* Like the kernel, we enforce correct size sigsets */
5704 ret
= -TARGET_EINVAL
;
5707 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
5708 sizeof(*target_sigset
), 1);
5709 if (!target_sigset
) {
5712 target_to_host_sigset(&set
, target_sigset
);
5713 unlock_user(target_sigset
, arg_sigset
, 0);
5721 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
5724 if (!is_error(ret
)) {
5725 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
5727 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
5729 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
5732 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
5738 case TARGET_NR_symlink
:
5741 p
= lock_user_string(arg1
);
5742 p2
= lock_user_string(arg2
);
5744 ret
= -TARGET_EFAULT
;
5746 ret
= get_errno(symlink(p
, p2
));
5747 unlock_user(p2
, arg2
, 0);
5748 unlock_user(p
, arg1
, 0);
5751 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5752 case TARGET_NR_symlinkat
:
5755 p
= lock_user_string(arg1
);
5756 p2
= lock_user_string(arg3
);
5758 ret
= -TARGET_EFAULT
;
5760 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
5761 unlock_user(p2
, arg3
, 0);
5762 unlock_user(p
, arg1
, 0);
5766 #ifdef TARGET_NR_oldlstat
5767 case TARGET_NR_oldlstat
:
5770 case TARGET_NR_readlink
:
5773 p
= lock_user_string(arg1
);
5774 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
5776 ret
= -TARGET_EFAULT
;
5778 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
5779 char real
[PATH_MAX
];
5780 temp
= realpath(exec_path
,real
);
5781 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
5782 snprintf((char *)p2
, arg3
, "%s", real
);
5785 ret
= get_errno(readlink(path(p
), p2
, arg3
));
5787 unlock_user(p2
, arg2
, ret
);
5788 unlock_user(p
, arg1
, 0);
5791 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5792 case TARGET_NR_readlinkat
:
5795 p
= lock_user_string(arg2
);
5796 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
5798 ret
= -TARGET_EFAULT
;
5800 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
5801 unlock_user(p2
, arg3
, ret
);
5802 unlock_user(p
, arg2
, 0);
5806 #ifdef TARGET_NR_uselib
5807 case TARGET_NR_uselib
:
5810 #ifdef TARGET_NR_swapon
5811 case TARGET_NR_swapon
:
5812 if (!(p
= lock_user_string(arg1
)))
5814 ret
= get_errno(swapon(p
, arg2
));
5815 unlock_user(p
, arg1
, 0);
5818 case TARGET_NR_reboot
:
5820 #ifdef TARGET_NR_readdir
5821 case TARGET_NR_readdir
:
5824 #ifdef TARGET_NR_mmap
5825 case TARGET_NR_mmap
:
5826 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5827 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5828 || defined(TARGET_S390X)
5831 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
5832 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
5840 unlock_user(v
, arg1
, 0);
5841 ret
= get_errno(target_mmap(v1
, v2
, v3
,
5842 target_to_host_bitmask(v4
, mmap_flags_tbl
),
5846 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5847 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5853 #ifdef TARGET_NR_mmap2
5854 case TARGET_NR_mmap2
:
5856 #define MMAP_SHIFT 12
5858 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5859 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5861 arg6
<< MMAP_SHIFT
));
5864 case TARGET_NR_munmap
:
5865 ret
= get_errno(target_munmap(arg1
, arg2
));
5867 case TARGET_NR_mprotect
:
5869 TaskState
*ts
= ((CPUState
*)cpu_env
)->opaque
;
5870 /* Special hack to detect libc making the stack executable. */
5871 if ((arg3
& PROT_GROWSDOWN
)
5872 && arg1
>= ts
->info
->stack_limit
5873 && arg1
<= ts
->info
->start_stack
) {
5874 arg3
&= ~PROT_GROWSDOWN
;
5875 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
5876 arg1
= ts
->info
->stack_limit
;
5879 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
5881 #ifdef TARGET_NR_mremap
5882 case TARGET_NR_mremap
:
5883 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
5886 /* ??? msync/mlock/munlock are broken for softmmu. */
5887 #ifdef TARGET_NR_msync
5888 case TARGET_NR_msync
:
5889 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
5892 #ifdef TARGET_NR_mlock
5893 case TARGET_NR_mlock
:
5894 ret
= get_errno(mlock(g2h(arg1
), arg2
));
5897 #ifdef TARGET_NR_munlock
5898 case TARGET_NR_munlock
:
5899 ret
= get_errno(munlock(g2h(arg1
), arg2
));
5902 #ifdef TARGET_NR_mlockall
5903 case TARGET_NR_mlockall
:
5904 ret
= get_errno(mlockall(arg1
));
5907 #ifdef TARGET_NR_munlockall
5908 case TARGET_NR_munlockall
:
5909 ret
= get_errno(munlockall());
5912 case TARGET_NR_truncate
:
5913 if (!(p
= lock_user_string(arg1
)))
5915 ret
= get_errno(truncate(p
, arg2
));
5916 unlock_user(p
, arg1
, 0);
5918 case TARGET_NR_ftruncate
:
5919 ret
= get_errno(ftruncate(arg1
, arg2
));
5921 case TARGET_NR_fchmod
:
5922 ret
= get_errno(fchmod(arg1
, arg2
));
5924 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5925 case TARGET_NR_fchmodat
:
5926 if (!(p
= lock_user_string(arg2
)))
5928 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
5929 unlock_user(p
, arg2
, 0);
5932 case TARGET_NR_getpriority
:
5933 /* libc does special remapping of the return value of
5934 * sys_getpriority() so it's just easiest to call
5935 * sys_getpriority() directly rather than through libc. */
5936 ret
= get_errno(sys_getpriority(arg1
, arg2
));
5938 case TARGET_NR_setpriority
:
5939 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
5941 #ifdef TARGET_NR_profil
5942 case TARGET_NR_profil
:
5945 case TARGET_NR_statfs
:
5946 if (!(p
= lock_user_string(arg1
)))
5948 ret
= get_errno(statfs(path(p
), &stfs
));
5949 unlock_user(p
, arg1
, 0);
5951 if (!is_error(ret
)) {
5952 struct target_statfs
*target_stfs
;
5954 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
5956 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5957 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5958 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5959 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5960 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5961 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5962 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5963 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5964 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5965 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5966 unlock_user_struct(target_stfs
, arg2
, 1);
5969 case TARGET_NR_fstatfs
:
5970 ret
= get_errno(fstatfs(arg1
, &stfs
));
5971 goto convert_statfs
;
5972 #ifdef TARGET_NR_statfs64
5973 case TARGET_NR_statfs64
:
5974 if (!(p
= lock_user_string(arg1
)))
5976 ret
= get_errno(statfs(path(p
), &stfs
));
5977 unlock_user(p
, arg1
, 0);
5979 if (!is_error(ret
)) {
5980 struct target_statfs64
*target_stfs
;
5982 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
5984 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5985 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5986 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5987 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5988 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5989 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5990 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5991 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5992 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5993 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5994 unlock_user_struct(target_stfs
, arg3
, 1);
5997 case TARGET_NR_fstatfs64
:
5998 ret
= get_errno(fstatfs(arg1
, &stfs
));
5999 goto convert_statfs64
;
6001 #ifdef TARGET_NR_ioperm
6002 case TARGET_NR_ioperm
:
6005 #ifdef TARGET_NR_socketcall
6006 case TARGET_NR_socketcall
:
6007 ret
= do_socketcall(arg1
, arg2
);
6010 #ifdef TARGET_NR_accept
6011 case TARGET_NR_accept
:
6012 ret
= do_accept(arg1
, arg2
, arg3
);
6015 #ifdef TARGET_NR_bind
6016 case TARGET_NR_bind
:
6017 ret
= do_bind(arg1
, arg2
, arg3
);
6020 #ifdef TARGET_NR_connect
6021 case TARGET_NR_connect
:
6022 ret
= do_connect(arg1
, arg2
, arg3
);
6025 #ifdef TARGET_NR_getpeername
6026 case TARGET_NR_getpeername
:
6027 ret
= do_getpeername(arg1
, arg2
, arg3
);
6030 #ifdef TARGET_NR_getsockname
6031 case TARGET_NR_getsockname
:
6032 ret
= do_getsockname(arg1
, arg2
, arg3
);
6035 #ifdef TARGET_NR_getsockopt
6036 case TARGET_NR_getsockopt
:
6037 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
6040 #ifdef TARGET_NR_listen
6041 case TARGET_NR_listen
:
6042 ret
= get_errno(listen(arg1
, arg2
));
6045 #ifdef TARGET_NR_recv
6046 case TARGET_NR_recv
:
6047 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
6050 #ifdef TARGET_NR_recvfrom
6051 case TARGET_NR_recvfrom
:
6052 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6055 #ifdef TARGET_NR_recvmsg
6056 case TARGET_NR_recvmsg
:
6057 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
6060 #ifdef TARGET_NR_send
6061 case TARGET_NR_send
:
6062 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
6065 #ifdef TARGET_NR_sendmsg
6066 case TARGET_NR_sendmsg
:
6067 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
6070 #ifdef TARGET_NR_sendto
6071 case TARGET_NR_sendto
:
6072 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6075 #ifdef TARGET_NR_shutdown
6076 case TARGET_NR_shutdown
:
6077 ret
= get_errno(shutdown(arg1
, arg2
));
6080 #ifdef TARGET_NR_socket
6081 case TARGET_NR_socket
:
6082 ret
= do_socket(arg1
, arg2
, arg3
);
6085 #ifdef TARGET_NR_socketpair
6086 case TARGET_NR_socketpair
:
6087 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
6090 #ifdef TARGET_NR_setsockopt
6091 case TARGET_NR_setsockopt
:
6092 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
6096 case TARGET_NR_syslog
:
6097 if (!(p
= lock_user_string(arg2
)))
6099 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
6100 unlock_user(p
, arg2
, 0);
6103 case TARGET_NR_setitimer
:
6105 struct itimerval value
, ovalue
, *pvalue
;
6109 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
6110 || copy_from_user_timeval(&pvalue
->it_value
,
6111 arg2
+ sizeof(struct target_timeval
)))
6116 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
6117 if (!is_error(ret
) && arg3
) {
6118 if (copy_to_user_timeval(arg3
,
6119 &ovalue
.it_interval
)
6120 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
6126 case TARGET_NR_getitimer
:
6128 struct itimerval value
;
6130 ret
= get_errno(getitimer(arg1
, &value
));
6131 if (!is_error(ret
) && arg2
) {
6132 if (copy_to_user_timeval(arg2
,
6134 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
6140 case TARGET_NR_stat
:
6141 if (!(p
= lock_user_string(arg1
)))
6143 ret
= get_errno(stat(path(p
), &st
));
6144 unlock_user(p
, arg1
, 0);
6146 case TARGET_NR_lstat
:
6147 if (!(p
= lock_user_string(arg1
)))
6149 ret
= get_errno(lstat(path(p
), &st
));
6150 unlock_user(p
, arg1
, 0);
6152 case TARGET_NR_fstat
:
6154 ret
= get_errno(fstat(arg1
, &st
));
6156 if (!is_error(ret
)) {
6157 struct target_stat
*target_st
;
6159 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
6161 memset(target_st
, 0, sizeof(*target_st
));
6162 __put_user(st
.st_dev
, &target_st
->st_dev
);
6163 __put_user(st
.st_ino
, &target_st
->st_ino
);
6164 __put_user(st
.st_mode
, &target_st
->st_mode
);
6165 __put_user(st
.st_uid
, &target_st
->st_uid
);
6166 __put_user(st
.st_gid
, &target_st
->st_gid
);
6167 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
6168 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
6169 __put_user(st
.st_size
, &target_st
->st_size
);
6170 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
6171 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
6172 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
6173 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
6174 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
6175 unlock_user_struct(target_st
, arg2
, 1);
6179 #ifdef TARGET_NR_olduname
6180 case TARGET_NR_olduname
:
6183 #ifdef TARGET_NR_iopl
6184 case TARGET_NR_iopl
:
6187 case TARGET_NR_vhangup
:
6188 ret
= get_errno(vhangup());
6190 #ifdef TARGET_NR_idle
6191 case TARGET_NR_idle
:
6194 #ifdef TARGET_NR_syscall
6195 case TARGET_NR_syscall
:
6196 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
6197 arg6
, arg7
, arg8
, 0);
6200 case TARGET_NR_wait4
:
6203 abi_long status_ptr
= arg2
;
6204 struct rusage rusage
, *rusage_ptr
;
6205 abi_ulong target_rusage
= arg4
;
6207 rusage_ptr
= &rusage
;
6210 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6211 if (!is_error(ret
)) {
6213 status
= host_to_target_waitstatus(status
);
6214 if (put_user_s32(status
, status_ptr
))
6218 host_to_target_rusage(target_rusage
, &rusage
);
6222 #ifdef TARGET_NR_swapoff
6223 case TARGET_NR_swapoff
:
6224 if (!(p
= lock_user_string(arg1
)))
6226 ret
= get_errno(swapoff(p
));
6227 unlock_user(p
, arg1
, 0);
6230 case TARGET_NR_sysinfo
:
6232 struct target_sysinfo
*target_value
;
6233 struct sysinfo value
;
6234 ret
= get_errno(sysinfo(&value
));
6235 if (!is_error(ret
) && arg1
)
6237 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
6239 __put_user(value
.uptime
, &target_value
->uptime
);
6240 __put_user(value
.loads
[0], &target_value
->loads
[0]);
6241 __put_user(value
.loads
[1], &target_value
->loads
[1]);
6242 __put_user(value
.loads
[2], &target_value
->loads
[2]);
6243 __put_user(value
.totalram
, &target_value
->totalram
);
6244 __put_user(value
.freeram
, &target_value
->freeram
);
6245 __put_user(value
.sharedram
, &target_value
->sharedram
);
6246 __put_user(value
.bufferram
, &target_value
->bufferram
);
6247 __put_user(value
.totalswap
, &target_value
->totalswap
);
6248 __put_user(value
.freeswap
, &target_value
->freeswap
);
6249 __put_user(value
.procs
, &target_value
->procs
);
6250 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
6251 __put_user(value
.freehigh
, &target_value
->freehigh
);
6252 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
6253 unlock_user_struct(target_value
, arg1
, 1);
6257 #ifdef TARGET_NR_ipc
6259 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6262 #ifdef TARGET_NR_semget
6263 case TARGET_NR_semget
:
6264 ret
= get_errno(semget(arg1
, arg2
, arg3
));
6267 #ifdef TARGET_NR_semop
6268 case TARGET_NR_semop
:
6269 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
6272 #ifdef TARGET_NR_semctl
6273 case TARGET_NR_semctl
:
6274 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
6277 #ifdef TARGET_NR_msgctl
6278 case TARGET_NR_msgctl
:
6279 ret
= do_msgctl(arg1
, arg2
, arg3
);
6282 #ifdef TARGET_NR_msgget
6283 case TARGET_NR_msgget
:
6284 ret
= get_errno(msgget(arg1
, arg2
));
6287 #ifdef TARGET_NR_msgrcv
6288 case TARGET_NR_msgrcv
:
6289 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
6292 #ifdef TARGET_NR_msgsnd
6293 case TARGET_NR_msgsnd
:
6294 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
6297 #ifdef TARGET_NR_shmget
6298 case TARGET_NR_shmget
:
6299 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
6302 #ifdef TARGET_NR_shmctl
6303 case TARGET_NR_shmctl
:
6304 ret
= do_shmctl(arg1
, arg2
, arg3
);
6307 #ifdef TARGET_NR_shmat
6308 case TARGET_NR_shmat
:
6309 ret
= do_shmat(arg1
, arg2
, arg3
);
6312 #ifdef TARGET_NR_shmdt
6313 case TARGET_NR_shmdt
:
6314 ret
= do_shmdt(arg1
);
6317 case TARGET_NR_fsync
:
6318 ret
= get_errno(fsync(arg1
));
6320 case TARGET_NR_clone
:
6321 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6322 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
6323 #elif defined(TARGET_CRIS)
6324 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
6325 #elif defined(TARGET_S390X)
6326 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
6328 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
6331 #ifdef __NR_exit_group
6332 /* new thread calls */
6333 case TARGET_NR_exit_group
:
6337 gdb_exit(cpu_env
, arg1
);
6338 ret
= get_errno(exit_group(arg1
));
6341 case TARGET_NR_setdomainname
:
6342 if (!(p
= lock_user_string(arg1
)))
6344 ret
= get_errno(setdomainname(p
, arg2
));
6345 unlock_user(p
, arg1
, 0);
6347 case TARGET_NR_uname
:
6348 /* no need to transcode because we use the linux syscall */
6350 struct new_utsname
* buf
;
6352 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
6354 ret
= get_errno(sys_uname(buf
));
6355 if (!is_error(ret
)) {
6356 /* Overwrite the native machine name with whatever is being
6358 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
6359 /* Allow the user to override the reported release. */
6360 if (qemu_uname_release
&& *qemu_uname_release
)
6361 strcpy (buf
->release
, qemu_uname_release
);
6363 unlock_user_struct(buf
, arg1
, 1);
6367 case TARGET_NR_modify_ldt
:
6368 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
6370 #if !defined(TARGET_X86_64)
6371 case TARGET_NR_vm86old
:
6373 case TARGET_NR_vm86
:
6374 ret
= do_vm86(cpu_env
, arg1
, arg2
);
6378 case TARGET_NR_adjtimex
:
6380 #ifdef TARGET_NR_create_module
6381 case TARGET_NR_create_module
:
6383 case TARGET_NR_init_module
:
6384 case TARGET_NR_delete_module
:
6385 #ifdef TARGET_NR_get_kernel_syms
6386 case TARGET_NR_get_kernel_syms
:
6389 case TARGET_NR_quotactl
:
6391 case TARGET_NR_getpgid
:
6392 ret
= get_errno(getpgid(arg1
));
6394 case TARGET_NR_fchdir
:
6395 ret
= get_errno(fchdir(arg1
));
6397 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6398 case TARGET_NR_bdflush
:
6401 #ifdef TARGET_NR_sysfs
6402 case TARGET_NR_sysfs
:
6405 case TARGET_NR_personality
:
6406 ret
= get_errno(personality(arg1
));
6408 #ifdef TARGET_NR_afs_syscall
6409 case TARGET_NR_afs_syscall
:
6412 #ifdef TARGET_NR__llseek /* Not on alpha */
6413 case TARGET_NR__llseek
:
6416 #if !defined(__NR_llseek)
6417 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
6419 ret
= get_errno(res
);
6424 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
6426 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
6432 case TARGET_NR_getdents
:
6433 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6435 struct target_dirent
*target_dirp
;
6436 struct linux_dirent
*dirp
;
6437 abi_long count
= arg3
;
6439 dirp
= malloc(count
);
6441 ret
= -TARGET_ENOMEM
;
6445 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6446 if (!is_error(ret
)) {
6447 struct linux_dirent
*de
;
6448 struct target_dirent
*tde
;
6450 int reclen
, treclen
;
6451 int count1
, tnamelen
;
6455 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6459 reclen
= de
->d_reclen
;
6460 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
6461 tde
->d_reclen
= tswap16(treclen
);
6462 tde
->d_ino
= tswapl(de
->d_ino
);
6463 tde
->d_off
= tswapl(de
->d_off
);
6464 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
6467 /* XXX: may not be correct */
6468 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
6469 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6471 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
6475 unlock_user(target_dirp
, arg2
, ret
);
6481 struct linux_dirent
*dirp
;
6482 abi_long count
= arg3
;
6484 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6486 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6487 if (!is_error(ret
)) {
6488 struct linux_dirent
*de
;
6493 reclen
= de
->d_reclen
;
6496 de
->d_reclen
= tswap16(reclen
);
6497 tswapls(&de
->d_ino
);
6498 tswapls(&de
->d_off
);
6499 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6503 unlock_user(dirp
, arg2
, ret
);
6507 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6508 case TARGET_NR_getdents64
:
6510 struct linux_dirent64
*dirp
;
6511 abi_long count
= arg3
;
6512 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6514 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
6515 if (!is_error(ret
)) {
6516 struct linux_dirent64
*de
;
6521 reclen
= de
->d_reclen
;
6524 de
->d_reclen
= tswap16(reclen
);
6525 tswap64s((uint64_t *)&de
->d_ino
);
6526 tswap64s((uint64_t *)&de
->d_off
);
6527 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
6531 unlock_user(dirp
, arg2
, ret
);
6534 #endif /* TARGET_NR_getdents64 */
6535 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6537 case TARGET_NR_select
:
6539 case TARGET_NR__newselect
:
6541 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6544 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6545 # ifdef TARGET_NR_poll
6546 case TARGET_NR_poll
:
6548 # ifdef TARGET_NR_ppoll
6549 case TARGET_NR_ppoll
:
6552 struct target_pollfd
*target_pfd
;
6553 unsigned int nfds
= arg2
;
6558 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
6562 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
6563 for(i
= 0; i
< nfds
; i
++) {
6564 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
6565 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
6568 # ifdef TARGET_NR_ppoll
6569 if (num
== TARGET_NR_ppoll
) {
6570 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
6571 target_sigset_t
*target_set
;
6572 sigset_t _set
, *set
= &_set
;
6575 if (target_to_host_timespec(timeout_ts
, arg3
)) {
6576 unlock_user(target_pfd
, arg1
, 0);
6584 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
6586 unlock_user(target_pfd
, arg1
, 0);
6589 target_to_host_sigset(set
, target_set
);
6594 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
6596 if (!is_error(ret
) && arg3
) {
6597 host_to_target_timespec(arg3
, timeout_ts
);
6600 unlock_user(target_set
, arg4
, 0);
6604 ret
= get_errno(poll(pfd
, nfds
, timeout
));
6606 if (!is_error(ret
)) {
6607 for(i
= 0; i
< nfds
; i
++) {
6608 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
6611 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
6615 case TARGET_NR_flock
:
6616 /* NOTE: the flock constant seems to be the same for every
6618 ret
= get_errno(flock(arg1
, arg2
));
6620 case TARGET_NR_readv
:
6625 vec
= alloca(count
* sizeof(struct iovec
));
6626 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
6628 ret
= get_errno(readv(arg1
, vec
, count
));
6629 unlock_iovec(vec
, arg2
, count
, 1);
6632 case TARGET_NR_writev
:
6637 vec
= alloca(count
* sizeof(struct iovec
));
6638 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
6640 ret
= get_errno(writev(arg1
, vec
, count
));
6641 unlock_iovec(vec
, arg2
, count
, 0);
6644 case TARGET_NR_getsid
:
6645 ret
= get_errno(getsid(arg1
));
6647 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6648 case TARGET_NR_fdatasync
:
6649 ret
= get_errno(fdatasync(arg1
));
6652 case TARGET_NR__sysctl
:
6653 /* We don't implement this, but ENOTDIR is always a safe
6655 ret
= -TARGET_ENOTDIR
;
6657 case TARGET_NR_sched_getaffinity
:
6659 unsigned int mask_size
;
6660 unsigned long *mask
;
6663 * sched_getaffinity needs multiples of ulong, so need to take
6664 * care of mismatches between target ulong and host ulong sizes.
6666 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6667 ret
= -TARGET_EINVAL
;
6670 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6672 mask
= alloca(mask_size
);
6673 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
6675 if (!is_error(ret
)) {
6676 if (copy_to_user(arg3
, mask
, ret
)) {
6682 case TARGET_NR_sched_setaffinity
:
6684 unsigned int mask_size
;
6685 unsigned long *mask
;
6688 * sched_setaffinity needs multiples of ulong, so need to take
6689 * care of mismatches between target ulong and host ulong sizes.
6691 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6692 ret
= -TARGET_EINVAL
;
6695 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6697 mask
= alloca(mask_size
);
6698 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
6701 memcpy(mask
, p
, arg2
);
6702 unlock_user_struct(p
, arg2
, 0);
6704 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
6707 case TARGET_NR_sched_setparam
:
6709 struct sched_param
*target_schp
;
6710 struct sched_param schp
;
6712 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
6714 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6715 unlock_user_struct(target_schp
, arg2
, 0);
6716 ret
= get_errno(sched_setparam(arg1
, &schp
));
6719 case TARGET_NR_sched_getparam
:
6721 struct sched_param
*target_schp
;
6722 struct sched_param schp
;
6723 ret
= get_errno(sched_getparam(arg1
, &schp
));
6724 if (!is_error(ret
)) {
6725 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
6727 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
6728 unlock_user_struct(target_schp
, arg2
, 1);
6732 case TARGET_NR_sched_setscheduler
:
6734 struct sched_param
*target_schp
;
6735 struct sched_param schp
;
6736 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
6738 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6739 unlock_user_struct(target_schp
, arg3
, 0);
6740 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
6743 case TARGET_NR_sched_getscheduler
:
6744 ret
= get_errno(sched_getscheduler(arg1
));
6746 case TARGET_NR_sched_yield
:
6747 ret
= get_errno(sched_yield());
6749 case TARGET_NR_sched_get_priority_max
:
6750 ret
= get_errno(sched_get_priority_max(arg1
));
6752 case TARGET_NR_sched_get_priority_min
:
6753 ret
= get_errno(sched_get_priority_min(arg1
));
6755 case TARGET_NR_sched_rr_get_interval
:
6758 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
6759 if (!is_error(ret
)) {
6760 host_to_target_timespec(arg2
, &ts
);
6764 case TARGET_NR_nanosleep
:
6766 struct timespec req
, rem
;
6767 target_to_host_timespec(&req
, arg1
);
6768 ret
= get_errno(nanosleep(&req
, &rem
));
6769 if (is_error(ret
) && arg2
) {
6770 host_to_target_timespec(arg2
, &rem
);
6774 #ifdef TARGET_NR_query_module
6775 case TARGET_NR_query_module
:
6778 #ifdef TARGET_NR_nfsservctl
6779 case TARGET_NR_nfsservctl
:
6782 case TARGET_NR_prctl
:
6785 case PR_GET_PDEATHSIG
:
6788 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
6789 if (!is_error(ret
) && arg2
6790 && put_user_ual(deathsig
, arg2
))
6795 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
6799 #ifdef TARGET_NR_arch_prctl
6800 case TARGET_NR_arch_prctl
:
6801 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6802 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
6808 #ifdef TARGET_NR_pread
6809 case TARGET_NR_pread
:
6811 if (((CPUARMState
*)cpu_env
)->eabi
)
6814 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6816 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
6817 unlock_user(p
, arg2
, ret
);
6819 case TARGET_NR_pwrite
:
6821 if (((CPUARMState
*)cpu_env
)->eabi
)
6824 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6826 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
6827 unlock_user(p
, arg2
, 0);
6830 #ifdef TARGET_NR_pread64
6831 case TARGET_NR_pread64
:
6832 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6834 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6835 unlock_user(p
, arg2
, ret
);
6837 case TARGET_NR_pwrite64
:
6838 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6840 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6841 unlock_user(p
, arg2
, 0);
6844 case TARGET_NR_getcwd
:
6845 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
6847 ret
= get_errno(sys_getcwd1(p
, arg2
));
6848 unlock_user(p
, arg1
, ret
);
6850 case TARGET_NR_capget
:
6852 case TARGET_NR_capset
:
6854 case TARGET_NR_sigaltstack
:
6855 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6856 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6857 defined(TARGET_M68K) || defined(TARGET_S390X)
6858 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUState
*)cpu_env
));
6863 case TARGET_NR_sendfile
:
6865 #ifdef TARGET_NR_getpmsg
6866 case TARGET_NR_getpmsg
:
6869 #ifdef TARGET_NR_putpmsg
6870 case TARGET_NR_putpmsg
:
6873 #ifdef TARGET_NR_vfork
6874 case TARGET_NR_vfork
:
6875 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
6879 #ifdef TARGET_NR_ugetrlimit
6880 case TARGET_NR_ugetrlimit
:
6883 ret
= get_errno(getrlimit(arg1
, &rlim
));
6884 if (!is_error(ret
)) {
6885 struct target_rlimit
*target_rlim
;
6886 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6888 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6889 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6890 unlock_user_struct(target_rlim
, arg2
, 1);
6895 #ifdef TARGET_NR_truncate64
6896 case TARGET_NR_truncate64
:
6897 if (!(p
= lock_user_string(arg1
)))
6899 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
6900 unlock_user(p
, arg1
, 0);
6903 #ifdef TARGET_NR_ftruncate64
6904 case TARGET_NR_ftruncate64
:
6905 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
6908 #ifdef TARGET_NR_stat64
6909 case TARGET_NR_stat64
:
6910 if (!(p
= lock_user_string(arg1
)))
6912 ret
= get_errno(stat(path(p
), &st
));
6913 unlock_user(p
, arg1
, 0);
6915 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6918 #ifdef TARGET_NR_lstat64
6919 case TARGET_NR_lstat64
:
6920 if (!(p
= lock_user_string(arg1
)))
6922 ret
= get_errno(lstat(path(p
), &st
));
6923 unlock_user(p
, arg1
, 0);
6925 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6928 #ifdef TARGET_NR_fstat64
6929 case TARGET_NR_fstat64
:
6930 ret
= get_errno(fstat(arg1
, &st
));
6932 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6935 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6936 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6937 #ifdef TARGET_NR_fstatat64
6938 case TARGET_NR_fstatat64
:
6940 #ifdef TARGET_NR_newfstatat
6941 case TARGET_NR_newfstatat
:
6943 if (!(p
= lock_user_string(arg2
)))
6945 #ifdef __NR_fstatat64
6946 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
6948 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
6951 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
6954 case TARGET_NR_lchown
:
6955 if (!(p
= lock_user_string(arg1
)))
6957 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6958 unlock_user(p
, arg1
, 0);
6960 #ifdef TARGET_NR_getuid
6961 case TARGET_NR_getuid
:
6962 ret
= get_errno(high2lowuid(getuid()));
6965 #ifdef TARGET_NR_getgid
6966 case TARGET_NR_getgid
:
6967 ret
= get_errno(high2lowgid(getgid()));
6970 #ifdef TARGET_NR_geteuid
6971 case TARGET_NR_geteuid
:
6972 ret
= get_errno(high2lowuid(geteuid()));
6975 #ifdef TARGET_NR_getegid
6976 case TARGET_NR_getegid
:
6977 ret
= get_errno(high2lowgid(getegid()));
6980 case TARGET_NR_setreuid
:
6981 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
6983 case TARGET_NR_setregid
:
6984 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
6986 case TARGET_NR_getgroups
:
6988 int gidsetsize
= arg1
;
6989 target_id
*target_grouplist
;
6993 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6994 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6995 if (gidsetsize
== 0)
6997 if (!is_error(ret
)) {
6998 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
6999 if (!target_grouplist
)
7001 for(i
= 0;i
< ret
; i
++)
7002 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
7003 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
7007 case TARGET_NR_setgroups
:
7009 int gidsetsize
= arg1
;
7010 target_id
*target_grouplist
;
7014 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7015 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
7016 if (!target_grouplist
) {
7017 ret
= -TARGET_EFAULT
;
7020 for(i
= 0;i
< gidsetsize
; i
++)
7021 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
7022 unlock_user(target_grouplist
, arg2
, 0);
7023 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7026 case TARGET_NR_fchown
:
7027 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
7029 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7030 case TARGET_NR_fchownat
:
7031 if (!(p
= lock_user_string(arg2
)))
7033 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
7034 unlock_user(p
, arg2
, 0);
7037 #ifdef TARGET_NR_setresuid
7038 case TARGET_NR_setresuid
:
7039 ret
= get_errno(setresuid(low2highuid(arg1
),
7041 low2highuid(arg3
)));
7044 #ifdef TARGET_NR_getresuid
7045 case TARGET_NR_getresuid
:
7047 uid_t ruid
, euid
, suid
;
7048 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7049 if (!is_error(ret
)) {
7050 if (put_user_u16(high2lowuid(ruid
), arg1
)
7051 || put_user_u16(high2lowuid(euid
), arg2
)
7052 || put_user_u16(high2lowuid(suid
), arg3
))
7058 #ifdef TARGET_NR_getresgid
7059 case TARGET_NR_setresgid
:
7060 ret
= get_errno(setresgid(low2highgid(arg1
),
7062 low2highgid(arg3
)));
7065 #ifdef TARGET_NR_getresgid
7066 case TARGET_NR_getresgid
:
7068 gid_t rgid
, egid
, sgid
;
7069 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7070 if (!is_error(ret
)) {
7071 if (put_user_u16(high2lowgid(rgid
), arg1
)
7072 || put_user_u16(high2lowgid(egid
), arg2
)
7073 || put_user_u16(high2lowgid(sgid
), arg3
))
7079 case TARGET_NR_chown
:
7080 if (!(p
= lock_user_string(arg1
)))
7082 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7083 unlock_user(p
, arg1
, 0);
7085 case TARGET_NR_setuid
:
7086 ret
= get_errno(setuid(low2highuid(arg1
)));
7088 case TARGET_NR_setgid
:
7089 ret
= get_errno(setgid(low2highgid(arg1
)));
7091 case TARGET_NR_setfsuid
:
7092 ret
= get_errno(setfsuid(arg1
));
7094 case TARGET_NR_setfsgid
:
7095 ret
= get_errno(setfsgid(arg1
));
7098 #ifdef TARGET_NR_lchown32
7099 case TARGET_NR_lchown32
:
7100 if (!(p
= lock_user_string(arg1
)))
7102 ret
= get_errno(lchown(p
, arg2
, arg3
));
7103 unlock_user(p
, arg1
, 0);
7106 #ifdef TARGET_NR_getuid32
7107 case TARGET_NR_getuid32
:
7108 ret
= get_errno(getuid());
7112 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7113 /* Alpha specific */
7114 case TARGET_NR_getxuid
:
7118 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
7120 ret
= get_errno(getuid());
7123 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7124 /* Alpha specific */
7125 case TARGET_NR_getxgid
:
7129 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
7131 ret
= get_errno(getgid());
7134 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7135 /* Alpha specific */
7136 case TARGET_NR_osf_getsysinfo
:
7137 ret
= -TARGET_EOPNOTSUPP
;
7139 case TARGET_GSI_IEEE_FP_CONTROL
:
7141 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
7143 /* Copied from linux ieee_fpcr_to_swcr. */
7144 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
7145 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
7146 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
7147 | SWCR_TRAP_ENABLE_DZE
7148 | SWCR_TRAP_ENABLE_OVF
);
7149 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
7150 | SWCR_TRAP_ENABLE_INE
);
7151 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
7152 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
7154 if (put_user_u64 (swcr
, arg2
))
7160 /* case GSI_IEEE_STATE_AT_SIGNAL:
7161 -- Not implemented in linux kernel.
7163 -- Retrieves current unaligned access state; not much used.
7165 -- Retrieves implver information; surely not used.
7167 -- Grabs a copy of the HWRPB; surely not used.
7172 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7173 /* Alpha specific */
7174 case TARGET_NR_osf_setsysinfo
:
7175 ret
= -TARGET_EOPNOTSUPP
;
7177 case TARGET_SSI_IEEE_FP_CONTROL
:
7178 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
7180 uint64_t swcr
, fpcr
, orig_fpcr
;
7182 if (get_user_u64 (swcr
, arg2
))
7184 orig_fpcr
= cpu_alpha_load_fpcr (cpu_env
);
7185 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
7187 /* Copied from linux ieee_swcr_to_fpcr. */
7188 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
7189 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
7190 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
7191 | SWCR_TRAP_ENABLE_DZE
7192 | SWCR_TRAP_ENABLE_OVF
)) << 48;
7193 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
7194 | SWCR_TRAP_ENABLE_INE
)) << 57;
7195 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
7196 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
7198 cpu_alpha_store_fpcr (cpu_env
, fpcr
);
7201 if (arg1
== TARGET_SSI_IEEE_RAISE_EXCEPTION
) {
7202 /* Old exceptions are not signaled. */
7203 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
7205 /* If any exceptions set by this call, and are unmasked,
7212 /* case SSI_NVPAIRS:
7213 -- Used with SSIN_UACPROC to enable unaligned accesses.
7214 case SSI_IEEE_STATE_AT_SIGNAL:
7215 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7216 -- Not implemented in linux kernel
7221 #ifdef TARGET_NR_osf_sigprocmask
7222 /* Alpha specific. */
7223 case TARGET_NR_osf_sigprocmask
:
7227 sigset_t set
, oldset
;
7230 case TARGET_SIG_BLOCK
:
7233 case TARGET_SIG_UNBLOCK
:
7236 case TARGET_SIG_SETMASK
:
7240 ret
= -TARGET_EINVAL
;
7244 target_to_host_old_sigset(&set
, &mask
);
7245 sigprocmask(how
, &set
, &oldset
);
7246 host_to_target_old_sigset(&mask
, &oldset
);
7252 #ifdef TARGET_NR_getgid32
7253 case TARGET_NR_getgid32
:
7254 ret
= get_errno(getgid());
7257 #ifdef TARGET_NR_geteuid32
7258 case TARGET_NR_geteuid32
:
7259 ret
= get_errno(geteuid());
7262 #ifdef TARGET_NR_getegid32
7263 case TARGET_NR_getegid32
:
7264 ret
= get_errno(getegid());
7267 #ifdef TARGET_NR_setreuid32
7268 case TARGET_NR_setreuid32
:
7269 ret
= get_errno(setreuid(arg1
, arg2
));
7272 #ifdef TARGET_NR_setregid32
7273 case TARGET_NR_setregid32
:
7274 ret
= get_errno(setregid(arg1
, arg2
));
7277 #ifdef TARGET_NR_getgroups32
7278 case TARGET_NR_getgroups32
:
7280 int gidsetsize
= arg1
;
7281 uint32_t *target_grouplist
;
7285 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7286 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7287 if (gidsetsize
== 0)
7289 if (!is_error(ret
)) {
7290 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
7291 if (!target_grouplist
) {
7292 ret
= -TARGET_EFAULT
;
7295 for(i
= 0;i
< ret
; i
++)
7296 target_grouplist
[i
] = tswap32(grouplist
[i
]);
7297 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
7302 #ifdef TARGET_NR_setgroups32
7303 case TARGET_NR_setgroups32
:
7305 int gidsetsize
= arg1
;
7306 uint32_t *target_grouplist
;
7310 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7311 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
7312 if (!target_grouplist
) {
7313 ret
= -TARGET_EFAULT
;
7316 for(i
= 0;i
< gidsetsize
; i
++)
7317 grouplist
[i
] = tswap32(target_grouplist
[i
]);
7318 unlock_user(target_grouplist
, arg2
, 0);
7319 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7323 #ifdef TARGET_NR_fchown32
7324 case TARGET_NR_fchown32
:
7325 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
7328 #ifdef TARGET_NR_setresuid32
7329 case TARGET_NR_setresuid32
:
7330 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
7333 #ifdef TARGET_NR_getresuid32
7334 case TARGET_NR_getresuid32
:
7336 uid_t ruid
, euid
, suid
;
7337 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7338 if (!is_error(ret
)) {
7339 if (put_user_u32(ruid
, arg1
)
7340 || put_user_u32(euid
, arg2
)
7341 || put_user_u32(suid
, arg3
))
7347 #ifdef TARGET_NR_setresgid32
7348 case TARGET_NR_setresgid32
:
7349 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
7352 #ifdef TARGET_NR_getresgid32
7353 case TARGET_NR_getresgid32
:
7355 gid_t rgid
, egid
, sgid
;
7356 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7357 if (!is_error(ret
)) {
7358 if (put_user_u32(rgid
, arg1
)
7359 || put_user_u32(egid
, arg2
)
7360 || put_user_u32(sgid
, arg3
))
7366 #ifdef TARGET_NR_chown32
7367 case TARGET_NR_chown32
:
7368 if (!(p
= lock_user_string(arg1
)))
7370 ret
= get_errno(chown(p
, arg2
, arg3
));
7371 unlock_user(p
, arg1
, 0);
7374 #ifdef TARGET_NR_setuid32
7375 case TARGET_NR_setuid32
:
7376 ret
= get_errno(setuid(arg1
));
7379 #ifdef TARGET_NR_setgid32
7380 case TARGET_NR_setgid32
:
7381 ret
= get_errno(setgid(arg1
));
7384 #ifdef TARGET_NR_setfsuid32
7385 case TARGET_NR_setfsuid32
:
7386 ret
= get_errno(setfsuid(arg1
));
7389 #ifdef TARGET_NR_setfsgid32
7390 case TARGET_NR_setfsgid32
:
7391 ret
= get_errno(setfsgid(arg1
));
7395 case TARGET_NR_pivot_root
:
7397 #ifdef TARGET_NR_mincore
7398 case TARGET_NR_mincore
:
7401 ret
= -TARGET_EFAULT
;
7402 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
7404 if (!(p
= lock_user_string(arg3
)))
7406 ret
= get_errno(mincore(a
, arg2
, p
));
7407 unlock_user(p
, arg3
, ret
);
7409 unlock_user(a
, arg1
, 0);
7413 #ifdef TARGET_NR_arm_fadvise64_64
7414 case TARGET_NR_arm_fadvise64_64
:
7417 * arm_fadvise64_64 looks like fadvise64_64 but
7418 * with different argument order
7426 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7427 #ifdef TARGET_NR_fadvise64_64
7428 case TARGET_NR_fadvise64_64
:
7430 #ifdef TARGET_NR_fadvise64
7431 case TARGET_NR_fadvise64
:
7435 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
7436 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
7437 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
7438 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
7442 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
7445 #ifdef TARGET_NR_madvise
7446 case TARGET_NR_madvise
:
7447 /* A straight passthrough may not be safe because qemu sometimes
7448 turns private flie-backed mappings into anonymous mappings.
7449 This will break MADV_DONTNEED.
7450 This is a hint, so ignoring and returning success is ok. */
7454 #if TARGET_ABI_BITS == 32
7455 case TARGET_NR_fcntl64
:
7459 struct target_flock64
*target_fl
;
7461 struct target_eabi_flock64
*target_efl
;
7464 cmd
= target_to_host_fcntl_cmd(arg2
);
7465 if (cmd
== -TARGET_EINVAL
)
7469 case TARGET_F_GETLK64
:
7471 if (((CPUARMState
*)cpu_env
)->eabi
) {
7472 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
7474 fl
.l_type
= tswap16(target_efl
->l_type
);
7475 fl
.l_whence
= tswap16(target_efl
->l_whence
);
7476 fl
.l_start
= tswap64(target_efl
->l_start
);
7477 fl
.l_len
= tswap64(target_efl
->l_len
);
7478 fl
.l_pid
= tswap32(target_efl
->l_pid
);
7479 unlock_user_struct(target_efl
, arg3
, 0);
7483 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
7485 fl
.l_type
= tswap16(target_fl
->l_type
);
7486 fl
.l_whence
= tswap16(target_fl
->l_whence
);
7487 fl
.l_start
= tswap64(target_fl
->l_start
);
7488 fl
.l_len
= tswap64(target_fl
->l_len
);
7489 fl
.l_pid
= tswap32(target_fl
->l_pid
);
7490 unlock_user_struct(target_fl
, arg3
, 0);
7492 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
7495 if (((CPUARMState
*)cpu_env
)->eabi
) {
7496 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
7498 target_efl
->l_type
= tswap16(fl
.l_type
);
7499 target_efl
->l_whence
= tswap16(fl
.l_whence
);
7500 target_efl
->l_start
= tswap64(fl
.l_start
);
7501 target_efl
->l_len
= tswap64(fl
.l_len
);
7502 target_efl
->l_pid
= tswap32(fl
.l_pid
);
7503 unlock_user_struct(target_efl
, arg3
, 1);
7507 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
7509 target_fl
->l_type
= tswap16(fl
.l_type
);
7510 target_fl
->l_whence
= tswap16(fl
.l_whence
);
7511 target_fl
->l_start
= tswap64(fl
.l_start
);
7512 target_fl
->l_len
= tswap64(fl
.l_len
);
7513 target_fl
->l_pid
= tswap32(fl
.l_pid
);
7514 unlock_user_struct(target_fl
, arg3
, 1);
7519 case TARGET_F_SETLK64
:
7520 case TARGET_F_SETLKW64
:
7522 if (((CPUARMState
*)cpu_env
)->eabi
) {
7523 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
7525 fl
.l_type
= tswap16(target_efl
->l_type
);
7526 fl
.l_whence
= tswap16(target_efl
->l_whence
);
7527 fl
.l_start
= tswap64(target_efl
->l_start
);
7528 fl
.l_len
= tswap64(target_efl
->l_len
);
7529 fl
.l_pid
= tswap32(target_efl
->l_pid
);
7530 unlock_user_struct(target_efl
, arg3
, 0);
7534 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
7536 fl
.l_type
= tswap16(target_fl
->l_type
);
7537 fl
.l_whence
= tswap16(target_fl
->l_whence
);
7538 fl
.l_start
= tswap64(target_fl
->l_start
);
7539 fl
.l_len
= tswap64(target_fl
->l_len
);
7540 fl
.l_pid
= tswap32(target_fl
->l_pid
);
7541 unlock_user_struct(target_fl
, arg3
, 0);
7543 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
7546 ret
= do_fcntl(arg1
, arg2
, arg3
);
7552 #ifdef TARGET_NR_cacheflush
7553 case TARGET_NR_cacheflush
:
7554 /* self-modifying code is handled automatically, so nothing needed */
7558 #ifdef TARGET_NR_security
7559 case TARGET_NR_security
:
7562 #ifdef TARGET_NR_getpagesize
7563 case TARGET_NR_getpagesize
:
7564 ret
= TARGET_PAGE_SIZE
;
7567 case TARGET_NR_gettid
:
7568 ret
= get_errno(gettid());
7570 #ifdef TARGET_NR_readahead
7571 case TARGET_NR_readahead
:
7572 #if TARGET_ABI_BITS == 32
7574 if (((CPUARMState
*)cpu_env
)->eabi
)
7581 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
7583 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
7587 #ifdef TARGET_NR_setxattr
7588 case TARGET_NR_setxattr
:
7589 case TARGET_NR_lsetxattr
:
7590 case TARGET_NR_fsetxattr
:
7591 case TARGET_NR_getxattr
:
7592 case TARGET_NR_lgetxattr
:
7593 case TARGET_NR_fgetxattr
:
7594 case TARGET_NR_listxattr
:
7595 case TARGET_NR_llistxattr
:
7596 case TARGET_NR_flistxattr
:
7597 case TARGET_NR_removexattr
:
7598 case TARGET_NR_lremovexattr
:
7599 case TARGET_NR_fremovexattr
:
7600 ret
= -TARGET_EOPNOTSUPP
;
7603 #ifdef TARGET_NR_set_thread_area
7604 case TARGET_NR_set_thread_area
:
7605 #if defined(TARGET_MIPS)
7606 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
7609 #elif defined(TARGET_CRIS)
7611 ret
= -TARGET_EINVAL
;
7613 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
7617 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7618 ret
= do_set_thread_area(cpu_env
, arg1
);
7621 goto unimplemented_nowarn
;
7624 #ifdef TARGET_NR_get_thread_area
7625 case TARGET_NR_get_thread_area
:
7626 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7627 ret
= do_get_thread_area(cpu_env
, arg1
);
7629 goto unimplemented_nowarn
;
7632 #ifdef TARGET_NR_getdomainname
7633 case TARGET_NR_getdomainname
:
7634 goto unimplemented_nowarn
;
7637 #ifdef TARGET_NR_clock_gettime
7638 case TARGET_NR_clock_gettime
:
7641 ret
= get_errno(clock_gettime(arg1
, &ts
));
7642 if (!is_error(ret
)) {
7643 host_to_target_timespec(arg2
, &ts
);
7648 #ifdef TARGET_NR_clock_getres
7649 case TARGET_NR_clock_getres
:
7652 ret
= get_errno(clock_getres(arg1
, &ts
));
7653 if (!is_error(ret
)) {
7654 host_to_target_timespec(arg2
, &ts
);
7659 #ifdef TARGET_NR_clock_nanosleep
7660 case TARGET_NR_clock_nanosleep
:
7663 target_to_host_timespec(&ts
, arg3
);
7664 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
7666 host_to_target_timespec(arg4
, &ts
);
7671 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7672 case TARGET_NR_set_tid_address
:
7673 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
7677 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7678 case TARGET_NR_tkill
:
7679 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
7683 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7684 case TARGET_NR_tgkill
:
7685 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
7686 target_to_host_signal(arg3
)));
7690 #ifdef TARGET_NR_set_robust_list
7691 case TARGET_NR_set_robust_list
:
7692 goto unimplemented_nowarn
;
7695 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7696 case TARGET_NR_utimensat
:
7698 struct timespec
*tsp
, ts
[2];
7702 target_to_host_timespec(ts
, arg3
);
7703 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
7707 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
7709 if (!(p
= lock_user_string(arg2
))) {
7710 ret
= -TARGET_EFAULT
;
7713 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
7714 unlock_user(p
, arg2
, 0);
7719 #if defined(CONFIG_USE_NPTL)
7720 case TARGET_NR_futex
:
7721 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7724 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7725 case TARGET_NR_inotify_init
:
7726 ret
= get_errno(sys_inotify_init());
7729 #ifdef CONFIG_INOTIFY1
7730 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7731 case TARGET_NR_inotify_init1
:
7732 ret
= get_errno(sys_inotify_init1(arg1
));
7736 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7737 case TARGET_NR_inotify_add_watch
:
7738 p
= lock_user_string(arg2
);
7739 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
7740 unlock_user(p
, arg2
, 0);
7743 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7744 case TARGET_NR_inotify_rm_watch
:
7745 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
7749 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7750 case TARGET_NR_mq_open
:
7752 struct mq_attr posix_mq_attr
;
7754 p
= lock_user_string(arg1
- 1);
7756 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
7757 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
7758 unlock_user (p
, arg1
, 0);
7762 case TARGET_NR_mq_unlink
:
7763 p
= lock_user_string(arg1
- 1);
7764 ret
= get_errno(mq_unlink(p
));
7765 unlock_user (p
, arg1
, 0);
7768 case TARGET_NR_mq_timedsend
:
7772 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
7774 target_to_host_timespec(&ts
, arg5
);
7775 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
7776 host_to_target_timespec(arg5
, &ts
);
7779 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
7780 unlock_user (p
, arg2
, arg3
);
7784 case TARGET_NR_mq_timedreceive
:
7789 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
7791 target_to_host_timespec(&ts
, arg5
);
7792 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
7793 host_to_target_timespec(arg5
, &ts
);
7796 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
7797 unlock_user (p
, arg2
, arg3
);
7799 put_user_u32(prio
, arg4
);
7803 /* Not implemented for now... */
7804 /* case TARGET_NR_mq_notify: */
7807 case TARGET_NR_mq_getsetattr
:
7809 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
7812 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
7813 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
7816 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
7817 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
7824 #ifdef CONFIG_SPLICE
7825 #ifdef TARGET_NR_tee
7828 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
7832 #ifdef TARGET_NR_splice
7833 case TARGET_NR_splice
:
7835 loff_t loff_in
, loff_out
;
7836 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
7838 get_user_u64(loff_in
, arg2
);
7839 ploff_in
= &loff_in
;
7842 get_user_u64(loff_out
, arg2
);
7843 ploff_out
= &loff_out
;
7845 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
7849 #ifdef TARGET_NR_vmsplice
7850 case TARGET_NR_vmsplice
:
7855 vec
= alloca(count
* sizeof(struct iovec
));
7856 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
7858 ret
= get_errno(vmsplice(arg1
, vec
, count
, arg4
));
7859 unlock_iovec(vec
, arg2
, count
, 0);
7863 #endif /* CONFIG_SPLICE */
7864 #ifdef CONFIG_EVENTFD
7865 #if defined(TARGET_NR_eventfd)
7866 case TARGET_NR_eventfd
:
7867 ret
= get_errno(eventfd(arg1
, 0));
7870 #if defined(TARGET_NR_eventfd2)
7871 case TARGET_NR_eventfd2
:
7872 ret
= get_errno(eventfd(arg1
, arg2
));
7875 #endif /* CONFIG_EVENTFD */
7876 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7877 case TARGET_NR_fallocate
:
7878 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
7881 #if defined(CONFIG_SYNC_FILE_RANGE)
7882 #if defined(TARGET_NR_sync_file_range)
7883 case TARGET_NR_sync_file_range
:
7884 #if TARGET_ABI_BITS == 32
7885 #if defined(TARGET_MIPS)
7886 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
7887 target_offset64(arg5
, arg6
), arg7
));
7889 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
7890 target_offset64(arg4
, arg5
), arg6
));
7891 #endif /* !TARGET_MIPS */
7893 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
7897 #if defined(TARGET_NR_sync_file_range2)
7898 case TARGET_NR_sync_file_range2
:
7899 /* This is like sync_file_range but the arguments are reordered */
7900 #if TARGET_ABI_BITS == 32
7901 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
7902 target_offset64(arg5
, arg6
), arg2
));
7904 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
7909 #if defined(CONFIG_EPOLL)
7910 #if defined(TARGET_NR_epoll_create)
7911 case TARGET_NR_epoll_create
:
7912 ret
= get_errno(epoll_create(arg1
));
7915 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7916 case TARGET_NR_epoll_create1
:
7917 ret
= get_errno(epoll_create1(arg1
));
7920 #if defined(TARGET_NR_epoll_ctl)
7921 case TARGET_NR_epoll_ctl
:
7923 struct epoll_event ep
;
7924 struct epoll_event
*epp
= 0;
7926 struct target_epoll_event
*target_ep
;
7927 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
7930 ep
.events
= tswap32(target_ep
->events
);
7931 /* The epoll_data_t union is just opaque data to the kernel,
7932 * so we transfer all 64 bits across and need not worry what
7933 * actual data type it is.
7935 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
7936 unlock_user_struct(target_ep
, arg4
, 0);
7939 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
7944 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7945 #define IMPLEMENT_EPOLL_PWAIT
7947 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7948 #if defined(TARGET_NR_epoll_wait)
7949 case TARGET_NR_epoll_wait
:
7951 #if defined(IMPLEMENT_EPOLL_PWAIT)
7952 case TARGET_NR_epoll_pwait
:
7955 struct target_epoll_event
*target_ep
;
7956 struct epoll_event
*ep
;
7958 int maxevents
= arg3
;
7961 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
7962 maxevents
* sizeof(struct target_epoll_event
), 1);
7967 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
7970 #if defined(IMPLEMENT_EPOLL_PWAIT)
7971 case TARGET_NR_epoll_pwait
:
7973 target_sigset_t
*target_set
;
7974 sigset_t _set
, *set
= &_set
;
7977 target_set
= lock_user(VERIFY_READ
, arg5
,
7978 sizeof(target_sigset_t
), 1);
7980 unlock_user(target_ep
, arg2
, 0);
7983 target_to_host_sigset(set
, target_set
);
7984 unlock_user(target_set
, arg5
, 0);
7989 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
7993 #if defined(TARGET_NR_epoll_wait)
7994 case TARGET_NR_epoll_wait
:
7995 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
7999 ret
= -TARGET_ENOSYS
;
8001 if (!is_error(ret
)) {
8003 for (i
= 0; i
< ret
; i
++) {
8004 target_ep
[i
].events
= tswap32(ep
[i
].events
);
8005 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
8008 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
8013 #ifdef TARGET_NR_prlimit64
8014 case TARGET_NR_prlimit64
:
8016 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8017 struct target_rlimit64
*target_rnew
, *target_rold
;
8018 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
8020 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
8023 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
8024 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
8025 unlock_user_struct(target_rnew
, arg3
, 0);
8029 ret
= get_errno(sys_prlimit64(arg1
, arg2
, rnewp
, arg4
? &rold
: 0));
8030 if (!is_error(ret
) && arg4
) {
8031 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
8034 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
8035 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
8036 unlock_user_struct(target_rold
, arg4
, 1);
8043 gemu_log("qemu: Unsupported syscall: %d\n", num
);
8044 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8045 unimplemented_nowarn
:
8047 ret
= -TARGET_ENOSYS
;
8052 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
8055 print_syscall_ret(num
, ret
);
8058 ret
= -TARGET_EFAULT
;