4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
31 #include <sys/types.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
45 int __clone2(int (*fn
)(void *), void *child_stack_base
,
46 size_t stack_size
, int flags
, void *arg
, ...);
48 #include <sys/socket.h>
52 #include <sys/times.h>
55 #include <sys/statfs.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <qemu-common.h>
67 #include <sys/eventfd.h>
70 #include <sys/epoll.h>
73 #define termios host_termios
74 #define winsize host_winsize
75 #define termio host_termio
76 #define sgttyb host_sgttyb /* same as target */
77 #define tchars host_tchars /* same as target */
78 #define ltchars host_ltchars /* same as target */
80 #include <linux/termios.h>
81 #include <linux/unistd.h>
82 #include <linux/utsname.h>
83 #include <linux/cdrom.h>
84 #include <linux/hdreg.h>
85 #include <linux/soundcard.h>
87 #include <linux/mtio.h>
89 #if defined(CONFIG_FIEMAP)
90 #include <linux/fiemap.h>
94 #include "linux_loop.h"
95 #include "cpu-uname.h"
98 #include "qemu-common.h"
100 #if defined(CONFIG_USE_NPTL)
101 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
102 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
104 /* XXX: Hardcode the above values. */
105 #define CLONE_NPTL_FLAGS2 0
110 //#include <linux/msdos_fs.h>
111 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
112 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
123 #define _syscall0(type,name) \
124 static type name (void) \
126 return syscall(__NR_##name); \
129 #define _syscall1(type,name,type1,arg1) \
130 static type name (type1 arg1) \
132 return syscall(__NR_##name, arg1); \
135 #define _syscall2(type,name,type1,arg1,type2,arg2) \
136 static type name (type1 arg1,type2 arg2) \
138 return syscall(__NR_##name, arg1, arg2); \
141 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
142 static type name (type1 arg1,type2 arg2,type3 arg3) \
144 return syscall(__NR_##name, arg1, arg2, arg3); \
147 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
148 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
150 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
153 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
155 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
157 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
161 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
162 type5,arg5,type6,arg6) \
163 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
166 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
170 #define __NR_sys_uname __NR_uname
171 #define __NR_sys_faccessat __NR_faccessat
172 #define __NR_sys_fchmodat __NR_fchmodat
173 #define __NR_sys_fchownat __NR_fchownat
174 #define __NR_sys_fstatat64 __NR_fstatat64
175 #define __NR_sys_futimesat __NR_futimesat
176 #define __NR_sys_getcwd1 __NR_getcwd
177 #define __NR_sys_getdents __NR_getdents
178 #define __NR_sys_getdents64 __NR_getdents64
179 #define __NR_sys_getpriority __NR_getpriority
180 #define __NR_sys_linkat __NR_linkat
181 #define __NR_sys_mkdirat __NR_mkdirat
182 #define __NR_sys_mknodat __NR_mknodat
183 #define __NR_sys_newfstatat __NR_newfstatat
184 #define __NR_sys_openat __NR_openat
185 #define __NR_sys_readlinkat __NR_readlinkat
186 #define __NR_sys_renameat __NR_renameat
187 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
188 #define __NR_sys_symlinkat __NR_symlinkat
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_tgkill __NR_tgkill
191 #define __NR_sys_tkill __NR_tkill
192 #define __NR_sys_unlinkat __NR_unlinkat
193 #define __NR_sys_utimensat __NR_utimensat
194 #define __NR_sys_futex __NR_futex
195 #define __NR_sys_inotify_init __NR_inotify_init
196 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
197 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
199 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
200 #define __NR__llseek __NR_lseek
204 _syscall0(int, gettid
)
206 /* This is a replacement for the host gettid() and must return a host
208 static int gettid(void) {
212 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
213 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
214 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
216 _syscall2(int, sys_getpriority
, int, which
, int, who
);
217 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
218 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
219 loff_t
*, res
, uint
, wh
);
221 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
222 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
223 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
224 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
226 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
227 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
229 #ifdef __NR_exit_group
230 _syscall1(int,exit_group
,int,error_code
)
232 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
233 _syscall1(int,set_tid_address
,int *,tidptr
)
235 #if defined(CONFIG_USE_NPTL)
236 #if defined(TARGET_NR_futex) && defined(__NR_futex)
237 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
238 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
241 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
242 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
243 unsigned long *, user_mask_ptr
);
244 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
245 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
246 unsigned long *, user_mask_ptr
);
248 static bitmask_transtbl fcntl_flags_tbl
[] = {
249 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
250 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
251 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
252 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
253 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
254 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
255 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
256 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
257 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
258 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
259 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
260 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
261 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
262 #if defined(O_DIRECT)
263 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
/*
 * Copy one utsname string field with guaranteed NUL termination.
 * __NEW_UTS_LEN does not include the terminating null byte, so the
 * destination buffers are __NEW_UTS_LEN + 1 bytes long.
 */
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

/*
 * Emulate the uname syscall: fill a kernel-style 'struct new_utsname'
 * from the host's 'struct utsname'.  The structs are field-for-field
 * compatible but may differ in field sizes, hence the bounded
 * per-field copy above.
 *
 * Returns 0 on success, -1 on failure with errno set by uname(3).
 */
static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  /* bzero() is legacy (removed in POSIX.1-2008); memset is the
     portable equivalent. */
  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);
}

#undef COPY_UTSNAME_FIELD
/*
 * Emulate the getcwd syscall: store the current working directory in
 * 'buf' (at most 'size' bytes).  On success the kernel convention is
 * followed and the string length *including* the terminating NUL is
 * returned; on failure -1 is returned with errno set by getcwd(3).
 */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        /* getcwd() has already set errno for us */
        return -1;
    }
    return (int)strlen(cwd) + 1;
}
313 * Host system seems to have atfile syscall stubs available. We
314 * now enable them one by one as specified by target syscall_nr.h.
317 #ifdef TARGET_NR_faccessat
/*
 * Host wrapper for faccessat(2): test accessibility of 'pathname'
 * relative to the directory fd 'dirfd' with the access mask 'mode'
 * (F_OK / R_OK / W_OK / X_OK).  The trailing flags argument is always
 * passed as 0.  Returns 0 on success, -1 with errno set on error.
 */
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
    return faccessat(dirfd, pathname, mode, 0);
}
323 #ifdef TARGET_NR_fchmodat
/*
 * Host wrapper for fchmodat(2): change the mode of 'pathname'
 * relative to directory fd 'dirfd'.  Flags are always passed as 0.
 * Returns 0 on success, -1 with errno set on error.
 */
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
    return fchmodat(dirfd, pathname, mode, 0);
}
329 #if defined(TARGET_NR_fchownat) && defined(USE_UID16)
/*
 * Host wrapper for fchownat(2): change the owner/group of 'pathname'
 * relative to directory fd 'dirfd', honouring 'flags'
 * (e.g. AT_SYMLINK_NOFOLLOW).  Returns 0 on success, -1 with errno
 * set on error.
 */
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
    return fchownat(dirfd, pathname, owner, group, flags);
}
336 #ifdef __NR_fstatat64
/*
 * Host wrapper used when the host exposes __NR_fstatat64: stat
 * 'pathname' relative to directory fd 'dirfd' into 'buf', honouring
 * 'flags' (e.g. AT_SYMLINK_NOFOLLOW).  Returns 0 on success, -1 with
 * errno set on error.
 */
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
    return fstatat(dirfd, pathname, buf, flags);
}
343 #ifdef __NR_newfstatat
/*
 * Host wrapper used when the host exposes __NR_newfstatat; identical
 * host behaviour to sys_fstatat64 — both map onto the libc fstatat().
 * Returns 0 on success, -1 with errno set on error.
 */
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
    return fstatat(dirfd, pathname, buf, flags);
}
350 #ifdef TARGET_NR_futimesat
/*
 * Host wrapper for futimesat(2): set the access/modification times of
 * 'pathname' relative to directory fd 'dirfd'.  A NULL 'times' means
 * "now".  Returns 0 on success, -1 with errno set on error.
 */
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
    return futimesat(dirfd, pathname, times);
}
357 #ifdef TARGET_NR_linkat
/*
 * Host wrapper for linkat(2): create a hard link to 'oldpath'
 * (relative to 'olddirfd') at 'newpath' (relative to 'newdirfd'),
 * honouring 'flags'.  Returns 0 on success, -1 with errno set on
 * error.
 */
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
    return linkat(olddirfd, oldpath, newdirfd, newpath, flags);
}
364 #ifdef TARGET_NR_mkdirat
/*
 * Host wrapper for mkdirat(2): create directory 'pathname' relative
 * to directory fd 'dirfd' with permission bits 'mode'.  Returns 0 on
 * success, -1 with errno set on error.
 */
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
    return mkdirat(dirfd, pathname, mode);
}
370 #ifdef TARGET_NR_mknodat
/*
 * Host wrapper for mknodat(2): create a filesystem node 'pathname'
 * relative to directory fd 'dirfd' with type/permissions 'mode' and
 * device number 'dev' (used for block/character specials).  Returns 0
 * on success, -1 with errno set on error.
 */
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
    return mknodat(dirfd, pathname, mode, dev);
}
377 #ifdef TARGET_NR_openat
378 static int sys_openat(int dirfd
, const char *pathname
, int flags
, ...)
381 * open(2) has extra parameter 'mode' when called with
384 if ((flags
& O_CREAT
) != 0) {
389 * Get the 'mode' parameter and translate it to
393 mode
= va_arg(ap
, mode_t
);
394 mode
= target_to_host_bitmask(mode
, fcntl_flags_tbl
);
397 return (openat(dirfd
, pathname
, flags
, mode
));
399 return (openat(dirfd
, pathname
, flags
));
402 #ifdef TARGET_NR_readlinkat
/*
 * Host wrapper for readlinkat(2): read the target of symlink
 * 'pathname' relative to directory fd 'dirfd' into 'buf' (at most
 * 'bufsiz' bytes, not NUL-terminated).  Returns the number of bytes
 * placed in 'buf', or -1 with errno set on error.
 */
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
    return readlinkat(dirfd, pathname, buf, bufsiz);
}
408 #ifdef TARGET_NR_renameat
/*
 * Host wrapper for renameat(2): rename 'oldpath' (relative to
 * 'olddirfd') to 'newpath' (relative to 'newdirfd').  Returns 0 on
 * success, -1 with errno set on error.
 */
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
    return renameat(olddirfd, oldpath, newdirfd, newpath);
}
415 #ifdef TARGET_NR_symlinkat
/*
 * Host wrapper for symlinkat(2): create symlink 'newpath' (relative
 * to directory fd 'newdirfd') pointing at 'oldpath'.  Returns 0 on
 * success, -1 with errno set on error.
 */
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
    return symlinkat(oldpath, newdirfd, newpath);
}
421 #ifdef TARGET_NR_unlinkat
/*
 * Host wrapper for unlinkat(2): remove 'pathname' relative to
 * directory fd 'dirfd'; with AT_REMOVEDIR in 'flags' it removes a
 * directory instead.  Returns 0 on success, -1 with errno set on
 * error.
 */
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
    return unlinkat(dirfd, pathname, flags);
}
427 #else /* !CONFIG_ATFILE */
430 * Try direct syscalls instead
432 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
433 _syscall3(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
)
435 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
436 _syscall3(int,sys_fchmodat
,int,dirfd
,const char *,pathname
, mode_t
,mode
)
438 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
439 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
440 uid_t
,owner
,gid_t
,group
,int,flags
)
442 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
443 defined(__NR_fstatat64)
444 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
445 struct stat
*,buf
,int,flags
)
447 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
448 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
449 const struct timeval
*,times
)
451 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
452 defined(__NR_newfstatat)
453 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
454 struct stat
*,buf
,int,flags
)
456 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
457 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
458 int,newdirfd
,const char *,newpath
,int,flags
)
460 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
461 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
463 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
464 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
465 mode_t
,mode
,dev_t
,dev
)
467 #if defined(TARGET_NR_openat) && defined(__NR_openat)
468 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
470 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
471 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
472 char *,buf
,size_t,bufsize
)
474 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
475 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
476 int,newdirfd
,const char *,newpath
)
478 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
479 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
480 int,newdirfd
,const char *,newpath
)
482 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
483 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
486 #endif /* CONFIG_ATFILE */
488 #ifdef CONFIG_UTIMENSAT
/*
 * Host wrapper for utimensat(2)/futimens(3): set timestamps on a
 * file.  A NULL 'pathname' selects the futimens() form and operates
 * directly on the open fd 'dirfd' (matching the kernel's utimensat
 * NULL-path behaviour); otherwise 'pathname' is resolved relative to
 * 'dirfd' with 'flags'.  Returns 0 on success, -1 with errno set.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    }
    return utimensat(dirfd, pathname, times, flags);
}
498 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
499 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
500 const struct timespec
*,tsp
,int,flags
)
502 #endif /* CONFIG_UTIMENSAT */
504 #ifdef CONFIG_INOTIFY
505 #include <sys/inotify.h>
507 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/*
 * Host wrapper for inotify_init(2): create an inotify instance and
 * return its file descriptor, or -1 with errno set on error.
 */
static int sys_inotify_init(void)
{
    return inotify_init();
}
513 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/*
 * Host wrapper for inotify_add_watch(2): watch 'pathname' for the
 * events in 'mask' on inotify instance 'fd'.  Returns the watch
 * descriptor, or -1 with errno set on error.
 */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
519 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/*
 * Host wrapper for inotify_rm_watch(2): remove watch descriptor 'wd'
 * from inotify instance 'fd'.  Returns 0 on success, -1 with errno
 * set on error.
 */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
525 #ifdef CONFIG_INOTIFY1
526 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/*
 * Host wrapper for inotify_init1(2): like inotify_init() but accepts
 * IN_NONBLOCK / IN_CLOEXEC in 'flags'.  Returns the new fd, or -1
 * with errno set on error.
 */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
534 /* Userspace can usually survive runtime without inotify */
535 #undef TARGET_NR_inotify_init
536 #undef TARGET_NR_inotify_init1
537 #undef TARGET_NR_inotify_add_watch
538 #undef TARGET_NR_inotify_rm_watch
539 #endif /* CONFIG_INOTIFY */
541 #if defined(TARGET_NR_ppoll)
543 # define __NR_ppoll -1
545 #define __NR_sys_ppoll __NR_ppoll
546 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
547 struct timespec
*, timeout
, const __sigset_t
*, sigmask
,
551 extern int personality(int);
552 extern int flock(int, int);
553 extern int setfsuid(int);
554 extern int setfsgid(int);
555 extern int setgroups(int, gid_t
*);
557 #define ERRNO_TABLE_SIZE 1200
559 /* target_to_host_errno_table[] is initialized from
560 * host_to_target_errno_table[] in syscall_init(). */
561 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
565 * This list is the union of errno values overridden in asm-<arch>/errno.h
566 * minus the errnos that are not actually generic to all archs.
568 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
569 [EIDRM
] = TARGET_EIDRM
,
570 [ECHRNG
] = TARGET_ECHRNG
,
571 [EL2NSYNC
] = TARGET_EL2NSYNC
,
572 [EL3HLT
] = TARGET_EL3HLT
,
573 [EL3RST
] = TARGET_EL3RST
,
574 [ELNRNG
] = TARGET_ELNRNG
,
575 [EUNATCH
] = TARGET_EUNATCH
,
576 [ENOCSI
] = TARGET_ENOCSI
,
577 [EL2HLT
] = TARGET_EL2HLT
,
578 [EDEADLK
] = TARGET_EDEADLK
,
579 [ENOLCK
] = TARGET_ENOLCK
,
580 [EBADE
] = TARGET_EBADE
,
581 [EBADR
] = TARGET_EBADR
,
582 [EXFULL
] = TARGET_EXFULL
,
583 [ENOANO
] = TARGET_ENOANO
,
584 [EBADRQC
] = TARGET_EBADRQC
,
585 [EBADSLT
] = TARGET_EBADSLT
,
586 [EBFONT
] = TARGET_EBFONT
,
587 [ENOSTR
] = TARGET_ENOSTR
,
588 [ENODATA
] = TARGET_ENODATA
,
589 [ETIME
] = TARGET_ETIME
,
590 [ENOSR
] = TARGET_ENOSR
,
591 [ENONET
] = TARGET_ENONET
,
592 [ENOPKG
] = TARGET_ENOPKG
,
593 [EREMOTE
] = TARGET_EREMOTE
,
594 [ENOLINK
] = TARGET_ENOLINK
,
595 [EADV
] = TARGET_EADV
,
596 [ESRMNT
] = TARGET_ESRMNT
,
597 [ECOMM
] = TARGET_ECOMM
,
598 [EPROTO
] = TARGET_EPROTO
,
599 [EDOTDOT
] = TARGET_EDOTDOT
,
600 [EMULTIHOP
] = TARGET_EMULTIHOP
,
601 [EBADMSG
] = TARGET_EBADMSG
,
602 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
603 [EOVERFLOW
] = TARGET_EOVERFLOW
,
604 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
605 [EBADFD
] = TARGET_EBADFD
,
606 [EREMCHG
] = TARGET_EREMCHG
,
607 [ELIBACC
] = TARGET_ELIBACC
,
608 [ELIBBAD
] = TARGET_ELIBBAD
,
609 [ELIBSCN
] = TARGET_ELIBSCN
,
610 [ELIBMAX
] = TARGET_ELIBMAX
,
611 [ELIBEXEC
] = TARGET_ELIBEXEC
,
612 [EILSEQ
] = TARGET_EILSEQ
,
613 [ENOSYS
] = TARGET_ENOSYS
,
614 [ELOOP
] = TARGET_ELOOP
,
615 [ERESTART
] = TARGET_ERESTART
,
616 [ESTRPIPE
] = TARGET_ESTRPIPE
,
617 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
618 [EUSERS
] = TARGET_EUSERS
,
619 [ENOTSOCK
] = TARGET_ENOTSOCK
,
620 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
621 [EMSGSIZE
] = TARGET_EMSGSIZE
,
622 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
623 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
624 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
625 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
626 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
627 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
628 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
629 [EADDRINUSE
] = TARGET_EADDRINUSE
,
630 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
631 [ENETDOWN
] = TARGET_ENETDOWN
,
632 [ENETUNREACH
] = TARGET_ENETUNREACH
,
633 [ENETRESET
] = TARGET_ENETRESET
,
634 [ECONNABORTED
] = TARGET_ECONNABORTED
,
635 [ECONNRESET
] = TARGET_ECONNRESET
,
636 [ENOBUFS
] = TARGET_ENOBUFS
,
637 [EISCONN
] = TARGET_EISCONN
,
638 [ENOTCONN
] = TARGET_ENOTCONN
,
639 [EUCLEAN
] = TARGET_EUCLEAN
,
640 [ENOTNAM
] = TARGET_ENOTNAM
,
641 [ENAVAIL
] = TARGET_ENAVAIL
,
642 [EISNAM
] = TARGET_EISNAM
,
643 [EREMOTEIO
] = TARGET_EREMOTEIO
,
644 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
645 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
646 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
647 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
648 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
649 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
650 [EALREADY
] = TARGET_EALREADY
,
651 [EINPROGRESS
] = TARGET_EINPROGRESS
,
652 [ESTALE
] = TARGET_ESTALE
,
653 [ECANCELED
] = TARGET_ECANCELED
,
654 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
655 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
657 [ENOKEY
] = TARGET_ENOKEY
,
660 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
663 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
666 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
669 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
671 #ifdef ENOTRECOVERABLE
672 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
676 static inline int host_to_target_errno(int err
)
678 if(host_to_target_errno_table
[err
])
679 return host_to_target_errno_table
[err
];
683 static inline int target_to_host_errno(int err
)
685 if (target_to_host_errno_table
[err
])
686 return target_to_host_errno_table
[err
];
690 static inline abi_long
get_errno(abi_long ret
)
693 return -host_to_target_errno(errno
);
698 static inline int is_error(abi_long ret
)
700 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
/*
 * Return the host strerror() text for a *target* errno value by first
 * mapping it back to the host's numbering.
 */
char *target_strerror(int err)
{
    int host_err = target_to_host_errno(err);

    return strerror(host_err);
}
708 static abi_ulong target_brk
;
709 static abi_ulong target_original_brk
;
711 void target_set_brk(abi_ulong new_brk
)
713 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
716 /* do_brk() must return target values and target errnos. */
717 abi_long
do_brk(abi_ulong new_brk
)
720 abi_long mapped_addr
;
725 if (new_brk
< target_original_brk
)
728 brk_page
= HOST_PAGE_ALIGN(target_brk
);
730 /* If the new brk is less than this, set it and we're done... */
731 if (new_brk
< brk_page
) {
732 target_brk
= new_brk
;
736 /* We need to allocate more memory after the brk... */
737 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
+ 1);
738 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
739 PROT_READ
|PROT_WRITE
,
740 MAP_ANON
|MAP_FIXED
|MAP_PRIVATE
, 0, 0));
742 #if defined(TARGET_ALPHA)
743 /* We (partially) emulate OSF/1 on Alpha, which requires we
744 return a proper errno, not an unchanged brk value. */
745 if (is_error(mapped_addr
)) {
746 return -TARGET_ENOMEM
;
750 if (!is_error(mapped_addr
)) {
751 target_brk
= new_brk
;
756 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
757 abi_ulong target_fds_addr
,
761 abi_ulong b
, *target_fds
;
763 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
764 if (!(target_fds
= lock_user(VERIFY_READ
,
766 sizeof(abi_ulong
) * nw
,
768 return -TARGET_EFAULT
;
772 for (i
= 0; i
< nw
; i
++) {
773 /* grab the abi_ulong */
774 __get_user(b
, &target_fds
[i
]);
775 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
776 /* check the bit inside the abi_ulong */
783 unlock_user(target_fds
, target_fds_addr
, 0);
788 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
794 abi_ulong
*target_fds
;
796 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
797 if (!(target_fds
= lock_user(VERIFY_WRITE
,
799 sizeof(abi_ulong
) * nw
,
801 return -TARGET_EFAULT
;
804 for (i
= 0; i
< nw
; i
++) {
806 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
807 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
810 __put_user(v
, &target_fds
[i
]);
813 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
818 #if defined(__alpha__)
824 static inline abi_long
host_to_target_clock_t(long ticks
)
826 #if HOST_HZ == TARGET_HZ
829 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
833 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
834 const struct rusage
*rusage
)
836 struct target_rusage
*target_rusage
;
838 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
839 return -TARGET_EFAULT
;
840 target_rusage
->ru_utime
.tv_sec
= tswapl(rusage
->ru_utime
.tv_sec
);
841 target_rusage
->ru_utime
.tv_usec
= tswapl(rusage
->ru_utime
.tv_usec
);
842 target_rusage
->ru_stime
.tv_sec
= tswapl(rusage
->ru_stime
.tv_sec
);
843 target_rusage
->ru_stime
.tv_usec
= tswapl(rusage
->ru_stime
.tv_usec
);
844 target_rusage
->ru_maxrss
= tswapl(rusage
->ru_maxrss
);
845 target_rusage
->ru_ixrss
= tswapl(rusage
->ru_ixrss
);
846 target_rusage
->ru_idrss
= tswapl(rusage
->ru_idrss
);
847 target_rusage
->ru_isrss
= tswapl(rusage
->ru_isrss
);
848 target_rusage
->ru_minflt
= tswapl(rusage
->ru_minflt
);
849 target_rusage
->ru_majflt
= tswapl(rusage
->ru_majflt
);
850 target_rusage
->ru_nswap
= tswapl(rusage
->ru_nswap
);
851 target_rusage
->ru_inblock
= tswapl(rusage
->ru_inblock
);
852 target_rusage
->ru_oublock
= tswapl(rusage
->ru_oublock
);
853 target_rusage
->ru_msgsnd
= tswapl(rusage
->ru_msgsnd
);
854 target_rusage
->ru_msgrcv
= tswapl(rusage
->ru_msgrcv
);
855 target_rusage
->ru_nsignals
= tswapl(rusage
->ru_nsignals
);
856 target_rusage
->ru_nvcsw
= tswapl(rusage
->ru_nvcsw
);
857 target_rusage
->ru_nivcsw
= tswapl(rusage
->ru_nivcsw
);
858 unlock_user_struct(target_rusage
, target_addr
, 1);
863 static inline rlim_t
target_to_host_rlim(target_ulong target_rlim
)
865 if (target_rlim
== TARGET_RLIM_INFINITY
)
866 return RLIM_INFINITY
;
868 return tswapl(target_rlim
);
871 static inline target_ulong
host_to_target_rlim(rlim_t rlim
)
873 if (rlim
== RLIM_INFINITY
|| rlim
!= (target_long
)rlim
)
874 return TARGET_RLIM_INFINITY
;
879 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
880 abi_ulong target_tv_addr
)
882 struct target_timeval
*target_tv
;
884 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
885 return -TARGET_EFAULT
;
887 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
888 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
890 unlock_user_struct(target_tv
, target_tv_addr
, 0);
895 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
896 const struct timeval
*tv
)
898 struct target_timeval
*target_tv
;
900 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
901 return -TARGET_EFAULT
;
903 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
904 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
906 unlock_user_struct(target_tv
, target_tv_addr
, 1);
911 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
914 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
915 abi_ulong target_mq_attr_addr
)
917 struct target_mq_attr
*target_mq_attr
;
919 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
920 target_mq_attr_addr
, 1))
921 return -TARGET_EFAULT
;
923 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
924 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
925 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
926 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
928 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
933 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
934 const struct mq_attr
*attr
)
936 struct target_mq_attr
*target_mq_attr
;
938 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
939 target_mq_attr_addr
, 0))
940 return -TARGET_EFAULT
;
942 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
943 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
944 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
945 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
947 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
953 /* do_select() must return target values and target errnos. */
954 static abi_long
do_select(int n
,
955 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
956 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
958 fd_set rfds
, wfds
, efds
;
959 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
960 struct timeval tv
, *tv_ptr
;
964 if (copy_from_user_fdset(&rfds
, rfd_addr
, n
))
965 return -TARGET_EFAULT
;
971 if (copy_from_user_fdset(&wfds
, wfd_addr
, n
))
972 return -TARGET_EFAULT
;
978 if (copy_from_user_fdset(&efds
, efd_addr
, n
))
979 return -TARGET_EFAULT
;
985 if (target_tv_addr
) {
986 if (copy_from_user_timeval(&tv
, target_tv_addr
))
987 return -TARGET_EFAULT
;
993 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
995 if (!is_error(ret
)) {
996 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
997 return -TARGET_EFAULT
;
998 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
999 return -TARGET_EFAULT
;
1000 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1001 return -TARGET_EFAULT
;
1003 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1004 return -TARGET_EFAULT
;
1010 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1013 return pipe2(host_pipe
, flags
);
1019 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1020 int flags
, int is_pipe2
)
1024 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1027 return get_errno(ret
);
1029 /* Several targets have special calling conventions for the original
1030 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1032 #if defined(TARGET_ALPHA)
1033 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1034 return host_pipe
[0];
1035 #elif defined(TARGET_MIPS)
1036 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1037 return host_pipe
[0];
1038 #elif defined(TARGET_SH4)
1039 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1040 return host_pipe
[0];
1044 if (put_user_s32(host_pipe
[0], pipedes
)
1045 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1046 return -TARGET_EFAULT
;
1047 return get_errno(ret
);
1050 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1051 abi_ulong target_addr
,
1054 struct target_ip_mreqn
*target_smreqn
;
1056 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1058 return -TARGET_EFAULT
;
1059 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1060 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1061 if (len
== sizeof(struct target_ip_mreqn
))
1062 mreqn
->imr_ifindex
= tswapl(target_smreqn
->imr_ifindex
);
1063 unlock_user(target_smreqn
, target_addr
, 0);
1068 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1069 abi_ulong target_addr
,
1072 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1073 sa_family_t sa_family
;
1074 struct target_sockaddr
*target_saddr
;
1076 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1078 return -TARGET_EFAULT
;
1080 sa_family
= tswap16(target_saddr
->sa_family
);
1082 /* Oops. The caller might send a incomplete sun_path; sun_path
1083 * must be terminated by \0 (see the manual page), but
1084 * unfortunately it is quite common to specify sockaddr_un
1085 * length as "strlen(x->sun_path)" while it should be
1086 * "strlen(...) + 1". We'll fix that here if needed.
1087 * Linux kernel has a similar feature.
1090 if (sa_family
== AF_UNIX
) {
1091 if (len
< unix_maxlen
&& len
> 0) {
1092 char *cp
= (char*)target_saddr
;
1094 if ( cp
[len
-1] && !cp
[len
] )
1097 if (len
> unix_maxlen
)
1101 memcpy(addr
, target_saddr
, len
);
1102 addr
->sa_family
= sa_family
;
1103 unlock_user(target_saddr
, target_addr
, 0);
1108 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1109 struct sockaddr
*addr
,
1112 struct target_sockaddr
*target_saddr
;
1114 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1116 return -TARGET_EFAULT
;
1117 memcpy(target_saddr
, addr
, len
);
1118 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1119 unlock_user(target_saddr
, target_addr
, len
);
1124 /* ??? Should this also swap msgh->name? */
1125 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1126 struct target_msghdr
*target_msgh
)
1128 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1129 abi_long msg_controllen
;
1130 abi_ulong target_cmsg_addr
;
1131 struct target_cmsghdr
*target_cmsg
;
1132 socklen_t space
= 0;
1134 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1135 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1137 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1138 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1140 return -TARGET_EFAULT
;
1142 while (cmsg
&& target_cmsg
) {
1143 void *data
= CMSG_DATA(cmsg
);
1144 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1146 int len
= tswapl(target_cmsg
->cmsg_len
)
1147 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1149 space
+= CMSG_SPACE(len
);
1150 if (space
> msgh
->msg_controllen
) {
1151 space
-= CMSG_SPACE(len
);
1152 gemu_log("Host cmsg overflow\n");
1156 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1157 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1158 cmsg
->cmsg_len
= CMSG_LEN(len
);
1160 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1161 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1162 memcpy(data
, target_data
, len
);
1164 int *fd
= (int *)data
;
1165 int *target_fd
= (int *)target_data
;
1166 int i
, numfds
= len
/ sizeof(int);
1168 for (i
= 0; i
< numfds
; i
++)
1169 fd
[i
] = tswap32(target_fd
[i
]);
1172 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1173 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1175 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1177 msgh
->msg_controllen
= space
;
1181 /* ??? Should this also swap msgh->name? */
1182 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1183 struct msghdr
*msgh
)
1185 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1186 abi_long msg_controllen
;
1187 abi_ulong target_cmsg_addr
;
1188 struct target_cmsghdr
*target_cmsg
;
1189 socklen_t space
= 0;
1191 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1192 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1194 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1195 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1197 return -TARGET_EFAULT
;
1199 while (cmsg
&& target_cmsg
) {
1200 void *data
= CMSG_DATA(cmsg
);
1201 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1203 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1205 space
+= TARGET_CMSG_SPACE(len
);
1206 if (space
> msg_controllen
) {
1207 space
-= TARGET_CMSG_SPACE(len
);
1208 gemu_log("Target cmsg overflow\n");
1212 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1213 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1214 target_cmsg
->cmsg_len
= tswapl(TARGET_CMSG_LEN(len
));
1216 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1217 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1218 memcpy(target_data
, data
, len
);
1220 int *fd
= (int *)data
;
1221 int *target_fd
= (int *)target_data
;
1222 int i
, numfds
= len
/ sizeof(int);
1224 for (i
= 0; i
< numfds
; i
++)
1225 target_fd
[i
] = tswap32(fd
[i
]);
1228 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1229 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1231 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1233 target_msgh
->msg_controllen
= tswapl(space
);
1237 /* do_setsockopt() Must return target values and target errnos. */
1238 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1239 abi_ulong optval_addr
, socklen_t optlen
)
1243 struct ip_mreqn
*ip_mreq
;
1244 struct ip_mreq_source
*ip_mreq_source
;
1248 /* TCP options all take an 'int' value. */
1249 if (optlen
< sizeof(uint32_t))
1250 return -TARGET_EINVAL
;
1252 if (get_user_u32(val
, optval_addr
))
1253 return -TARGET_EFAULT
;
1254 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1261 case IP_ROUTER_ALERT
:
1265 case IP_MTU_DISCOVER
:
1271 case IP_MULTICAST_TTL
:
1272 case IP_MULTICAST_LOOP
:
1274 if (optlen
>= sizeof(uint32_t)) {
1275 if (get_user_u32(val
, optval_addr
))
1276 return -TARGET_EFAULT
;
1277 } else if (optlen
>= 1) {
1278 if (get_user_u8(val
, optval_addr
))
1279 return -TARGET_EFAULT
;
1281 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1283 case IP_ADD_MEMBERSHIP
:
1284 case IP_DROP_MEMBERSHIP
:
1285 if (optlen
< sizeof (struct target_ip_mreq
) ||
1286 optlen
> sizeof (struct target_ip_mreqn
))
1287 return -TARGET_EINVAL
;
1289 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1290 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1291 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1294 case IP_BLOCK_SOURCE
:
1295 case IP_UNBLOCK_SOURCE
:
1296 case IP_ADD_SOURCE_MEMBERSHIP
:
1297 case IP_DROP_SOURCE_MEMBERSHIP
:
1298 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1299 return -TARGET_EINVAL
;
1301 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1302 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1303 unlock_user (ip_mreq_source
, optval_addr
, 0);
1310 case TARGET_SOL_SOCKET
:
1312 /* Options with 'int' argument. */
1313 case TARGET_SO_DEBUG
:
1316 case TARGET_SO_REUSEADDR
:
1317 optname
= SO_REUSEADDR
;
1319 case TARGET_SO_TYPE
:
1322 case TARGET_SO_ERROR
:
1325 case TARGET_SO_DONTROUTE
:
1326 optname
= SO_DONTROUTE
;
1328 case TARGET_SO_BROADCAST
:
1329 optname
= SO_BROADCAST
;
1331 case TARGET_SO_SNDBUF
:
1332 optname
= SO_SNDBUF
;
1334 case TARGET_SO_RCVBUF
:
1335 optname
= SO_RCVBUF
;
1337 case TARGET_SO_KEEPALIVE
:
1338 optname
= SO_KEEPALIVE
;
1340 case TARGET_SO_OOBINLINE
:
1341 optname
= SO_OOBINLINE
;
1343 case TARGET_SO_NO_CHECK
:
1344 optname
= SO_NO_CHECK
;
1346 case TARGET_SO_PRIORITY
:
1347 optname
= SO_PRIORITY
;
1350 case TARGET_SO_BSDCOMPAT
:
1351 optname
= SO_BSDCOMPAT
;
1354 case TARGET_SO_PASSCRED
:
1355 optname
= SO_PASSCRED
;
1357 case TARGET_SO_TIMESTAMP
:
1358 optname
= SO_TIMESTAMP
;
1360 case TARGET_SO_RCVLOWAT
:
1361 optname
= SO_RCVLOWAT
;
1363 case TARGET_SO_RCVTIMEO
:
1364 optname
= SO_RCVTIMEO
;
1366 case TARGET_SO_SNDTIMEO
:
1367 optname
= SO_SNDTIMEO
;
1373 if (optlen
< sizeof(uint32_t))
1374 return -TARGET_EINVAL
;
1376 if (get_user_u32(val
, optval_addr
))
1377 return -TARGET_EFAULT
;
1378 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1382 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level
, optname
);
1383 ret
= -TARGET_ENOPROTOOPT
;
1388 /* do_getsockopt() Must return target values and target errnos. */
1389 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1390 abi_ulong optval_addr
, abi_ulong optlen
)
1397 case TARGET_SOL_SOCKET
:
1400 /* These don't just return a single integer */
1401 case TARGET_SO_LINGER
:
1402 case TARGET_SO_RCVTIMEO
:
1403 case TARGET_SO_SNDTIMEO
:
1404 case TARGET_SO_PEERCRED
:
1405 case TARGET_SO_PEERNAME
:
1407 /* Options with 'int' argument. */
1408 case TARGET_SO_DEBUG
:
1411 case TARGET_SO_REUSEADDR
:
1412 optname
= SO_REUSEADDR
;
1414 case TARGET_SO_TYPE
:
1417 case TARGET_SO_ERROR
:
1420 case TARGET_SO_DONTROUTE
:
1421 optname
= SO_DONTROUTE
;
1423 case TARGET_SO_BROADCAST
:
1424 optname
= SO_BROADCAST
;
1426 case TARGET_SO_SNDBUF
:
1427 optname
= SO_SNDBUF
;
1429 case TARGET_SO_RCVBUF
:
1430 optname
= SO_RCVBUF
;
1432 case TARGET_SO_KEEPALIVE
:
1433 optname
= SO_KEEPALIVE
;
1435 case TARGET_SO_OOBINLINE
:
1436 optname
= SO_OOBINLINE
;
1438 case TARGET_SO_NO_CHECK
:
1439 optname
= SO_NO_CHECK
;
1441 case TARGET_SO_PRIORITY
:
1442 optname
= SO_PRIORITY
;
1445 case TARGET_SO_BSDCOMPAT
:
1446 optname
= SO_BSDCOMPAT
;
1449 case TARGET_SO_PASSCRED
:
1450 optname
= SO_PASSCRED
;
1452 case TARGET_SO_TIMESTAMP
:
1453 optname
= SO_TIMESTAMP
;
1455 case TARGET_SO_RCVLOWAT
:
1456 optname
= SO_RCVLOWAT
;
1463 /* TCP options all take an 'int' value. */
1465 if (get_user_u32(len
, optlen
))
1466 return -TARGET_EFAULT
;
1468 return -TARGET_EINVAL
;
1470 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1476 if (put_user_u32(val
, optval_addr
))
1477 return -TARGET_EFAULT
;
1479 if (put_user_u8(val
, optval_addr
))
1480 return -TARGET_EFAULT
;
1482 if (put_user_u32(len
, optlen
))
1483 return -TARGET_EFAULT
;
1490 case IP_ROUTER_ALERT
:
1494 case IP_MTU_DISCOVER
:
1500 case IP_MULTICAST_TTL
:
1501 case IP_MULTICAST_LOOP
:
1502 if (get_user_u32(len
, optlen
))
1503 return -TARGET_EFAULT
;
1505 return -TARGET_EINVAL
;
1507 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1510 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1512 if (put_user_u32(len
, optlen
)
1513 || put_user_u8(val
, optval_addr
))
1514 return -TARGET_EFAULT
;
1516 if (len
> sizeof(int))
1518 if (put_user_u32(len
, optlen
)
1519 || put_user_u32(val
, optval_addr
))
1520 return -TARGET_EFAULT
;
1524 ret
= -TARGET_ENOPROTOOPT
;
1530 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1532 ret
= -TARGET_EOPNOTSUPP
;
1539 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1540 * other lock functions have a return code of 0 for failure.
1542 static abi_long
lock_iovec(int type
, struct iovec
*vec
, abi_ulong target_addr
,
1543 int count
, int copy
)
1545 struct target_iovec
*target_vec
;
1549 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1551 return -TARGET_EFAULT
;
1552 for(i
= 0;i
< count
; i
++) {
1553 base
= tswapl(target_vec
[i
].iov_base
);
1554 vec
[i
].iov_len
= tswapl(target_vec
[i
].iov_len
);
1555 if (vec
[i
].iov_len
!= 0) {
1556 vec
[i
].iov_base
= lock_user(type
, base
, vec
[i
].iov_len
, copy
);
1557 /* Don't check lock_user return value. We must call writev even
1558 if a element has invalid base address. */
1560 /* zero length pointer is ignored */
1561 vec
[i
].iov_base
= NULL
;
1564 unlock_user (target_vec
, target_addr
, 0);
1568 static abi_long
unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1569 int count
, int copy
)
1571 struct target_iovec
*target_vec
;
1575 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1577 return -TARGET_EFAULT
;
1578 for(i
= 0;i
< count
; i
++) {
1579 if (target_vec
[i
].iov_base
) {
1580 base
= tswapl(target_vec
[i
].iov_base
);
1581 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1584 unlock_user (target_vec
, target_addr
, 0);
1589 /* do_socket() Must return target values and target errnos. */
1590 static abi_long
do_socket(int domain
, int type
, int protocol
)
1592 #if defined(TARGET_MIPS)
1594 case TARGET_SOCK_DGRAM
:
1597 case TARGET_SOCK_STREAM
:
1600 case TARGET_SOCK_RAW
:
1603 case TARGET_SOCK_RDM
:
1606 case TARGET_SOCK_SEQPACKET
:
1607 type
= SOCK_SEQPACKET
;
1609 case TARGET_SOCK_PACKET
:
1614 if (domain
== PF_NETLINK
)
1615 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1616 return get_errno(socket(domain
, type
, protocol
));
1619 /* do_bind() Must return target values and target errnos. */
1620 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1626 if ((int)addrlen
< 0) {
1627 return -TARGET_EINVAL
;
1630 addr
= alloca(addrlen
+1);
1632 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1636 return get_errno(bind(sockfd
, addr
, addrlen
));
1639 /* do_connect() Must return target values and target errnos. */
1640 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1646 if ((int)addrlen
< 0) {
1647 return -TARGET_EINVAL
;
1650 addr
= alloca(addrlen
);
1652 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1656 return get_errno(connect(sockfd
, addr
, addrlen
));
1659 /* do_sendrecvmsg() Must return target values and target errnos. */
1660 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1661 int flags
, int send
)
1664 struct target_msghdr
*msgp
;
1668 abi_ulong target_vec
;
1671 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1675 return -TARGET_EFAULT
;
1676 if (msgp
->msg_name
) {
1677 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1678 msg
.msg_name
= alloca(msg
.msg_namelen
);
1679 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapl(msgp
->msg_name
),
1682 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1686 msg
.msg_name
= NULL
;
1687 msg
.msg_namelen
= 0;
1689 msg
.msg_controllen
= 2 * tswapl(msgp
->msg_controllen
);
1690 msg
.msg_control
= alloca(msg
.msg_controllen
);
1691 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1693 count
= tswapl(msgp
->msg_iovlen
);
1694 vec
= alloca(count
* sizeof(struct iovec
));
1695 target_vec
= tswapl(msgp
->msg_iov
);
1696 lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
, vec
, target_vec
, count
, send
);
1697 msg
.msg_iovlen
= count
;
1701 ret
= target_to_host_cmsg(&msg
, msgp
);
1703 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1705 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1706 if (!is_error(ret
)) {
1708 ret
= host_to_target_cmsg(msgp
, &msg
);
1713 unlock_iovec(vec
, target_vec
, count
, !send
);
1714 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1718 /* do_accept() Must return target values and target errnos. */
1719 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
1720 abi_ulong target_addrlen_addr
)
1726 if (target_addr
== 0)
1727 return get_errno(accept(fd
, NULL
, NULL
));
1729 /* linux returns EINVAL if addrlen pointer is invalid */
1730 if (get_user_u32(addrlen
, target_addrlen_addr
))
1731 return -TARGET_EINVAL
;
1733 if ((int)addrlen
< 0) {
1734 return -TARGET_EINVAL
;
1737 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1738 return -TARGET_EINVAL
;
1740 addr
= alloca(addrlen
);
1742 ret
= get_errno(accept(fd
, addr
, &addrlen
));
1743 if (!is_error(ret
)) {
1744 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1745 if (put_user_u32(addrlen
, target_addrlen_addr
))
1746 ret
= -TARGET_EFAULT
;
1751 /* do_getpeername() Must return target values and target errnos. */
1752 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
1753 abi_ulong target_addrlen_addr
)
1759 if (get_user_u32(addrlen
, target_addrlen_addr
))
1760 return -TARGET_EFAULT
;
1762 if ((int)addrlen
< 0) {
1763 return -TARGET_EINVAL
;
1766 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1767 return -TARGET_EFAULT
;
1769 addr
= alloca(addrlen
);
1771 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
1772 if (!is_error(ret
)) {
1773 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1774 if (put_user_u32(addrlen
, target_addrlen_addr
))
1775 ret
= -TARGET_EFAULT
;
1780 /* do_getsockname() Must return target values and target errnos. */
1781 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
1782 abi_ulong target_addrlen_addr
)
1788 if (get_user_u32(addrlen
, target_addrlen_addr
))
1789 return -TARGET_EFAULT
;
1791 if ((int)addrlen
< 0) {
1792 return -TARGET_EINVAL
;
1795 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1796 return -TARGET_EFAULT
;
1798 addr
= alloca(addrlen
);
1800 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
1801 if (!is_error(ret
)) {
1802 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1803 if (put_user_u32(addrlen
, target_addrlen_addr
))
1804 ret
= -TARGET_EFAULT
;
1809 /* do_socketpair() Must return target values and target errnos. */
1810 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
1811 abi_ulong target_tab_addr
)
1816 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
1817 if (!is_error(ret
)) {
1818 if (put_user_s32(tab
[0], target_tab_addr
)
1819 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
1820 ret
= -TARGET_EFAULT
;
1825 /* do_sendto() Must return target values and target errnos. */
1826 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
1827 abi_ulong target_addr
, socklen_t addrlen
)
1833 if ((int)addrlen
< 0) {
1834 return -TARGET_EINVAL
;
1837 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
1839 return -TARGET_EFAULT
;
1841 addr
= alloca(addrlen
);
1842 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1844 unlock_user(host_msg
, msg
, 0);
1847 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
1849 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
1851 unlock_user(host_msg
, msg
, 0);
1855 /* do_recvfrom() Must return target values and target errnos. */
1856 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
1857 abi_ulong target_addr
,
1858 abi_ulong target_addrlen
)
1865 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
1867 return -TARGET_EFAULT
;
1869 if (get_user_u32(addrlen
, target_addrlen
)) {
1870 ret
= -TARGET_EFAULT
;
1873 if ((int)addrlen
< 0) {
1874 ret
= -TARGET_EINVAL
;
1877 addr
= alloca(addrlen
);
1878 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
1880 addr
= NULL
; /* To keep compiler quiet. */
1881 ret
= get_errno(recv(fd
, host_msg
, len
, flags
));
1883 if (!is_error(ret
)) {
1885 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1886 if (put_user_u32(addrlen
, target_addrlen
)) {
1887 ret
= -TARGET_EFAULT
;
1891 unlock_user(host_msg
, msg
, len
);
1894 unlock_user(host_msg
, msg
, 0);
1899 #ifdef TARGET_NR_socketcall
1900 /* do_socketcall() Must return target values and target errnos. */
1901 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
1904 const int n
= sizeof(abi_ulong
);
1909 abi_ulong domain
, type
, protocol
;
1911 if (get_user_ual(domain
, vptr
)
1912 || get_user_ual(type
, vptr
+ n
)
1913 || get_user_ual(protocol
, vptr
+ 2 * n
))
1914 return -TARGET_EFAULT
;
1916 ret
= do_socket(domain
, type
, protocol
);
1922 abi_ulong target_addr
;
1925 if (get_user_ual(sockfd
, vptr
)
1926 || get_user_ual(target_addr
, vptr
+ n
)
1927 || get_user_ual(addrlen
, vptr
+ 2 * n
))
1928 return -TARGET_EFAULT
;
1930 ret
= do_bind(sockfd
, target_addr
, addrlen
);
1933 case SOCKOP_connect
:
1936 abi_ulong target_addr
;
1939 if (get_user_ual(sockfd
, vptr
)
1940 || get_user_ual(target_addr
, vptr
+ n
)
1941 || get_user_ual(addrlen
, vptr
+ 2 * n
))
1942 return -TARGET_EFAULT
;
1944 ret
= do_connect(sockfd
, target_addr
, addrlen
);
1949 abi_ulong sockfd
, backlog
;
1951 if (get_user_ual(sockfd
, vptr
)
1952 || get_user_ual(backlog
, vptr
+ n
))
1953 return -TARGET_EFAULT
;
1955 ret
= get_errno(listen(sockfd
, backlog
));
1961 abi_ulong target_addr
, target_addrlen
;
1963 if (get_user_ual(sockfd
, vptr
)
1964 || get_user_ual(target_addr
, vptr
+ n
)
1965 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1966 return -TARGET_EFAULT
;
1968 ret
= do_accept(sockfd
, target_addr
, target_addrlen
);
1971 case SOCKOP_getsockname
:
1974 abi_ulong target_addr
, target_addrlen
;
1976 if (get_user_ual(sockfd
, vptr
)
1977 || get_user_ual(target_addr
, vptr
+ n
)
1978 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1979 return -TARGET_EFAULT
;
1981 ret
= do_getsockname(sockfd
, target_addr
, target_addrlen
);
1984 case SOCKOP_getpeername
:
1987 abi_ulong target_addr
, target_addrlen
;
1989 if (get_user_ual(sockfd
, vptr
)
1990 || get_user_ual(target_addr
, vptr
+ n
)
1991 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1992 return -TARGET_EFAULT
;
1994 ret
= do_getpeername(sockfd
, target_addr
, target_addrlen
);
1997 case SOCKOP_socketpair
:
1999 abi_ulong domain
, type
, protocol
;
2002 if (get_user_ual(domain
, vptr
)
2003 || get_user_ual(type
, vptr
+ n
)
2004 || get_user_ual(protocol
, vptr
+ 2 * n
)
2005 || get_user_ual(tab
, vptr
+ 3 * n
))
2006 return -TARGET_EFAULT
;
2008 ret
= do_socketpair(domain
, type
, protocol
, tab
);
2018 if (get_user_ual(sockfd
, vptr
)
2019 || get_user_ual(msg
, vptr
+ n
)
2020 || get_user_ual(len
, vptr
+ 2 * n
)
2021 || get_user_ual(flags
, vptr
+ 3 * n
))
2022 return -TARGET_EFAULT
;
2024 ret
= do_sendto(sockfd
, msg
, len
, flags
, 0, 0);
2034 if (get_user_ual(sockfd
, vptr
)
2035 || get_user_ual(msg
, vptr
+ n
)
2036 || get_user_ual(len
, vptr
+ 2 * n
)
2037 || get_user_ual(flags
, vptr
+ 3 * n
))
2038 return -TARGET_EFAULT
;
2040 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, 0, 0);
2052 if (get_user_ual(sockfd
, vptr
)
2053 || get_user_ual(msg
, vptr
+ n
)
2054 || get_user_ual(len
, vptr
+ 2 * n
)
2055 || get_user_ual(flags
, vptr
+ 3 * n
)
2056 || get_user_ual(addr
, vptr
+ 4 * n
)
2057 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2058 return -TARGET_EFAULT
;
2060 ret
= do_sendto(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2063 case SOCKOP_recvfrom
:
2072 if (get_user_ual(sockfd
, vptr
)
2073 || get_user_ual(msg
, vptr
+ n
)
2074 || get_user_ual(len
, vptr
+ 2 * n
)
2075 || get_user_ual(flags
, vptr
+ 3 * n
)
2076 || get_user_ual(addr
, vptr
+ 4 * n
)
2077 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2078 return -TARGET_EFAULT
;
2080 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2083 case SOCKOP_shutdown
:
2085 abi_ulong sockfd
, how
;
2087 if (get_user_ual(sockfd
, vptr
)
2088 || get_user_ual(how
, vptr
+ n
))
2089 return -TARGET_EFAULT
;
2091 ret
= get_errno(shutdown(sockfd
, how
));
2094 case SOCKOP_sendmsg
:
2095 case SOCKOP_recvmsg
:
2098 abi_ulong target_msg
;
2101 if (get_user_ual(fd
, vptr
)
2102 || get_user_ual(target_msg
, vptr
+ n
)
2103 || get_user_ual(flags
, vptr
+ 2 * n
))
2104 return -TARGET_EFAULT
;
2106 ret
= do_sendrecvmsg(fd
, target_msg
, flags
,
2107 (num
== SOCKOP_sendmsg
));
2110 case SOCKOP_setsockopt
:
2118 if (get_user_ual(sockfd
, vptr
)
2119 || get_user_ual(level
, vptr
+ n
)
2120 || get_user_ual(optname
, vptr
+ 2 * n
)
2121 || get_user_ual(optval
, vptr
+ 3 * n
)
2122 || get_user_ual(optlen
, vptr
+ 4 * n
))
2123 return -TARGET_EFAULT
;
2125 ret
= do_setsockopt(sockfd
, level
, optname
, optval
, optlen
);
2128 case SOCKOP_getsockopt
:
2136 if (get_user_ual(sockfd
, vptr
)
2137 || get_user_ual(level
, vptr
+ n
)
2138 || get_user_ual(optname
, vptr
+ 2 * n
)
2139 || get_user_ual(optval
, vptr
+ 3 * n
)
2140 || get_user_ual(optlen
, vptr
+ 4 * n
))
2141 return -TARGET_EFAULT
;
2143 ret
= do_getsockopt(sockfd
, level
, optname
, optval
, optlen
);
2147 gemu_log("Unsupported socketcall: %d\n", num
);
2148 ret
= -TARGET_ENOSYS
;
2155 #define N_SHM_REGIONS 32
2157 static struct shm_region
{
2160 } shm_regions
[N_SHM_REGIONS
];
2162 struct target_ipc_perm
2169 unsigned short int mode
;
2170 unsigned short int __pad1
;
2171 unsigned short int __seq
;
2172 unsigned short int __pad2
;
2173 abi_ulong __unused1
;
2174 abi_ulong __unused2
;
2177 struct target_semid_ds
2179 struct target_ipc_perm sem_perm
;
2180 abi_ulong sem_otime
;
2181 abi_ulong __unused1
;
2182 abi_ulong sem_ctime
;
2183 abi_ulong __unused2
;
2184 abi_ulong sem_nsems
;
2185 abi_ulong __unused3
;
2186 abi_ulong __unused4
;
2189 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2190 abi_ulong target_addr
)
2192 struct target_ipc_perm
*target_ip
;
2193 struct target_semid_ds
*target_sd
;
2195 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2196 return -TARGET_EFAULT
;
2197 target_ip
= &(target_sd
->sem_perm
);
2198 host_ip
->__key
= tswapl(target_ip
->__key
);
2199 host_ip
->uid
= tswapl(target_ip
->uid
);
2200 host_ip
->gid
= tswapl(target_ip
->gid
);
2201 host_ip
->cuid
= tswapl(target_ip
->cuid
);
2202 host_ip
->cgid
= tswapl(target_ip
->cgid
);
2203 host_ip
->mode
= tswapl(target_ip
->mode
);
2204 unlock_user_struct(target_sd
, target_addr
, 0);
2208 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2209 struct ipc_perm
*host_ip
)
2211 struct target_ipc_perm
*target_ip
;
2212 struct target_semid_ds
*target_sd
;
2214 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2215 return -TARGET_EFAULT
;
2216 target_ip
= &(target_sd
->sem_perm
);
2217 target_ip
->__key
= tswapl(host_ip
->__key
);
2218 target_ip
->uid
= tswapl(host_ip
->uid
);
2219 target_ip
->gid
= tswapl(host_ip
->gid
);
2220 target_ip
->cuid
= tswapl(host_ip
->cuid
);
2221 target_ip
->cgid
= tswapl(host_ip
->cgid
);
2222 target_ip
->mode
= tswapl(host_ip
->mode
);
2223 unlock_user_struct(target_sd
, target_addr
, 1);
2227 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2228 abi_ulong target_addr
)
2230 struct target_semid_ds
*target_sd
;
2232 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2233 return -TARGET_EFAULT
;
2234 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2235 return -TARGET_EFAULT
;
2236 host_sd
->sem_nsems
= tswapl(target_sd
->sem_nsems
);
2237 host_sd
->sem_otime
= tswapl(target_sd
->sem_otime
);
2238 host_sd
->sem_ctime
= tswapl(target_sd
->sem_ctime
);
2239 unlock_user_struct(target_sd
, target_addr
, 0);
2243 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2244 struct semid_ds
*host_sd
)
2246 struct target_semid_ds
*target_sd
;
2248 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2249 return -TARGET_EFAULT
;
2250 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2251 return -TARGET_EFAULT
;;
2252 target_sd
->sem_nsems
= tswapl(host_sd
->sem_nsems
);
2253 target_sd
->sem_otime
= tswapl(host_sd
->sem_otime
);
2254 target_sd
->sem_ctime
= tswapl(host_sd
->sem_ctime
);
2255 unlock_user_struct(target_sd
, target_addr
, 1);
2259 struct target_seminfo
{
2272 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2273 struct seminfo
*host_seminfo
)
2275 struct target_seminfo
*target_seminfo
;
2276 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2277 return -TARGET_EFAULT
;
2278 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2279 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2280 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2281 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2282 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2283 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2284 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2285 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2286 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2287 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2288 unlock_user_struct(target_seminfo
, target_addr
, 1);
2294 struct semid_ds
*buf
;
2295 unsigned short *array
;
2296 struct seminfo
*__buf
;
2299 union target_semun
{
2306 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2307 abi_ulong target_addr
)
2310 unsigned short *array
;
2312 struct semid_ds semid_ds
;
2315 semun
.buf
= &semid_ds
;
2317 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2319 return get_errno(ret
);
2321 nsems
= semid_ds
.sem_nsems
;
2323 *host_array
= malloc(nsems
*sizeof(unsigned short));
2324 array
= lock_user(VERIFY_READ
, target_addr
,
2325 nsems
*sizeof(unsigned short), 1);
2327 return -TARGET_EFAULT
;
2329 for(i
=0; i
<nsems
; i
++) {
2330 __get_user((*host_array
)[i
], &array
[i
]);
2332 unlock_user(array
, target_addr
, 0);
2337 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2338 unsigned short **host_array
)
2341 unsigned short *array
;
2343 struct semid_ds semid_ds
;
2346 semun
.buf
= &semid_ds
;
2348 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2350 return get_errno(ret
);
2352 nsems
= semid_ds
.sem_nsems
;
2354 array
= lock_user(VERIFY_WRITE
, target_addr
,
2355 nsems
*sizeof(unsigned short), 0);
2357 return -TARGET_EFAULT
;
2359 for(i
=0; i
<nsems
; i
++) {
2360 __put_user((*host_array
)[i
], &array
[i
]);
2363 unlock_user(array
, target_addr
, 1);
2368 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2369 union target_semun target_su
)
2372 struct semid_ds dsarg
;
2373 unsigned short *array
= NULL
;
2374 struct seminfo seminfo
;
2375 abi_long ret
= -TARGET_EINVAL
;
2382 arg
.val
= tswapl(target_su
.val
);
2383 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2384 target_su
.val
= tswapl(arg
.val
);
2388 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2392 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2393 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2400 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2404 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2405 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2411 arg
.__buf
= &seminfo
;
2412 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2413 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2421 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
2428 struct target_sembuf
{
2429 unsigned short sem_num
;
2434 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2435 abi_ulong target_addr
,
2438 struct target_sembuf
*target_sembuf
;
2441 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2442 nsops
*sizeof(struct target_sembuf
), 1);
2444 return -TARGET_EFAULT
;
2446 for(i
=0; i
<nsops
; i
++) {
2447 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2448 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2449 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2452 unlock_user(target_sembuf
, target_addr
, 0);
2457 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2459 struct sembuf sops
[nsops
];
2461 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2462 return -TARGET_EFAULT
;
2464 return semop(semid
, sops
, nsops
);
2467 struct target_msqid_ds
2469 struct target_ipc_perm msg_perm
;
2470 abi_ulong msg_stime
;
2471 #if TARGET_ABI_BITS == 32
2472 abi_ulong __unused1
;
2474 abi_ulong msg_rtime
;
2475 #if TARGET_ABI_BITS == 32
2476 abi_ulong __unused2
;
2478 abi_ulong msg_ctime
;
2479 #if TARGET_ABI_BITS == 32
2480 abi_ulong __unused3
;
2482 abi_ulong __msg_cbytes
;
2484 abi_ulong msg_qbytes
;
2485 abi_ulong msg_lspid
;
2486 abi_ulong msg_lrpid
;
2487 abi_ulong __unused4
;
2488 abi_ulong __unused5
;
2491 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2492 abi_ulong target_addr
)
2494 struct target_msqid_ds
*target_md
;
2496 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2497 return -TARGET_EFAULT
;
2498 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2499 return -TARGET_EFAULT
;
2500 host_md
->msg_stime
= tswapl(target_md
->msg_stime
);
2501 host_md
->msg_rtime
= tswapl(target_md
->msg_rtime
);
2502 host_md
->msg_ctime
= tswapl(target_md
->msg_ctime
);
2503 host_md
->__msg_cbytes
= tswapl(target_md
->__msg_cbytes
);
2504 host_md
->msg_qnum
= tswapl(target_md
->msg_qnum
);
2505 host_md
->msg_qbytes
= tswapl(target_md
->msg_qbytes
);
2506 host_md
->msg_lspid
= tswapl(target_md
->msg_lspid
);
2507 host_md
->msg_lrpid
= tswapl(target_md
->msg_lrpid
);
2508 unlock_user_struct(target_md
, target_addr
, 0);
2512 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2513 struct msqid_ds
*host_md
)
2515 struct target_msqid_ds
*target_md
;
2517 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2518 return -TARGET_EFAULT
;
2519 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2520 return -TARGET_EFAULT
;
2521 target_md
->msg_stime
= tswapl(host_md
->msg_stime
);
2522 target_md
->msg_rtime
= tswapl(host_md
->msg_rtime
);
2523 target_md
->msg_ctime
= tswapl(host_md
->msg_ctime
);
2524 target_md
->__msg_cbytes
= tswapl(host_md
->__msg_cbytes
);
2525 target_md
->msg_qnum
= tswapl(host_md
->msg_qnum
);
2526 target_md
->msg_qbytes
= tswapl(host_md
->msg_qbytes
);
2527 target_md
->msg_lspid
= tswapl(host_md
->msg_lspid
);
2528 target_md
->msg_lrpid
= tswapl(host_md
->msg_lrpid
);
2529 unlock_user_struct(target_md
, target_addr
, 1);
2533 struct target_msginfo
{
2541 unsigned short int msgseg
;
2544 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2545 struct msginfo
*host_msginfo
)
2547 struct target_msginfo
*target_msginfo
;
2548 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2549 return -TARGET_EFAULT
;
2550 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2551 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2552 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2553 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2554 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2555 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2556 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2557 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2558 unlock_user_struct(target_msginfo
, target_addr
, 1);
2562 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2564 struct msqid_ds dsarg
;
2565 struct msginfo msginfo
;
2566 abi_long ret
= -TARGET_EINVAL
;
2574 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2575 return -TARGET_EFAULT
;
2576 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2577 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2578 return -TARGET_EFAULT
;
2581 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2585 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2586 if (host_to_target_msginfo(ptr
, &msginfo
))
2587 return -TARGET_EFAULT
;
2594 struct target_msgbuf
{
2599 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2600 unsigned int msgsz
, int msgflg
)
2602 struct target_msgbuf
*target_mb
;
2603 struct msgbuf
*host_mb
;
2606 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2607 return -TARGET_EFAULT
;
2608 host_mb
= malloc(msgsz
+sizeof(long));
2609 host_mb
->mtype
= (abi_long
) tswapl(target_mb
->mtype
);
2610 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2611 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2613 unlock_user_struct(target_mb
, msgp
, 0);
2618 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2619 unsigned int msgsz
, abi_long msgtyp
,
2622 struct target_msgbuf
*target_mb
;
2624 struct msgbuf
*host_mb
;
2627 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2628 return -TARGET_EFAULT
;
2630 host_mb
= malloc(msgsz
+sizeof(long));
2631 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, tswapl(msgtyp
), msgflg
));
2634 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2635 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2636 if (!target_mtext
) {
2637 ret
= -TARGET_EFAULT
;
2640 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2641 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2644 target_mb
->mtype
= tswapl(host_mb
->mtype
);
2649 unlock_user_struct(target_mb
, msgp
, 1);
2653 struct target_shmid_ds
2655 struct target_ipc_perm shm_perm
;
2656 abi_ulong shm_segsz
;
2657 abi_ulong shm_atime
;
2658 #if TARGET_ABI_BITS == 32
2659 abi_ulong __unused1
;
2661 abi_ulong shm_dtime
;
2662 #if TARGET_ABI_BITS == 32
2663 abi_ulong __unused2
;
2665 abi_ulong shm_ctime
;
2666 #if TARGET_ABI_BITS == 32
2667 abi_ulong __unused3
;
2671 abi_ulong shm_nattch
;
2672 unsigned long int __unused4
;
2673 unsigned long int __unused5
;
2676 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2677 abi_ulong target_addr
)
2679 struct target_shmid_ds
*target_sd
;
2681 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2682 return -TARGET_EFAULT
;
2683 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2684 return -TARGET_EFAULT
;
2685 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2686 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2687 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2688 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2689 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2690 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2691 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2692 unlock_user_struct(target_sd
, target_addr
, 0);
2696 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2697 struct shmid_ds
*host_sd
)
2699 struct target_shmid_ds
*target_sd
;
2701 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2702 return -TARGET_EFAULT
;
2703 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2704 return -TARGET_EFAULT
;
2705 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2706 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2707 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2708 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2709 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2710 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2711 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2712 unlock_user_struct(target_sd
, target_addr
, 1);
2716 struct target_shminfo
{
2724 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
2725 struct shminfo
*host_shminfo
)
2727 struct target_shminfo
*target_shminfo
;
2728 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
2729 return -TARGET_EFAULT
;
2730 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
2731 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
2732 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
2733 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
2734 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
2735 unlock_user_struct(target_shminfo
, target_addr
, 1);
2739 struct target_shm_info
{
2744 abi_ulong swap_attempts
;
2745 abi_ulong swap_successes
;
2748 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
2749 struct shm_info
*host_shm_info
)
2751 struct target_shm_info
*target_shm_info
;
2752 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
2753 return -TARGET_EFAULT
;
2754 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
2755 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
2756 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
2757 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
2758 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
2759 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
2760 unlock_user_struct(target_shm_info
, target_addr
, 1);
2764 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
2766 struct shmid_ds dsarg
;
2767 struct shminfo shminfo
;
2768 struct shm_info shm_info
;
2769 abi_long ret
= -TARGET_EINVAL
;
2777 if (target_to_host_shmid_ds(&dsarg
, buf
))
2778 return -TARGET_EFAULT
;
2779 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
2780 if (host_to_target_shmid_ds(buf
, &dsarg
))
2781 return -TARGET_EFAULT
;
2784 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
2785 if (host_to_target_shminfo(buf
, &shminfo
))
2786 return -TARGET_EFAULT
;
2789 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
2790 if (host_to_target_shm_info(buf
, &shm_info
))
2791 return -TARGET_EFAULT
;
2796 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
2803 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
2807 struct shmid_ds shm_info
;
2810 /* find out the length of the shared memory segment */
2811 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
2812 if (is_error(ret
)) {
2813 /* can't get length, bail out */
2820 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
2822 abi_ulong mmap_start
;
2824 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
2826 if (mmap_start
== -1) {
2828 host_raddr
= (void *)-1;
2830 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
2833 if (host_raddr
== (void *)-1) {
2835 return get_errno((long)host_raddr
);
2837 raddr
=h2g((unsigned long)host_raddr
);
2839 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
2840 PAGE_VALID
| PAGE_READ
|
2841 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
2843 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
2844 if (shm_regions
[i
].start
== 0) {
2845 shm_regions
[i
].start
= raddr
;
2846 shm_regions
[i
].size
= shm_info
.shm_segsz
;
2856 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
2860 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
2861 if (shm_regions
[i
].start
== shmaddr
) {
2862 shm_regions
[i
].start
= 0;
2863 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
2868 return get_errno(shmdt(g2h(shmaddr
)));
2871 #ifdef TARGET_NR_ipc
2872 /* ??? This only works with linear mappings. */
2873 /* do_ipc() must return target values and target errnos. */
2874 static abi_long
do_ipc(unsigned int call
, int first
,
2875 int second
, int third
,
2876 abi_long ptr
, abi_long fifth
)
2881 version
= call
>> 16;
2886 ret
= do_semop(first
, ptr
, second
);
2890 ret
= get_errno(semget(first
, second
, third
));
2894 ret
= do_semctl(first
, second
, third
, (union target_semun
)(abi_ulong
) ptr
);
2898 ret
= get_errno(msgget(first
, second
));
2902 ret
= do_msgsnd(first
, ptr
, second
, third
);
2906 ret
= do_msgctl(first
, second
, ptr
);
2913 struct target_ipc_kludge
{
2918 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
2919 ret
= -TARGET_EFAULT
;
2923 ret
= do_msgrcv(first
, tmp
->msgp
, second
, tmp
->msgtyp
, third
);
2925 unlock_user_struct(tmp
, ptr
, 0);
2929 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
2938 raddr
= do_shmat(first
, ptr
, second
);
2939 if (is_error(raddr
))
2940 return get_errno(raddr
);
2941 if (put_user_ual(raddr
, third
))
2942 return -TARGET_EFAULT
;
2946 ret
= -TARGET_EINVAL
;
2951 ret
= do_shmdt(ptr
);
2955 /* IPC_* flag values are the same on all linux platforms */
2956 ret
= get_errno(shmget(first
, second
, third
));
2959 /* IPC_* and SHM_* command values are the same on all linux platforms */
2961 ret
= do_shmctl(first
, second
, third
);
2964 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
2965 ret
= -TARGET_ENOSYS
;
2972 /* kernel structure types definitions */
2975 #define STRUCT(name, ...) STRUCT_ ## name,
2976 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2978 #include "syscall_types.h"
2981 #undef STRUCT_SPECIAL
2983 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
2984 #define STRUCT_SPECIAL(name)
2985 #include "syscall_types.h"
2987 #undef STRUCT_SPECIAL
2989 typedef struct IOCTLEntry IOCTLEntry
;
2991 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
2992 int fd
, abi_long cmd
, abi_long arg
);
2995 unsigned int target_cmd
;
2996 unsigned int host_cmd
;
2999 do_ioctl_fn
*do_ioctl
;
3000 const argtype arg_type
[5];
3003 #define IOC_R 0x0001
3004 #define IOC_W 0x0002
3005 #define IOC_RW (IOC_R | IOC_W)
3007 #define MAX_STRUCT_SIZE 4096
3009 #ifdef CONFIG_FIEMAP
3010 /* So fiemap access checks don't overflow on 32 bit systems.
3011 * This is very slightly smaller than the limit imposed by
3012 * the underlying kernel.
3014 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3015 / sizeof(struct fiemap_extent))
3017 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3018 int fd
, abi_long cmd
, abi_long arg
)
3020 /* The parameter for this ioctl is a struct fiemap followed
3021 * by an array of struct fiemap_extent whose size is set
3022 * in fiemap->fm_extent_count. The array is filled in by the
3025 int target_size_in
, target_size_out
;
3027 const argtype
*arg_type
= ie
->arg_type
;
3028 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3031 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3035 assert(arg_type
[0] == TYPE_PTR
);
3036 assert(ie
->access
== IOC_RW
);
3038 target_size_in
= thunk_type_size(arg_type
, 0);
3039 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3041 return -TARGET_EFAULT
;
3043 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3044 unlock_user(argptr
, arg
, 0);
3045 fm
= (struct fiemap
*)buf_temp
;
3046 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3047 return -TARGET_EINVAL
;
3050 outbufsz
= sizeof (*fm
) +
3051 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3053 if (outbufsz
> MAX_STRUCT_SIZE
) {
3054 /* We can't fit all the extents into the fixed size buffer.
3055 * Allocate one that is large enough and use it instead.
3057 fm
= malloc(outbufsz
);
3059 return -TARGET_ENOMEM
;
3061 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3064 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3065 if (!is_error(ret
)) {
3066 target_size_out
= target_size_in
;
3067 /* An extent_count of 0 means we were only counting the extents
3068 * so there are no structs to copy
3070 if (fm
->fm_extent_count
!= 0) {
3071 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3073 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3075 ret
= -TARGET_EFAULT
;
3077 /* Convert the struct fiemap */
3078 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3079 if (fm
->fm_extent_count
!= 0) {
3080 p
= argptr
+ target_size_in
;
3081 /* ...and then all the struct fiemap_extents */
3082 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3083 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3088 unlock_user(argptr
, arg
, target_size_out
);
3098 static IOCTLEntry ioctl_entries
[] = {
3099 #define IOCTL(cmd, access, ...) \
3100 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3101 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3102 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3107 /* ??? Implement proper locking for ioctls. */
3108 /* do_ioctl() Must return target values and target errnos. */
3109 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3111 const IOCTLEntry
*ie
;
3112 const argtype
*arg_type
;
3114 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3120 if (ie
->target_cmd
== 0) {
3121 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3122 return -TARGET_ENOSYS
;
3124 if (ie
->target_cmd
== cmd
)
3128 arg_type
= ie
->arg_type
;
3130 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3133 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3136 switch(arg_type
[0]) {
3139 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3144 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3148 target_size
= thunk_type_size(arg_type
, 0);
3149 switch(ie
->access
) {
3151 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3152 if (!is_error(ret
)) {
3153 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3155 return -TARGET_EFAULT
;
3156 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3157 unlock_user(argptr
, arg
, target_size
);
3161 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3163 return -TARGET_EFAULT
;
3164 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3165 unlock_user(argptr
, arg
, 0);
3166 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3170 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3172 return -TARGET_EFAULT
;
3173 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3174 unlock_user(argptr
, arg
, 0);
3175 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3176 if (!is_error(ret
)) {
3177 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3179 return -TARGET_EFAULT
;
3180 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3181 unlock_user(argptr
, arg
, target_size
);
3187 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3188 (long)cmd
, arg_type
[0]);
3189 ret
= -TARGET_ENOSYS
;
3195 static const bitmask_transtbl iflag_tbl
[] = {
3196 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3197 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3198 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3199 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3200 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3201 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3202 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3203 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3204 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3205 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3206 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3207 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3208 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3209 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3213 static const bitmask_transtbl oflag_tbl
[] = {
3214 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3215 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3216 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3217 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3218 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3219 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3220 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3221 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3222 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3223 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3224 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3225 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3226 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3227 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3228 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3229 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3230 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3231 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3232 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3233 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3234 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3235 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3236 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3237 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3241 static const bitmask_transtbl cflag_tbl
[] = {
3242 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3243 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3244 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3245 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3246 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3247 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3248 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3249 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3250 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3251 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3252 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3253 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3254 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3255 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3256 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3257 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3258 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3259 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3260 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3261 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3262 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3263 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3264 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3265 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3266 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3267 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3268 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3269 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3270 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3271 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3272 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3276 static const bitmask_transtbl lflag_tbl
[] = {
3277 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3278 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3279 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3280 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3281 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3282 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3283 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3284 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3285 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3286 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3287 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3288 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3289 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3290 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3291 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3295 static void target_to_host_termios (void *dst
, const void *src
)
3297 struct host_termios
*host
= dst
;
3298 const struct target_termios
*target
= src
;
3301 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3303 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3305 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3307 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3308 host
->c_line
= target
->c_line
;
3310 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3311 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3312 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3313 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3314 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3315 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3316 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3317 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3318 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3319 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3320 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3321 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3322 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3323 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3324 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3325 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3326 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3327 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3330 static void host_to_target_termios (void *dst
, const void *src
)
3332 struct target_termios
*target
= dst
;
3333 const struct host_termios
*host
= src
;
3336 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3338 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3340 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3342 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3343 target
->c_line
= host
->c_line
;
3345 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3346 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3347 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3348 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3349 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3350 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3351 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3352 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3353 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3354 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3355 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3356 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3357 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3358 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3359 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3360 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3361 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3362 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3365 static const StructEntry struct_termios_def
= {
3366 .convert
= { host_to_target_termios
, target_to_host_termios
},
3367 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3368 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3371 static bitmask_transtbl mmap_flags_tbl
[] = {
3372 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3373 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3374 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3375 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3376 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3377 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3378 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3379 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3383 #if defined(TARGET_I386)
3385 /* NOTE: there is really one LDT for all the threads */
3386 static uint8_t *ldt_table
;
3388 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3395 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3396 if (size
> bytecount
)
3398 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3400 return -TARGET_EFAULT
;
3401 /* ??? Should this by byteswapped? */
3402 memcpy(p
, ldt_table
, size
);
3403 unlock_user(p
, ptr
, size
);
3407 /* XXX: add locking support */
3408 static abi_long
write_ldt(CPUX86State
*env
,
3409 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3411 struct target_modify_ldt_ldt_s ldt_info
;
3412 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3413 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3414 int seg_not_present
, useable
, lm
;
3415 uint32_t *lp
, entry_1
, entry_2
;
3417 if (bytecount
!= sizeof(ldt_info
))
3418 return -TARGET_EINVAL
;
3419 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3420 return -TARGET_EFAULT
;
3421 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3422 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3423 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3424 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3425 unlock_user_struct(target_ldt_info
, ptr
, 0);
3427 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3428 return -TARGET_EINVAL
;
3429 seg_32bit
= ldt_info
.flags
& 1;
3430 contents
= (ldt_info
.flags
>> 1) & 3;
3431 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3432 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3433 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3434 useable
= (ldt_info
.flags
>> 6) & 1;
3438 lm
= (ldt_info
.flags
>> 7) & 1;
3440 if (contents
== 3) {
3442 return -TARGET_EINVAL
;
3443 if (seg_not_present
== 0)
3444 return -TARGET_EINVAL
;
3446 /* allocate the LDT */
3448 env
->ldt
.base
= target_mmap(0,
3449 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3450 PROT_READ
|PROT_WRITE
,
3451 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3452 if (env
->ldt
.base
== -1)
3453 return -TARGET_ENOMEM
;
3454 memset(g2h(env
->ldt
.base
), 0,
3455 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3456 env
->ldt
.limit
= 0xffff;
3457 ldt_table
= g2h(env
->ldt
.base
);
3460 /* NOTE: same code as Linux kernel */
3461 /* Allow LDTs to be cleared by the user. */
3462 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3465 read_exec_only
== 1 &&
3467 limit_in_pages
== 0 &&
3468 seg_not_present
== 1 &&
3476 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3477 (ldt_info
.limit
& 0x0ffff);
3478 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3479 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3480 (ldt_info
.limit
& 0xf0000) |
3481 ((read_exec_only
^ 1) << 9) |
3483 ((seg_not_present
^ 1) << 15) |
3485 (limit_in_pages
<< 23) |
3489 entry_2
|= (useable
<< 20);
3491 /* Install the new entry ... */
3493 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3494 lp
[0] = tswap32(entry_1
);
3495 lp
[1] = tswap32(entry_2
);
3499 /* specific and weird i386 syscalls */
3500 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3501 unsigned long bytecount
)
3507 ret
= read_ldt(ptr
, bytecount
);
3510 ret
= write_ldt(env
, ptr
, bytecount
, 1);
3513 ret
= write_ldt(env
, ptr
, bytecount
, 0);
3516 ret
= -TARGET_ENOSYS
;
3522 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3523 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3525 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3526 struct target_modify_ldt_ldt_s ldt_info
;
3527 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3528 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3529 int seg_not_present
, useable
, lm
;
3530 uint32_t *lp
, entry_1
, entry_2
;
3533 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3534 if (!target_ldt_info
)
3535 return -TARGET_EFAULT
;
3536 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3537 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3538 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3539 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3540 if (ldt_info
.entry_number
== -1) {
3541 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
3542 if (gdt_table
[i
] == 0) {
3543 ldt_info
.entry_number
= i
;
3544 target_ldt_info
->entry_number
= tswap32(i
);
3549 unlock_user_struct(target_ldt_info
, ptr
, 1);
3551 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
3552 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
3553 return -TARGET_EINVAL
;
3554 seg_32bit
= ldt_info
.flags
& 1;
3555 contents
= (ldt_info
.flags
>> 1) & 3;
3556 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3557 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3558 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3559 useable
= (ldt_info
.flags
>> 6) & 1;
3563 lm
= (ldt_info
.flags
>> 7) & 1;
3566 if (contents
== 3) {
3567 if (seg_not_present
== 0)
3568 return -TARGET_EINVAL
;
3571 /* NOTE: same code as Linux kernel */
3572 /* Allow LDTs to be cleared by the user. */
3573 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3574 if ((contents
== 0 &&
3575 read_exec_only
== 1 &&
3577 limit_in_pages
== 0 &&
3578 seg_not_present
== 1 &&
3586 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3587 (ldt_info
.limit
& 0x0ffff);
3588 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3589 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3590 (ldt_info
.limit
& 0xf0000) |
3591 ((read_exec_only
^ 1) << 9) |
3593 ((seg_not_present
^ 1) << 15) |
3595 (limit_in_pages
<< 23) |
3600 /* Install the new entry ... */
3602 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
3603 lp
[0] = tswap32(entry_1
);
3604 lp
[1] = tswap32(entry_2
);
3608 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3610 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3611 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3612 uint32_t base_addr
, limit
, flags
;
3613 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
3614 int seg_not_present
, useable
, lm
;
3615 uint32_t *lp
, entry_1
, entry_2
;
3617 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3618 if (!target_ldt_info
)
3619 return -TARGET_EFAULT
;
3620 idx
= tswap32(target_ldt_info
->entry_number
);
3621 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
3622 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
3623 unlock_user_struct(target_ldt_info
, ptr
, 1);
3624 return -TARGET_EINVAL
;
3626 lp
= (uint32_t *)(gdt_table
+ idx
);
3627 entry_1
= tswap32(lp
[0]);
3628 entry_2
= tswap32(lp
[1]);
3630 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
3631 contents
= (entry_2
>> 10) & 3;
3632 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
3633 seg_32bit
= (entry_2
>> 22) & 1;
3634 limit_in_pages
= (entry_2
>> 23) & 1;
3635 useable
= (entry_2
>> 20) & 1;
3639 lm
= (entry_2
>> 21) & 1;
3641 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
3642 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
3643 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
3644 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
3645 base_addr
= (entry_1
>> 16) |
3646 (entry_2
& 0xff000000) |
3647 ((entry_2
& 0xff) << 16);
3648 target_ldt_info
->base_addr
= tswapl(base_addr
);
3649 target_ldt_info
->limit
= tswap32(limit
);
3650 target_ldt_info
->flags
= tswap32(flags
);
3651 unlock_user_struct(target_ldt_info
, ptr
, 1);
3654 #endif /* TARGET_I386 && TARGET_ABI32 */
3656 #ifndef TARGET_ABI32
3657 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
3664 case TARGET_ARCH_SET_GS
:
3665 case TARGET_ARCH_SET_FS
:
3666 if (code
== TARGET_ARCH_SET_GS
)
3670 cpu_x86_load_seg(env
, idx
, 0);
3671 env
->segs
[idx
].base
= addr
;
3673 case TARGET_ARCH_GET_GS
:
3674 case TARGET_ARCH_GET_FS
:
3675 if (code
== TARGET_ARCH_GET_GS
)
3679 val
= env
->segs
[idx
].base
;
3680 if (put_user(val
, addr
, abi_ulong
))
3681 return -TARGET_EFAULT
;
3684 ret
= -TARGET_EINVAL
;
3691 #endif /* defined(TARGET_I386) */
3693 #if defined(CONFIG_USE_NPTL)
/* Stack size given to each emulated guest thread in the NPTL build. */
3695 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
/* Serializes thread creation so parent/child setup appears atomic. */
3697 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
/* Fields of the new_thread_info handshake structure shared between
   do_fork() (parent) and clone_func() (child thread):
   - mutex/cond: parent blocks until the child broadcasts readiness;
   - child_tidptr/parent_tidptr: guest addresses where the new TID is
     stored, per CLONE_CHILD_SETTID / CLONE_PARENT_SETTID semantics.
   NOTE(review): the struct's opening declaration is not visible in
   this extract; field grouping assumed from the do_fork() usage below. */
3700 pthread_mutex_t mutex
;
3701 pthread_cond_t cond
;
3704 abi_ulong child_tidptr
;
3705 abi_ulong parent_tidptr
;
/* Host-thread trampoline for a newly created guest thread (NPTL build):
   publish the host TID, unblock signals, signal readiness to the
   parent, then wait on clone_lock until the parent finishes TLS setup. */
3709 static void *clone_func(void *arg
)
3711 new_thread_info
*info
= arg
;
3717 ts
= (TaskState
*)thread_env
->opaque
;
/* Record the host TID so it can be reported to the guest. */
3718 info
->tid
= gettid();
3719 env
->host_tid
= info
->tid
;
/* CLONE_CHILD_SETTID: store the TID at the guest-supplied address. */
3721 if (info
->child_tidptr
)
3722 put_user_u32(info
->tid
, info
->child_tidptr
);
/* CLONE_PARENT_SETTID: likewise for the parent's address. */
3723 if (info
->parent_tidptr
)
3724 put_user_u32(info
->tid
, info
->parent_tidptr
);
3725 /* Enable signals. */
3726 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
3727 /* Signal to the parent that we're ready. */
3728 pthread_mutex_lock(&info
->mutex
);
3729 pthread_cond_broadcast(&info
->cond
);
3730 pthread_mutex_unlock(&info
->mutex
);
3731 /* Wait until the parent has finshed initializing the tls state. */
3732 pthread_mutex_lock(&clone_lock
);
3733 pthread_mutex_unlock(&clone_lock
);
/* Non-NPTL build: clone(2) child entry point; the malloc'd stack
   stands in for the kernel stack of the child. */
3739 /* this stack is the equivalent of the kernel stack associated with a
3741 #define NEW_STACK_SIZE 8192
3743 static int clone_func(void *arg
)
3745 CPUState
*env
= arg
;
3752 /* do_fork() Must return host values and target errnos (unlike most
3753 do_*() functions). */
3754 static int do_fork(CPUState
*env
, unsigned int flags
, abi_ulong newsp
,
3755 abi_ulong parent_tidptr
, target_ulong newtls
,
3756 abi_ulong child_tidptr
)
3761 #if defined(CONFIG_USE_NPTL)
3762 unsigned int nptl_flags
;
3768 /* Emulate vfork() with fork() */
3769 if (flags
& CLONE_VFORK
)
3770 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
3772 if (flags
& CLONE_VM
) {
3773 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
3774 #if defined(CONFIG_USE_NPTL)
3775 new_thread_info info
;
3776 pthread_attr_t attr
;
3778 ts
= qemu_mallocz(sizeof(TaskState
));
3779 init_task_state(ts
);
3780 /* we create a new CPU instance. */
3781 new_env
= cpu_copy(env
);
3782 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3785 /* Init regs that differ from the parent. */
3786 cpu_clone_regs(new_env
, newsp
);
3787 new_env
->opaque
= ts
;
3788 ts
->bprm
= parent_ts
->bprm
;
3789 ts
->info
= parent_ts
->info
;
3790 #if defined(CONFIG_USE_NPTL)
3792 flags
&= ~CLONE_NPTL_FLAGS2
;
3794 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
3795 ts
->child_tidptr
= child_tidptr
;
3798 if (nptl_flags
& CLONE_SETTLS
)
3799 cpu_set_tls (new_env
, newtls
);
3801 /* Grab a mutex so that thread setup appears atomic. */
3802 pthread_mutex_lock(&clone_lock
);
3804 memset(&info
, 0, sizeof(info
));
3805 pthread_mutex_init(&info
.mutex
, NULL
);
3806 pthread_mutex_lock(&info
.mutex
);
3807 pthread_cond_init(&info
.cond
, NULL
);
3809 if (nptl_flags
& CLONE_CHILD_SETTID
)
3810 info
.child_tidptr
= child_tidptr
;
3811 if (nptl_flags
& CLONE_PARENT_SETTID
)
3812 info
.parent_tidptr
= parent_tidptr
;
3814 ret
= pthread_attr_init(&attr
);
3815 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
3816 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
3817 /* It is not safe to deliver signals until the child has finished
3818 initializing, so temporarily block all signals. */
3819 sigfillset(&sigmask
);
3820 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
3822 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
3823 /* TODO: Free new CPU state if thread creation failed. */
3825 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
3826 pthread_attr_destroy(&attr
);
3828 /* Wait for the child to initialize. */
3829 pthread_cond_wait(&info
.cond
, &info
.mutex
);
3831 if (flags
& CLONE_PARENT_SETTID
)
3832 put_user_u32(ret
, parent_tidptr
);
3836 pthread_mutex_unlock(&info
.mutex
);
3837 pthread_cond_destroy(&info
.cond
);
3838 pthread_mutex_destroy(&info
.mutex
);
3839 pthread_mutex_unlock(&clone_lock
);
3841 if (flags
& CLONE_NPTL_FLAGS2
)
3843 /* This is probably going to die very quickly, but do it anyway. */
3844 new_stack
= qemu_mallocz (NEW_STACK_SIZE
);
3846 ret
= __clone2(clone_func
, new_stack
, NEW_STACK_SIZE
, flags
, new_env
);
3848 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
3852 /* if no CLONE_VM, we consider it is a fork */
3853 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
3858 /* Child Process. */
3859 cpu_clone_regs(env
, newsp
);
3861 #if defined(CONFIG_USE_NPTL)
3862 /* There is a race condition here. The parent process could
3863 theoretically read the TID in the child process before the child
3864 tid is set. This would require using either ptrace
3865 (not implemented) or having *_tidptr to point at a shared memory
3866 mapping. We can't repeat the spinlock hack used above because
3867 the child process gets its own copy of the lock. */
3868 if (flags
& CLONE_CHILD_SETTID
)
3869 put_user_u32(gettid(), child_tidptr
);
3870 if (flags
& CLONE_PARENT_SETTID
)
3871 put_user_u32(gettid(), parent_tidptr
);
3872 ts
= (TaskState
*)env
->opaque
;
3873 if (flags
& CLONE_SETTLS
)
3874 cpu_set_tls (env
, newtls
);
3875 if (flags
& CLONE_CHILD_CLEARTID
)
3876 ts
->child_tidptr
= child_tidptr
;
3885 /* warning : doesn't handle linux specific flags... */
3886 static int target_to_host_fcntl_cmd(int cmd
)
3889 case TARGET_F_DUPFD
:
3890 case TARGET_F_GETFD
:
3891 case TARGET_F_SETFD
:
3892 case TARGET_F_GETFL
:
3893 case TARGET_F_SETFL
:
3895 case TARGET_F_GETLK
:
3897 case TARGET_F_SETLK
:
3899 case TARGET_F_SETLKW
:
3901 case TARGET_F_GETOWN
:
3903 case TARGET_F_SETOWN
:
3905 case TARGET_F_GETSIG
:
3907 case TARGET_F_SETSIG
:
3909 #if TARGET_ABI_BITS == 32
3910 case TARGET_F_GETLK64
:
3912 case TARGET_F_SETLK64
:
3914 case TARGET_F_SETLKW64
:
3917 case TARGET_F_SETLEASE
:
3919 case TARGET_F_GETLEASE
:
3921 #ifdef F_DUPFD_CLOEXEC
3922 case TARGET_F_DUPFD_CLOEXEC
:
3923 return F_DUPFD_CLOEXEC
;
3925 case TARGET_F_NOTIFY
:
3928 return -TARGET_EINVAL
;
3930 return -TARGET_EINVAL
;
3933 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
3936 struct target_flock
*target_fl
;
3937 struct flock64 fl64
;
3938 struct target_flock64
*target_fl64
;
3940 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
3942 if (host_cmd
== -TARGET_EINVAL
)
3946 case TARGET_F_GETLK
:
3947 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3948 return -TARGET_EFAULT
;
3949 fl
.l_type
= tswap16(target_fl
->l_type
);
3950 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3951 fl
.l_start
= tswapl(target_fl
->l_start
);
3952 fl
.l_len
= tswapl(target_fl
->l_len
);
3953 fl
.l_pid
= tswap32(target_fl
->l_pid
);
3954 unlock_user_struct(target_fl
, arg
, 0);
3955 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
3957 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
3958 return -TARGET_EFAULT
;
3959 target_fl
->l_type
= tswap16(fl
.l_type
);
3960 target_fl
->l_whence
= tswap16(fl
.l_whence
);
3961 target_fl
->l_start
= tswapl(fl
.l_start
);
3962 target_fl
->l_len
= tswapl(fl
.l_len
);
3963 target_fl
->l_pid
= tswap32(fl
.l_pid
);
3964 unlock_user_struct(target_fl
, arg
, 1);
3968 case TARGET_F_SETLK
:
3969 case TARGET_F_SETLKW
:
3970 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3971 return -TARGET_EFAULT
;
3972 fl
.l_type
= tswap16(target_fl
->l_type
);
3973 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3974 fl
.l_start
= tswapl(target_fl
->l_start
);
3975 fl
.l_len
= tswapl(target_fl
->l_len
);
3976 fl
.l_pid
= tswap32(target_fl
->l_pid
);
3977 unlock_user_struct(target_fl
, arg
, 0);
3978 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
3981 case TARGET_F_GETLK64
:
3982 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
3983 return -TARGET_EFAULT
;
3984 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
3985 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
3986 fl64
.l_start
= tswapl(target_fl64
->l_start
);
3987 fl64
.l_len
= tswapl(target_fl64
->l_len
);
3988 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
3989 unlock_user_struct(target_fl64
, arg
, 0);
3990 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
3992 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
3993 return -TARGET_EFAULT
;
3994 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
3995 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
3996 target_fl64
->l_start
= tswapl(fl64
.l_start
);
3997 target_fl64
->l_len
= tswapl(fl64
.l_len
);
3998 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
3999 unlock_user_struct(target_fl64
, arg
, 1);
4002 case TARGET_F_SETLK64
:
4003 case TARGET_F_SETLKW64
:
4004 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4005 return -TARGET_EFAULT
;
4006 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4007 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4008 fl64
.l_start
= tswapl(target_fl64
->l_start
);
4009 fl64
.l_len
= tswapl(target_fl64
->l_len
);
4010 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4011 unlock_user_struct(target_fl64
, arg
, 0);
4012 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4015 case TARGET_F_GETFL
:
4016 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4018 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4022 case TARGET_F_SETFL
:
4023 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4026 case TARGET_F_SETOWN
:
4027 case TARGET_F_GETOWN
:
4028 case TARGET_F_SETSIG
:
4029 case TARGET_F_GETSIG
:
4030 case TARGET_F_SETLEASE
:
4031 case TARGET_F_GETLEASE
:
4032 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4036 ret
= get_errno(fcntl(fd
, cmd
, arg
));
/*
 * Helpers for the legacy 16-bit uid/gid syscall variants (USE_UID16).
 * high2low*: clamp a full-range host id into the 16-bit guest range;
 *            ids above 65535 are reported as the overflow id 65534.
 * low2high*: widen a 16-bit guest id; the 16-bit sentinel 0xffff (-1)
 *            maps to the full-width -1 ("no change" in chown et al).
 */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
4076 #endif /* USE_UID16 */
4078 void syscall_init(void)
4081 const argtype
*arg_type
;
4085 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4086 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4087 #include "syscall_types.h"
4089 #undef STRUCT_SPECIAL
4091 /* we patch the ioctl size if necessary. We rely on the fact that
4092 no ioctl has all the bits at '1' in the size field */
4094 while (ie
->target_cmd
!= 0) {
4095 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4096 TARGET_IOC_SIZEMASK
) {
4097 arg_type
= ie
->arg_type
;
4098 if (arg_type
[0] != TYPE_PTR
) {
4099 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4104 size
= thunk_type_size(arg_type
, 0);
4105 ie
->target_cmd
= (ie
->target_cmd
&
4106 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4107 (size
<< TARGET_IOC_SIZESHIFT
);
4110 /* Build target_to_host_errno_table[] table from
4111 * host_to_target_errno_table[]. */
4112 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
4113 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4115 /* automatic consistency check if same arch */
4116 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4117 (defined(__x86_64__) && defined(TARGET_X86_64))
4118 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4119 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4120 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/*
 * Combine the two 32-bit halves of a 64-bit file offset passed as a
 * register pair; which register carries the high word depends on the
 * guest byte order.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/*
 * truncate64: the 64-bit length arrives split across two ABI registers
 * (arg2/arg3, or arg3/arg4 on ARM EABI where 64-bit arguments must be
 * passed in an even/odd register pair, skipping one slot).
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        /* EABI aligns the 64-bit value: shift the halves down one slot. */
        arg2 = arg3;
        arg3 = arg4;
    }
#endif
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64: same register-pair handling as target_truncate64 above,
 * but the first argument is a file descriptor.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        /* EABI aligns the 64-bit value: shift the halves down one slot. */
        arg2 = arg3;
        arg3 = arg4;
    }
#endif
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
4177 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4178 abi_ulong target_addr
)
4180 struct target_timespec
*target_ts
;
4182 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4183 return -TARGET_EFAULT
;
4184 host_ts
->tv_sec
= tswapl(target_ts
->tv_sec
);
4185 host_ts
->tv_nsec
= tswapl(target_ts
->tv_nsec
);
4186 unlock_user_struct(target_ts
, target_addr
, 0);
4190 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4191 struct timespec
*host_ts
)
4193 struct target_timespec
*target_ts
;
4195 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4196 return -TARGET_EFAULT
;
4197 target_ts
->tv_sec
= tswapl(host_ts
->tv_sec
);
4198 target_ts
->tv_nsec
= tswapl(host_ts
->tv_nsec
);
4199 unlock_user_struct(target_ts
, target_addr
, 1);
4203 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4204 static inline abi_long
host_to_target_stat64(void *cpu_env
,
4205 abi_ulong target_addr
,
4206 struct stat
*host_st
)
4209 if (((CPUARMState
*)cpu_env
)->eabi
) {
4210 struct target_eabi_stat64
*target_st
;
4212 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4213 return -TARGET_EFAULT
;
4214 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
4215 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4216 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4217 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4218 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4220 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4221 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4222 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4223 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4224 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4225 __put_user(host_st
->st_size
, &target_st
->st_size
);
4226 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4227 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4228 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4229 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4230 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4231 unlock_user_struct(target_st
, target_addr
, 1);
4235 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4236 struct target_stat
*target_st
;
4238 struct target_stat64
*target_st
;
4241 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4242 return -TARGET_EFAULT
;
4243 memset(target_st
, 0, sizeof(*target_st
));
4244 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4245 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4246 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4247 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4249 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4250 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4251 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4252 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4253 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4254 /* XXX: better use of kernel struct */
4255 __put_user(host_st
->st_size
, &target_st
->st_size
);
4256 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4257 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4258 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4259 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4260 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4261 unlock_user_struct(target_st
, target_addr
, 1);
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
/*
 * Forward a guest futex(2) call to the host kernel, translating the
 * guest address, value, and timeout as required per operation.
 * Returns the host result converted to a target errno.
 */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
        /* Only FUTEX_WAIT interprets TIMEOUT as a real timespec. */
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, 0));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Translate the terminating signal (low 7 bits); keep the rest. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Translate the stop signal carried in bits 8-15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    /* Normal exit: status bits need no translation. */
    return status;
}
4335 int get_osversion(void)
4337 static int osversion
;
4338 struct new_utsname buf
;
4343 if (qemu_uname_release
&& *qemu_uname_release
) {
4344 s
= qemu_uname_release
;
4346 if (sys_uname(&buf
))
4351 for (i
= 0; i
< 3; i
++) {
4353 while (*s
>= '0' && *s
<= '9') {
4358 tmp
= (tmp
<< 8) + n
;
4366 /* do_syscall() should always have a single exit point at the end so
4367 that actions, such as logging of syscall results, can be performed.
4368 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4369 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
4370 abi_long arg2
, abi_long arg3
, abi_long arg4
,
4371 abi_long arg5
, abi_long arg6
)
4379 gemu_log("syscall %d", num
);
4382 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
4385 case TARGET_NR_exit
:
4386 #ifdef CONFIG_USE_NPTL
4387 /* In old applications this may be used to implement _exit(2).
4388 However in threaded applictions it is used for thread termination,
4389 and _exit_group is used for application termination.
4390 Do thread termination if we have more then one thread. */
4391 /* FIXME: This probably breaks if a signal arrives. We should probably
4392 be disabling signals. */
4393 if (first_cpu
->next_cpu
) {
4401 while (p
&& p
!= (CPUState
*)cpu_env
) {
4402 lastp
= &p
->next_cpu
;
4405 /* If we didn't find the CPU for this thread then something is
4409 /* Remove the CPU from the list. */
4410 *lastp
= p
->next_cpu
;
4412 ts
= ((CPUState
*)cpu_env
)->opaque
;
4413 if (ts
->child_tidptr
) {
4414 put_user_u32(0, ts
->child_tidptr
);
4415 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
4427 gdb_exit(cpu_env
, arg1
);
4429 ret
= 0; /* avoid warning */
4431 case TARGET_NR_read
:
4435 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
4437 ret
= get_errno(read(arg1
, p
, arg3
));
4438 unlock_user(p
, arg2
, ret
);
4441 case TARGET_NR_write
:
4442 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
4444 ret
= get_errno(write(arg1
, p
, arg3
));
4445 unlock_user(p
, arg2
, 0);
4447 case TARGET_NR_open
:
4448 if (!(p
= lock_user_string(arg1
)))
4450 ret
= get_errno(open(path(p
),
4451 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
4453 unlock_user(p
, arg1
, 0);
4455 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4456 case TARGET_NR_openat
:
4457 if (!(p
= lock_user_string(arg2
)))
4459 ret
= get_errno(sys_openat(arg1
,
4461 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
4463 unlock_user(p
, arg2
, 0);
4466 case TARGET_NR_close
:
4467 ret
= get_errno(close(arg1
));
4472 case TARGET_NR_fork
:
4473 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
4475 #ifdef TARGET_NR_waitpid
4476 case TARGET_NR_waitpid
:
4479 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
4480 if (!is_error(ret
) && arg2
4481 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
4486 #ifdef TARGET_NR_waitid
4487 case TARGET_NR_waitid
:
4491 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
4492 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
4493 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
4495 host_to_target_siginfo(p
, &info
);
4496 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
4501 #ifdef TARGET_NR_creat /* not on alpha */
4502 case TARGET_NR_creat
:
4503 if (!(p
= lock_user_string(arg1
)))
4505 ret
= get_errno(creat(p
, arg2
));
4506 unlock_user(p
, arg1
, 0);
4509 case TARGET_NR_link
:
4512 p
= lock_user_string(arg1
);
4513 p2
= lock_user_string(arg2
);
4515 ret
= -TARGET_EFAULT
;
4517 ret
= get_errno(link(p
, p2
));
4518 unlock_user(p2
, arg2
, 0);
4519 unlock_user(p
, arg1
, 0);
4522 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4523 case TARGET_NR_linkat
:
4528 p
= lock_user_string(arg2
);
4529 p2
= lock_user_string(arg4
);
4531 ret
= -TARGET_EFAULT
;
4533 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
4534 unlock_user(p
, arg2
, 0);
4535 unlock_user(p2
, arg4
, 0);
4539 case TARGET_NR_unlink
:
4540 if (!(p
= lock_user_string(arg1
)))
4542 ret
= get_errno(unlink(p
));
4543 unlock_user(p
, arg1
, 0);
4545 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4546 case TARGET_NR_unlinkat
:
4547 if (!(p
= lock_user_string(arg2
)))
4549 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
4550 unlock_user(p
, arg2
, 0);
4553 case TARGET_NR_execve
:
4555 char **argp
, **envp
;
4558 abi_ulong guest_argp
;
4559 abi_ulong guest_envp
;
4565 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
4566 if (get_user_ual(addr
, gp
))
4574 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
4575 if (get_user_ual(addr
, gp
))
4582 argp
= alloca((argc
+ 1) * sizeof(void *));
4583 envp
= alloca((envc
+ 1) * sizeof(void *));
4585 for (gp
= guest_argp
, q
= argp
; gp
;
4586 gp
+= sizeof(abi_ulong
), q
++) {
4587 if (get_user_ual(addr
, gp
))
4591 if (!(*q
= lock_user_string(addr
)))
4596 for (gp
= guest_envp
, q
= envp
; gp
;
4597 gp
+= sizeof(abi_ulong
), q
++) {
4598 if (get_user_ual(addr
, gp
))
4602 if (!(*q
= lock_user_string(addr
)))
4607 if (!(p
= lock_user_string(arg1
)))
4609 ret
= get_errno(execve(p
, argp
, envp
));
4610 unlock_user(p
, arg1
, 0);
4615 ret
= -TARGET_EFAULT
;
4618 for (gp
= guest_argp
, q
= argp
; *q
;
4619 gp
+= sizeof(abi_ulong
), q
++) {
4620 if (get_user_ual(addr
, gp
)
4623 unlock_user(*q
, addr
, 0);
4625 for (gp
= guest_envp
, q
= envp
; *q
;
4626 gp
+= sizeof(abi_ulong
), q
++) {
4627 if (get_user_ual(addr
, gp
)
4630 unlock_user(*q
, addr
, 0);
4634 case TARGET_NR_chdir
:
4635 if (!(p
= lock_user_string(arg1
)))
4637 ret
= get_errno(chdir(p
));
4638 unlock_user(p
, arg1
, 0);
4640 #ifdef TARGET_NR_time
4641 case TARGET_NR_time
:
4644 ret
= get_errno(time(&host_time
));
4647 && put_user_sal(host_time
, arg1
))
4652 case TARGET_NR_mknod
:
4653 if (!(p
= lock_user_string(arg1
)))
4655 ret
= get_errno(mknod(p
, arg2
, arg3
));
4656 unlock_user(p
, arg1
, 0);
4658 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4659 case TARGET_NR_mknodat
:
4660 if (!(p
= lock_user_string(arg2
)))
4662 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
4663 unlock_user(p
, arg2
, 0);
4666 case TARGET_NR_chmod
:
4667 if (!(p
= lock_user_string(arg1
)))
4669 ret
= get_errno(chmod(p
, arg2
));
4670 unlock_user(p
, arg1
, 0);
4672 #ifdef TARGET_NR_break
4673 case TARGET_NR_break
:
4676 #ifdef TARGET_NR_oldstat
4677 case TARGET_NR_oldstat
:
4680 case TARGET_NR_lseek
:
4681 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
4683 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4684 /* Alpha specific */
4685 case TARGET_NR_getxpid
:
4686 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
4687 ret
= get_errno(getpid());
4690 #ifdef TARGET_NR_getpid
4691 case TARGET_NR_getpid
:
4692 ret
= get_errno(getpid());
4695 case TARGET_NR_mount
:
4697 /* need to look at the data field */
4699 p
= lock_user_string(arg1
);
4700 p2
= lock_user_string(arg2
);
4701 p3
= lock_user_string(arg3
);
4702 if (!p
|| !p2
|| !p3
)
4703 ret
= -TARGET_EFAULT
;
4705 /* FIXME - arg5 should be locked, but it isn't clear how to
4706 * do that since it's not guaranteed to be a NULL-terminated
4710 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
4712 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
4714 unlock_user(p
, arg1
, 0);
4715 unlock_user(p2
, arg2
, 0);
4716 unlock_user(p3
, arg3
, 0);
4719 #ifdef TARGET_NR_umount
4720 case TARGET_NR_umount
:
4721 if (!(p
= lock_user_string(arg1
)))
4723 ret
= get_errno(umount(p
));
4724 unlock_user(p
, arg1
, 0);
4727 #ifdef TARGET_NR_stime /* not on alpha */
4728 case TARGET_NR_stime
:
4731 if (get_user_sal(host_time
, arg1
))
4733 ret
= get_errno(stime(&host_time
));
4737 case TARGET_NR_ptrace
:
4739 #ifdef TARGET_NR_alarm /* not on alpha */
4740 case TARGET_NR_alarm
:
4744 #ifdef TARGET_NR_oldfstat
4745 case TARGET_NR_oldfstat
:
4748 #ifdef TARGET_NR_pause /* not on alpha */
4749 case TARGET_NR_pause
:
4750 ret
= get_errno(pause());
4753 #ifdef TARGET_NR_utime
4754 case TARGET_NR_utime
:
4756 struct utimbuf tbuf
, *host_tbuf
;
4757 struct target_utimbuf
*target_tbuf
;
4759 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
4761 tbuf
.actime
= tswapl(target_tbuf
->actime
);
4762 tbuf
.modtime
= tswapl(target_tbuf
->modtime
);
4763 unlock_user_struct(target_tbuf
, arg2
, 0);
4768 if (!(p
= lock_user_string(arg1
)))
4770 ret
= get_errno(utime(p
, host_tbuf
));
4771 unlock_user(p
, arg1
, 0);
4775 case TARGET_NR_utimes
:
4777 struct timeval
*tvp
, tv
[2];
4779 if (copy_from_user_timeval(&tv
[0], arg2
)
4780 || copy_from_user_timeval(&tv
[1],
4781 arg2
+ sizeof(struct target_timeval
)))
4787 if (!(p
= lock_user_string(arg1
)))
4789 ret
= get_errno(utimes(p
, tvp
));
4790 unlock_user(p
, arg1
, 0);
4793 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4794 case TARGET_NR_futimesat
:
4796 struct timeval
*tvp
, tv
[2];
4798 if (copy_from_user_timeval(&tv
[0], arg3
)
4799 || copy_from_user_timeval(&tv
[1],
4800 arg3
+ sizeof(struct target_timeval
)))
4806 if (!(p
= lock_user_string(arg2
)))
4808 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
4809 unlock_user(p
, arg2
, 0);
4813 #ifdef TARGET_NR_stty
4814 case TARGET_NR_stty
:
4817 #ifdef TARGET_NR_gtty
4818 case TARGET_NR_gtty
:
4821 case TARGET_NR_access
:
4822 if (!(p
= lock_user_string(arg1
)))
4824 ret
= get_errno(access(path(p
), arg2
));
4825 unlock_user(p
, arg1
, 0);
4827 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4828 case TARGET_NR_faccessat
:
4829 if (!(p
= lock_user_string(arg2
)))
4831 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
4832 unlock_user(p
, arg2
, 0);
4835 #ifdef TARGET_NR_nice /* not on alpha */
4836 case TARGET_NR_nice
:
4837 ret
= get_errno(nice(arg1
));
4840 #ifdef TARGET_NR_ftime
4841 case TARGET_NR_ftime
:
4844 case TARGET_NR_sync
:
4848 case TARGET_NR_kill
:
4849 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
4851 case TARGET_NR_rename
:
4854 p
= lock_user_string(arg1
);
4855 p2
= lock_user_string(arg2
);
4857 ret
= -TARGET_EFAULT
;
4859 ret
= get_errno(rename(p
, p2
));
4860 unlock_user(p2
, arg2
, 0);
4861 unlock_user(p
, arg1
, 0);
4864 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4865 case TARGET_NR_renameat
:
4868 p
= lock_user_string(arg2
);
4869 p2
= lock_user_string(arg4
);
4871 ret
= -TARGET_EFAULT
;
4873 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
4874 unlock_user(p2
, arg4
, 0);
4875 unlock_user(p
, arg2
, 0);
4879 case TARGET_NR_mkdir
:
4880 if (!(p
= lock_user_string(arg1
)))
4882 ret
= get_errno(mkdir(p
, arg2
));
4883 unlock_user(p
, arg1
, 0);
4885 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4886 case TARGET_NR_mkdirat
:
4887 if (!(p
= lock_user_string(arg2
)))
4889 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
4890 unlock_user(p
, arg2
, 0);
4893 case TARGET_NR_rmdir
:
4894 if (!(p
= lock_user_string(arg1
)))
4896 ret
= get_errno(rmdir(p
));
4897 unlock_user(p
, arg1
, 0);
4900 ret
= get_errno(dup(arg1
));
4902 case TARGET_NR_pipe
:
4903 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
4905 #ifdef TARGET_NR_pipe2
4906 case TARGET_NR_pipe2
:
4907 ret
= do_pipe(cpu_env
, arg1
, arg2
, 1);
4910 case TARGET_NR_times
:
4912 struct target_tms
*tmsp
;
4914 ret
= get_errno(times(&tms
));
4916 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
4919 tmsp
->tms_utime
= tswapl(host_to_target_clock_t(tms
.tms_utime
));
4920 tmsp
->tms_stime
= tswapl(host_to_target_clock_t(tms
.tms_stime
));
4921 tmsp
->tms_cutime
= tswapl(host_to_target_clock_t(tms
.tms_cutime
));
4922 tmsp
->tms_cstime
= tswapl(host_to_target_clock_t(tms
.tms_cstime
));
4925 ret
= host_to_target_clock_t(ret
);
4928 #ifdef TARGET_NR_prof
4929 case TARGET_NR_prof
:
4932 #ifdef TARGET_NR_signal
4933 case TARGET_NR_signal
:
4936 case TARGET_NR_acct
:
4938 ret
= get_errno(acct(NULL
));
4940 if (!(p
= lock_user_string(arg1
)))
4942 ret
= get_errno(acct(path(p
)));
4943 unlock_user(p
, arg1
, 0);
4946 #ifdef TARGET_NR_umount2 /* not on alpha */
4947 case TARGET_NR_umount2
:
4948 if (!(p
= lock_user_string(arg1
)))
4950 ret
= get_errno(umount2(p
, arg2
));
4951 unlock_user(p
, arg1
, 0);
4954 #ifdef TARGET_NR_lock
4955 case TARGET_NR_lock
:
4958 case TARGET_NR_ioctl
:
4959 ret
= do_ioctl(arg1
, arg2
, arg3
);
4961 case TARGET_NR_fcntl
:
4962 ret
= do_fcntl(arg1
, arg2
, arg3
);
4964 #ifdef TARGET_NR_mpx
4968 case TARGET_NR_setpgid
:
4969 ret
= get_errno(setpgid(arg1
, arg2
));
4971 #ifdef TARGET_NR_ulimit
4972 case TARGET_NR_ulimit
:
4975 #ifdef TARGET_NR_oldolduname
4976 case TARGET_NR_oldolduname
:
4979 case TARGET_NR_umask
:
4980 ret
= get_errno(umask(arg1
));
4982 case TARGET_NR_chroot
:
4983 if (!(p
= lock_user_string(arg1
)))
4985 ret
= get_errno(chroot(p
));
4986 unlock_user(p
, arg1
, 0);
4988 case TARGET_NR_ustat
:
4990 case TARGET_NR_dup2
:
4991 ret
= get_errno(dup2(arg1
, arg2
));
4993 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
4994 case TARGET_NR_dup3
:
4995 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
4998 #ifdef TARGET_NR_getppid /* not on alpha */
4999 case TARGET_NR_getppid
:
5000 ret
= get_errno(getppid());
5003 case TARGET_NR_getpgrp
:
5004 ret
= get_errno(getpgrp());
5006 case TARGET_NR_setsid
:
5007 ret
= get_errno(setsid());
5009 #ifdef TARGET_NR_sigaction
5010 case TARGET_NR_sigaction
:
5012 #if defined(TARGET_ALPHA)
5013 struct target_sigaction act
, oact
, *pact
= 0;
5014 struct target_old_sigaction
*old_act
;
5016 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5018 act
._sa_handler
= old_act
->_sa_handler
;
5019 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5020 act
.sa_flags
= old_act
->sa_flags
;
5021 act
.sa_restorer
= 0;
5022 unlock_user_struct(old_act
, arg2
, 0);
5025 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5026 if (!is_error(ret
) && arg3
) {
5027 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5029 old_act
->_sa_handler
= oact
._sa_handler
;
5030 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5031 old_act
->sa_flags
= oact
.sa_flags
;
5032 unlock_user_struct(old_act
, arg3
, 1);
5034 #elif defined(TARGET_MIPS)
5035 struct target_sigaction act
, oact
, *pact
, *old_act
;
5038 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5040 act
._sa_handler
= old_act
->_sa_handler
;
5041 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5042 act
.sa_flags
= old_act
->sa_flags
;
5043 unlock_user_struct(old_act
, arg2
, 0);
5049 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5051 if (!is_error(ret
) && arg3
) {
5052 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5054 old_act
->_sa_handler
= oact
._sa_handler
;
5055 old_act
->sa_flags
= oact
.sa_flags
;
5056 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5057 old_act
->sa_mask
.sig
[1] = 0;
5058 old_act
->sa_mask
.sig
[2] = 0;
5059 old_act
->sa_mask
.sig
[3] = 0;
5060 unlock_user_struct(old_act
, arg3
, 1);
5063 struct target_old_sigaction
*old_act
;
5064 struct target_sigaction act
, oact
, *pact
;
5066 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5068 act
._sa_handler
= old_act
->_sa_handler
;
5069 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5070 act
.sa_flags
= old_act
->sa_flags
;
5071 act
.sa_restorer
= old_act
->sa_restorer
;
5072 unlock_user_struct(old_act
, arg2
, 0);
5077 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5078 if (!is_error(ret
) && arg3
) {
5079 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5081 old_act
->_sa_handler
= oact
._sa_handler
;
5082 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5083 old_act
->sa_flags
= oact
.sa_flags
;
5084 old_act
->sa_restorer
= oact
.sa_restorer
;
5085 unlock_user_struct(old_act
, arg3
, 1);
5091 case TARGET_NR_rt_sigaction
:
5093 #if defined(TARGET_ALPHA)
5094 struct target_sigaction act
, oact
, *pact
= 0;
5095 struct target_rt_sigaction
*rt_act
;
5096 /* ??? arg4 == sizeof(sigset_t). */
5098 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5100 act
._sa_handler
= rt_act
->_sa_handler
;
5101 act
.sa_mask
= rt_act
->sa_mask
;
5102 act
.sa_flags
= rt_act
->sa_flags
;
5103 act
.sa_restorer
= arg5
;
5104 unlock_user_struct(rt_act
, arg2
, 0);
5107 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5108 if (!is_error(ret
) && arg3
) {
5109 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5111 rt_act
->_sa_handler
= oact
._sa_handler
;
5112 rt_act
->sa_mask
= oact
.sa_mask
;
5113 rt_act
->sa_flags
= oact
.sa_flags
;
5114 unlock_user_struct(rt_act
, arg3
, 1);
5117 struct target_sigaction
*act
;
5118 struct target_sigaction
*oact
;
5121 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5126 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5127 ret
= -TARGET_EFAULT
;
5128 goto rt_sigaction_fail
;
5132 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5135 unlock_user_struct(act
, arg2
, 0);
5137 unlock_user_struct(oact
, arg3
, 1);
5141 #ifdef TARGET_NR_sgetmask /* not on alpha */
5142 case TARGET_NR_sgetmask
:
5145 abi_ulong target_set
;
5146 sigprocmask(0, NULL
, &cur_set
);
5147 host_to_target_old_sigset(&target_set
, &cur_set
);
5152 #ifdef TARGET_NR_ssetmask /* not on alpha */
5153 case TARGET_NR_ssetmask
:
5155 sigset_t set
, oset
, cur_set
;
5156 abi_ulong target_set
= arg1
;
5157 sigprocmask(0, NULL
, &cur_set
);
5158 target_to_host_old_sigset(&set
, &target_set
);
5159 sigorset(&set
, &set
, &cur_set
);
5160 sigprocmask(SIG_SETMASK
, &set
, &oset
);
5161 host_to_target_old_sigset(&target_set
, &oset
);
5166 #ifdef TARGET_NR_sigprocmask
5167 case TARGET_NR_sigprocmask
:
5169 #if defined(TARGET_ALPHA)
5170 sigset_t set
, oldset
;
5175 case TARGET_SIG_BLOCK
:
5178 case TARGET_SIG_UNBLOCK
:
5181 case TARGET_SIG_SETMASK
:
5185 ret
= -TARGET_EINVAL
;
5189 target_to_host_old_sigset(&set
, &mask
);
5191 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
5193 if (!is_error(ret
)) {
5194 host_to_target_old_sigset(&mask
, &oldset
);
5196 ((CPUAlphaState
*)cpu_env
)->[IR_V0
] = 0; /* force no error */
5199 sigset_t set
, oldset
, *set_ptr
;
5204 case TARGET_SIG_BLOCK
:
5207 case TARGET_SIG_UNBLOCK
:
5210 case TARGET_SIG_SETMASK
:
5214 ret
= -TARGET_EINVAL
;
5217 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5219 target_to_host_old_sigset(&set
, p
);
5220 unlock_user(p
, arg2
, 0);
5226 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5227 if (!is_error(ret
) && arg3
) {
5228 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5230 host_to_target_old_sigset(p
, &oldset
);
5231 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5237 case TARGET_NR_rt_sigprocmask
:
5240 sigset_t set
, oldset
, *set_ptr
;
5244 case TARGET_SIG_BLOCK
:
5247 case TARGET_SIG_UNBLOCK
:
5250 case TARGET_SIG_SETMASK
:
5254 ret
= -TARGET_EINVAL
;
5257 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5259 target_to_host_sigset(&set
, p
);
5260 unlock_user(p
, arg2
, 0);
5266 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5267 if (!is_error(ret
) && arg3
) {
5268 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5270 host_to_target_sigset(p
, &oldset
);
5271 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5275 #ifdef TARGET_NR_sigpending
5276 case TARGET_NR_sigpending
:
5279 ret
= get_errno(sigpending(&set
));
5280 if (!is_error(ret
)) {
5281 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5283 host_to_target_old_sigset(p
, &set
);
5284 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5289 case TARGET_NR_rt_sigpending
:
5292 ret
= get_errno(sigpending(&set
));
5293 if (!is_error(ret
)) {
5294 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5296 host_to_target_sigset(p
, &set
);
5297 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5301 #ifdef TARGET_NR_sigsuspend
5302 case TARGET_NR_sigsuspend
:
5305 #if defined(TARGET_ALPHA)
5306 abi_ulong mask
= arg1
;
5307 target_to_host_old_sigset(&set
, &mask
);
5309 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5311 target_to_host_old_sigset(&set
, p
);
5312 unlock_user(p
, arg1
, 0);
5314 ret
= get_errno(sigsuspend(&set
));
5318 case TARGET_NR_rt_sigsuspend
:
5321 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5323 target_to_host_sigset(&set
, p
);
5324 unlock_user(p
, arg1
, 0);
5325 ret
= get_errno(sigsuspend(&set
));
5328 case TARGET_NR_rt_sigtimedwait
:
5331 struct timespec uts
, *puts
;
5334 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5336 target_to_host_sigset(&set
, p
);
5337 unlock_user(p
, arg1
, 0);
5340 target_to_host_timespec(puts
, arg3
);
5344 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
5345 if (!is_error(ret
) && arg2
) {
5346 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
5348 host_to_target_siginfo(p
, &uinfo
);
5349 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
5353 case TARGET_NR_rt_sigqueueinfo
:
5356 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
5358 target_to_host_siginfo(&uinfo
, p
);
5359 unlock_user(p
, arg1
, 0);
5360 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
5363 #ifdef TARGET_NR_sigreturn
5364 case TARGET_NR_sigreturn
:
5365 /* NOTE: ret is eax, so not transcoding must be done */
5366 ret
= do_sigreturn(cpu_env
);
5369 case TARGET_NR_rt_sigreturn
:
5370 /* NOTE: ret is eax, so not transcoding must be done */
5371 ret
= do_rt_sigreturn(cpu_env
);
5373 case TARGET_NR_sethostname
:
5374 if (!(p
= lock_user_string(arg1
)))
5376 ret
= get_errno(sethostname(p
, arg2
));
5377 unlock_user(p
, arg1
, 0);
5379 case TARGET_NR_setrlimit
:
5381 int resource
= arg1
;
5382 struct target_rlimit
*target_rlim
;
5384 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
5386 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
5387 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
5388 unlock_user_struct(target_rlim
, arg2
, 0);
5389 ret
= get_errno(setrlimit(resource
, &rlim
));
5392 case TARGET_NR_getrlimit
:
5394 int resource
= arg1
;
5395 struct target_rlimit
*target_rlim
;
5398 ret
= get_errno(getrlimit(resource
, &rlim
));
5399 if (!is_error(ret
)) {
5400 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5402 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
5403 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
5404 unlock_user_struct(target_rlim
, arg2
, 1);
5408 case TARGET_NR_getrusage
:
5410 struct rusage rusage
;
5411 ret
= get_errno(getrusage(arg1
, &rusage
));
5412 if (!is_error(ret
)) {
5413 host_to_target_rusage(arg2
, &rusage
);
5417 case TARGET_NR_gettimeofday
:
5420 ret
= get_errno(gettimeofday(&tv
, NULL
));
5421 if (!is_error(ret
)) {
5422 if (copy_to_user_timeval(arg1
, &tv
))
5427 case TARGET_NR_settimeofday
:
5430 if (copy_from_user_timeval(&tv
, arg1
))
5432 ret
= get_errno(settimeofday(&tv
, NULL
));
5435 #ifdef TARGET_NR_select
5436 case TARGET_NR_select
:
5438 struct target_sel_arg_struct
*sel
;
5439 abi_ulong inp
, outp
, exp
, tvp
;
5442 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
5444 nsel
= tswapl(sel
->n
);
5445 inp
= tswapl(sel
->inp
);
5446 outp
= tswapl(sel
->outp
);
5447 exp
= tswapl(sel
->exp
);
5448 tvp
= tswapl(sel
->tvp
);
5449 unlock_user_struct(sel
, arg1
, 0);
5450 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
5454 #ifdef TARGET_NR_pselect6
5455 case TARGET_NR_pselect6
:
5456 goto unimplemented_nowarn
;
5458 case TARGET_NR_symlink
:
5461 p
= lock_user_string(arg1
);
5462 p2
= lock_user_string(arg2
);
5464 ret
= -TARGET_EFAULT
;
5466 ret
= get_errno(symlink(p
, p2
));
5467 unlock_user(p2
, arg2
, 0);
5468 unlock_user(p
, arg1
, 0);
5471 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5472 case TARGET_NR_symlinkat
:
5475 p
= lock_user_string(arg1
);
5476 p2
= lock_user_string(arg3
);
5478 ret
= -TARGET_EFAULT
;
5480 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
5481 unlock_user(p2
, arg3
, 0);
5482 unlock_user(p
, arg1
, 0);
5486 #ifdef TARGET_NR_oldlstat
5487 case TARGET_NR_oldlstat
:
5490 case TARGET_NR_readlink
:
5493 p
= lock_user_string(arg1
);
5494 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
5496 ret
= -TARGET_EFAULT
;
5498 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
5499 char real
[PATH_MAX
];
5500 temp
= realpath(exec_path
,real
);
5501 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
5502 snprintf((char *)p2
, arg3
, "%s", real
);
5505 ret
= get_errno(readlink(path(p
), p2
, arg3
));
5507 unlock_user(p2
, arg2
, ret
);
5508 unlock_user(p
, arg1
, 0);
5511 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5512 case TARGET_NR_readlinkat
:
5515 p
= lock_user_string(arg2
);
5516 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
5518 ret
= -TARGET_EFAULT
;
5520 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
5521 unlock_user(p2
, arg3
, ret
);
5522 unlock_user(p
, arg2
, 0);
5526 #ifdef TARGET_NR_uselib
5527 case TARGET_NR_uselib
:
5530 #ifdef TARGET_NR_swapon
5531 case TARGET_NR_swapon
:
5532 if (!(p
= lock_user_string(arg1
)))
5534 ret
= get_errno(swapon(p
, arg2
));
5535 unlock_user(p
, arg1
, 0);
5538 case TARGET_NR_reboot
:
5540 #ifdef TARGET_NR_readdir
5541 case TARGET_NR_readdir
:
5544 #ifdef TARGET_NR_mmap
5545 case TARGET_NR_mmap
:
5546 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE)
5549 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
5550 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
5558 unlock_user(v
, arg1
, 0);
5559 ret
= get_errno(target_mmap(v1
, v2
, v3
,
5560 target_to_host_bitmask(v4
, mmap_flags_tbl
),
5564 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5565 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5571 #ifdef TARGET_NR_mmap2
5572 case TARGET_NR_mmap2
:
5574 #define MMAP_SHIFT 12
5576 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5577 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5579 arg6
<< MMAP_SHIFT
));
5582 case TARGET_NR_munmap
:
5583 ret
= get_errno(target_munmap(arg1
, arg2
));
5585 case TARGET_NR_mprotect
:
5587 TaskState
*ts
= ((CPUState
*)cpu_env
)->opaque
;
5588 /* Special hack to detect libc making the stack executable. */
5589 if ((arg3
& PROT_GROWSDOWN
)
5590 && arg1
>= ts
->info
->stack_limit
5591 && arg1
<= ts
->info
->start_stack
) {
5592 arg3
&= ~PROT_GROWSDOWN
;
5593 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
5594 arg1
= ts
->info
->stack_limit
;
5597 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
5599 #ifdef TARGET_NR_mremap
5600 case TARGET_NR_mremap
:
5601 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
5604 /* ??? msync/mlock/munlock are broken for softmmu. */
5605 #ifdef TARGET_NR_msync
5606 case TARGET_NR_msync
:
5607 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
5610 #ifdef TARGET_NR_mlock
5611 case TARGET_NR_mlock
:
5612 ret
= get_errno(mlock(g2h(arg1
), arg2
));
5615 #ifdef TARGET_NR_munlock
5616 case TARGET_NR_munlock
:
5617 ret
= get_errno(munlock(g2h(arg1
), arg2
));
5620 #ifdef TARGET_NR_mlockall
5621 case TARGET_NR_mlockall
:
5622 ret
= get_errno(mlockall(arg1
));
5625 #ifdef TARGET_NR_munlockall
5626 case TARGET_NR_munlockall
:
5627 ret
= get_errno(munlockall());
5630 case TARGET_NR_truncate
:
5631 if (!(p
= lock_user_string(arg1
)))
5633 ret
= get_errno(truncate(p
, arg2
));
5634 unlock_user(p
, arg1
, 0);
5636 case TARGET_NR_ftruncate
:
5637 ret
= get_errno(ftruncate(arg1
, arg2
));
5639 case TARGET_NR_fchmod
:
5640 ret
= get_errno(fchmod(arg1
, arg2
));
5642 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5643 case TARGET_NR_fchmodat
:
5644 if (!(p
= lock_user_string(arg2
)))
5646 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
5647 unlock_user(p
, arg2
, 0);
5650 case TARGET_NR_getpriority
:
5651 /* libc does special remapping of the return value of
5652 * sys_getpriority() so it's just easiest to call
5653 * sys_getpriority() directly rather than through libc. */
5654 ret
= get_errno(sys_getpriority(arg1
, arg2
));
5656 case TARGET_NR_setpriority
:
5657 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
5659 #ifdef TARGET_NR_profil
5660 case TARGET_NR_profil
:
5663 case TARGET_NR_statfs
:
5664 if (!(p
= lock_user_string(arg1
)))
5666 ret
= get_errno(statfs(path(p
), &stfs
));
5667 unlock_user(p
, arg1
, 0);
5669 if (!is_error(ret
)) {
5670 struct target_statfs
*target_stfs
;
5672 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
5674 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5675 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5676 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5677 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5678 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5679 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5680 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5681 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5682 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5683 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5684 unlock_user_struct(target_stfs
, arg2
, 1);
5687 case TARGET_NR_fstatfs
:
5688 ret
= get_errno(fstatfs(arg1
, &stfs
));
5689 goto convert_statfs
;
5690 #ifdef TARGET_NR_statfs64
5691 case TARGET_NR_statfs64
:
5692 if (!(p
= lock_user_string(arg1
)))
5694 ret
= get_errno(statfs(path(p
), &stfs
));
5695 unlock_user(p
, arg1
, 0);
5697 if (!is_error(ret
)) {
5698 struct target_statfs64
*target_stfs
;
5700 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
5702 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5703 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5704 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5705 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5706 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5707 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5708 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5709 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5710 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5711 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5712 unlock_user_struct(target_stfs
, arg3
, 1);
5715 case TARGET_NR_fstatfs64
:
5716 ret
= get_errno(fstatfs(arg1
, &stfs
));
5717 goto convert_statfs64
;
5719 #ifdef TARGET_NR_ioperm
5720 case TARGET_NR_ioperm
:
5723 #ifdef TARGET_NR_socketcall
5724 case TARGET_NR_socketcall
:
5725 ret
= do_socketcall(arg1
, arg2
);
5728 #ifdef TARGET_NR_accept
5729 case TARGET_NR_accept
:
5730 ret
= do_accept(arg1
, arg2
, arg3
);
5733 #ifdef TARGET_NR_bind
5734 case TARGET_NR_bind
:
5735 ret
= do_bind(arg1
, arg2
, arg3
);
5738 #ifdef TARGET_NR_connect
5739 case TARGET_NR_connect
:
5740 ret
= do_connect(arg1
, arg2
, arg3
);
5743 #ifdef TARGET_NR_getpeername
5744 case TARGET_NR_getpeername
:
5745 ret
= do_getpeername(arg1
, arg2
, arg3
);
5748 #ifdef TARGET_NR_getsockname
5749 case TARGET_NR_getsockname
:
5750 ret
= do_getsockname(arg1
, arg2
, arg3
);
5753 #ifdef TARGET_NR_getsockopt
5754 case TARGET_NR_getsockopt
:
5755 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
5758 #ifdef TARGET_NR_listen
5759 case TARGET_NR_listen
:
5760 ret
= get_errno(listen(arg1
, arg2
));
5763 #ifdef TARGET_NR_recv
5764 case TARGET_NR_recv
:
5765 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
5768 #ifdef TARGET_NR_recvfrom
5769 case TARGET_NR_recvfrom
:
5770 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5773 #ifdef TARGET_NR_recvmsg
5774 case TARGET_NR_recvmsg
:
5775 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
5778 #ifdef TARGET_NR_send
5779 case TARGET_NR_send
:
5780 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
5783 #ifdef TARGET_NR_sendmsg
5784 case TARGET_NR_sendmsg
:
5785 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
5788 #ifdef TARGET_NR_sendto
5789 case TARGET_NR_sendto
:
5790 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5793 #ifdef TARGET_NR_shutdown
5794 case TARGET_NR_shutdown
:
5795 ret
= get_errno(shutdown(arg1
, arg2
));
5798 #ifdef TARGET_NR_socket
5799 case TARGET_NR_socket
:
5800 ret
= do_socket(arg1
, arg2
, arg3
);
5803 #ifdef TARGET_NR_socketpair
5804 case TARGET_NR_socketpair
:
5805 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
5808 #ifdef TARGET_NR_setsockopt
5809 case TARGET_NR_setsockopt
:
5810 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
5814 case TARGET_NR_syslog
:
5815 if (!(p
= lock_user_string(arg2
)))
5817 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
5818 unlock_user(p
, arg2
, 0);
5821 case TARGET_NR_setitimer
:
5823 struct itimerval value
, ovalue
, *pvalue
;
5827 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
5828 || copy_from_user_timeval(&pvalue
->it_value
,
5829 arg2
+ sizeof(struct target_timeval
)))
5834 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
5835 if (!is_error(ret
) && arg3
) {
5836 if (copy_to_user_timeval(arg3
,
5837 &ovalue
.it_interval
)
5838 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
5844 case TARGET_NR_getitimer
:
5846 struct itimerval value
;
5848 ret
= get_errno(getitimer(arg1
, &value
));
5849 if (!is_error(ret
) && arg2
) {
5850 if (copy_to_user_timeval(arg2
,
5852 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
5858 case TARGET_NR_stat
:
5859 if (!(p
= lock_user_string(arg1
)))
5861 ret
= get_errno(stat(path(p
), &st
));
5862 unlock_user(p
, arg1
, 0);
5864 case TARGET_NR_lstat
:
5865 if (!(p
= lock_user_string(arg1
)))
5867 ret
= get_errno(lstat(path(p
), &st
));
5868 unlock_user(p
, arg1
, 0);
5870 case TARGET_NR_fstat
:
5872 ret
= get_errno(fstat(arg1
, &st
));
5874 if (!is_error(ret
)) {
5875 struct target_stat
*target_st
;
5877 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
5879 memset(target_st
, 0, sizeof(*target_st
));
5880 __put_user(st
.st_dev
, &target_st
->st_dev
);
5881 __put_user(st
.st_ino
, &target_st
->st_ino
);
5882 __put_user(st
.st_mode
, &target_st
->st_mode
);
5883 __put_user(st
.st_uid
, &target_st
->st_uid
);
5884 __put_user(st
.st_gid
, &target_st
->st_gid
);
5885 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
5886 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
5887 __put_user(st
.st_size
, &target_st
->st_size
);
5888 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
5889 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
5890 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
5891 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
5892 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
5893 unlock_user_struct(target_st
, arg2
, 1);
5897 #ifdef TARGET_NR_olduname
5898 case TARGET_NR_olduname
:
5901 #ifdef TARGET_NR_iopl
5902 case TARGET_NR_iopl
:
5905 case TARGET_NR_vhangup
:
5906 ret
= get_errno(vhangup());
5908 #ifdef TARGET_NR_idle
5909 case TARGET_NR_idle
:
5912 #ifdef TARGET_NR_syscall
5913 case TARGET_NR_syscall
:
5914 ret
= do_syscall(cpu_env
,arg1
& 0xffff,arg2
,arg3
,arg4
,arg5
,arg6
,0);
5917 case TARGET_NR_wait4
:
5920 abi_long status_ptr
= arg2
;
5921 struct rusage rusage
, *rusage_ptr
;
5922 abi_ulong target_rusage
= arg4
;
5924 rusage_ptr
= &rusage
;
5927 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
5928 if (!is_error(ret
)) {
5930 status
= host_to_target_waitstatus(status
);
5931 if (put_user_s32(status
, status_ptr
))
5935 host_to_target_rusage(target_rusage
, &rusage
);
5939 #ifdef TARGET_NR_swapoff
5940 case TARGET_NR_swapoff
:
5941 if (!(p
= lock_user_string(arg1
)))
5943 ret
= get_errno(swapoff(p
));
5944 unlock_user(p
, arg1
, 0);
5947 case TARGET_NR_sysinfo
:
5949 struct target_sysinfo
*target_value
;
5950 struct sysinfo value
;
5951 ret
= get_errno(sysinfo(&value
));
5952 if (!is_error(ret
) && arg1
)
5954 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
5956 __put_user(value
.uptime
, &target_value
->uptime
);
5957 __put_user(value
.loads
[0], &target_value
->loads
[0]);
5958 __put_user(value
.loads
[1], &target_value
->loads
[1]);
5959 __put_user(value
.loads
[2], &target_value
->loads
[2]);
5960 __put_user(value
.totalram
, &target_value
->totalram
);
5961 __put_user(value
.freeram
, &target_value
->freeram
);
5962 __put_user(value
.sharedram
, &target_value
->sharedram
);
5963 __put_user(value
.bufferram
, &target_value
->bufferram
);
5964 __put_user(value
.totalswap
, &target_value
->totalswap
);
5965 __put_user(value
.freeswap
, &target_value
->freeswap
);
5966 __put_user(value
.procs
, &target_value
->procs
);
5967 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
5968 __put_user(value
.freehigh
, &target_value
->freehigh
);
5969 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
5970 unlock_user_struct(target_value
, arg1
, 1);
5974 #ifdef TARGET_NR_ipc
5976 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5979 #ifdef TARGET_NR_semget
5980 case TARGET_NR_semget
:
5981 ret
= get_errno(semget(arg1
, arg2
, arg3
));
5984 #ifdef TARGET_NR_semop
5985 case TARGET_NR_semop
:
5986 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
5989 #ifdef TARGET_NR_semctl
5990 case TARGET_NR_semctl
:
5991 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
5994 #ifdef TARGET_NR_msgctl
5995 case TARGET_NR_msgctl
:
5996 ret
= do_msgctl(arg1
, arg2
, arg3
);
5999 #ifdef TARGET_NR_msgget
6000 case TARGET_NR_msgget
:
6001 ret
= get_errno(msgget(arg1
, arg2
));
6004 #ifdef TARGET_NR_msgrcv
6005 case TARGET_NR_msgrcv
:
6006 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
6009 #ifdef TARGET_NR_msgsnd
6010 case TARGET_NR_msgsnd
:
6011 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
6014 #ifdef TARGET_NR_shmget
6015 case TARGET_NR_shmget
:
6016 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
6019 #ifdef TARGET_NR_shmctl
6020 case TARGET_NR_shmctl
:
6021 ret
= do_shmctl(arg1
, arg2
, arg3
);
6024 #ifdef TARGET_NR_shmat
6025 case TARGET_NR_shmat
:
6026 ret
= do_shmat(arg1
, arg2
, arg3
);
6029 #ifdef TARGET_NR_shmdt
6030 case TARGET_NR_shmdt
:
6031 ret
= do_shmdt(arg1
);
6034 case TARGET_NR_fsync
:
6035 ret
= get_errno(fsync(arg1
));
6037 case TARGET_NR_clone
:
6038 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6039 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
6040 #elif defined(TARGET_CRIS)
6041 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
6043 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
6046 #ifdef __NR_exit_group
6047 /* new thread calls */
6048 case TARGET_NR_exit_group
:
6052 gdb_exit(cpu_env
, arg1
);
6053 ret
= get_errno(exit_group(arg1
));
6056 case TARGET_NR_setdomainname
:
6057 if (!(p
= lock_user_string(arg1
)))
6059 ret
= get_errno(setdomainname(p
, arg2
));
6060 unlock_user(p
, arg1
, 0);
6062 case TARGET_NR_uname
:
6063 /* no need to transcode because we use the linux syscall */
6065 struct new_utsname
* buf
;
6067 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
6069 ret
= get_errno(sys_uname(buf
));
6070 if (!is_error(ret
)) {
6071 /* Overrite the native machine name with whatever is being
6073 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
6074 /* Allow the user to override the reported release. */
6075 if (qemu_uname_release
&& *qemu_uname_release
)
6076 strcpy (buf
->release
, qemu_uname_release
);
6078 unlock_user_struct(buf
, arg1
, 1);
6082 case TARGET_NR_modify_ldt
:
6083 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
6085 #if !defined(TARGET_X86_64)
6086 case TARGET_NR_vm86old
:
6088 case TARGET_NR_vm86
:
6089 ret
= do_vm86(cpu_env
, arg1
, arg2
);
6093 case TARGET_NR_adjtimex
:
6095 #ifdef TARGET_NR_create_module
6096 case TARGET_NR_create_module
:
6098 case TARGET_NR_init_module
:
6099 case TARGET_NR_delete_module
:
6100 #ifdef TARGET_NR_get_kernel_syms
6101 case TARGET_NR_get_kernel_syms
:
6104 case TARGET_NR_quotactl
:
6106 case TARGET_NR_getpgid
:
6107 ret
= get_errno(getpgid(arg1
));
6109 case TARGET_NR_fchdir
:
6110 ret
= get_errno(fchdir(arg1
));
6112 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6113 case TARGET_NR_bdflush
:
6116 #ifdef TARGET_NR_sysfs
6117 case TARGET_NR_sysfs
:
6120 case TARGET_NR_personality
:
6121 ret
= get_errno(personality(arg1
));
6123 #ifdef TARGET_NR_afs_syscall
6124 case TARGET_NR_afs_syscall
:
6127 #ifdef TARGET_NR__llseek /* Not on alpha */
6128 case TARGET_NR__llseek
:
6131 #if !defined(__NR_llseek)
6132 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
6134 ret
= get_errno(res
);
6139 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
6141 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
6147 case TARGET_NR_getdents
:
6148 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6150 struct target_dirent
*target_dirp
;
6151 struct linux_dirent
*dirp
;
6152 abi_long count
= arg3
;
6154 dirp
= malloc(count
);
6156 ret
= -TARGET_ENOMEM
;
6160 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6161 if (!is_error(ret
)) {
6162 struct linux_dirent
*de
;
6163 struct target_dirent
*tde
;
6165 int reclen
, treclen
;
6166 int count1
, tnamelen
;
6170 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6174 reclen
= de
->d_reclen
;
6175 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
6176 tde
->d_reclen
= tswap16(treclen
);
6177 tde
->d_ino
= tswapl(de
->d_ino
);
6178 tde
->d_off
= tswapl(de
->d_off
);
6179 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
6182 /* XXX: may not be correct */
6183 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
6184 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6186 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
6190 unlock_user(target_dirp
, arg2
, ret
);
6196 struct linux_dirent
*dirp
;
6197 abi_long count
= arg3
;
6199 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6201 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6202 if (!is_error(ret
)) {
6203 struct linux_dirent
*de
;
6208 reclen
= de
->d_reclen
;
6211 de
->d_reclen
= tswap16(reclen
);
6212 tswapls(&de
->d_ino
);
6213 tswapls(&de
->d_off
);
6214 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6218 unlock_user(dirp
, arg2
, ret
);
6222 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6223 case TARGET_NR_getdents64
:
6225 struct linux_dirent64
*dirp
;
6226 abi_long count
= arg3
;
6227 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6229 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
6230 if (!is_error(ret
)) {
6231 struct linux_dirent64
*de
;
6236 reclen
= de
->d_reclen
;
6239 de
->d_reclen
= tswap16(reclen
);
6240 tswap64s((uint64_t *)&de
->d_ino
);
6241 tswap64s((uint64_t *)&de
->d_off
);
6242 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
6246 unlock_user(dirp
, arg2
, ret
);
6249 #endif /* TARGET_NR_getdents64 */
6250 #ifdef TARGET_NR__newselect
6251 case TARGET_NR__newselect
:
6252 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6255 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6256 # ifdef TARGET_NR_poll
6257 case TARGET_NR_poll
:
6259 # ifdef TARGET_NR_ppoll
6260 case TARGET_NR_ppoll
:
6263 struct target_pollfd
*target_pfd
;
6264 unsigned int nfds
= arg2
;
6269 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
6273 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
6274 for(i
= 0; i
< nfds
; i
++) {
6275 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
6276 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
6279 # ifdef TARGET_NR_ppoll
6280 if (num
== TARGET_NR_ppoll
) {
6281 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
6282 target_sigset_t
*target_set
;
6283 sigset_t _set
, *set
= &_set
;
6286 if (target_to_host_timespec(timeout_ts
, arg3
)) {
6287 unlock_user(target_pfd
, arg1
, 0);
6295 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
6297 unlock_user(target_pfd
, arg1
, 0);
6300 target_to_host_sigset(set
, target_set
);
6305 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
6307 if (!is_error(ret
) && arg3
) {
6308 host_to_target_timespec(arg3
, timeout_ts
);
6311 unlock_user(target_set
, arg4
, 0);
6315 ret
= get_errno(poll(pfd
, nfds
, timeout
));
6317 if (!is_error(ret
)) {
6318 for(i
= 0; i
< nfds
; i
++) {
6319 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
6322 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
6326 case TARGET_NR_flock
:
6327 /* NOTE: the flock constant seems to be the same for every
6329 ret
= get_errno(flock(arg1
, arg2
));
6331 case TARGET_NR_readv
:
6336 vec
= alloca(count
* sizeof(struct iovec
));
6337 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
6339 ret
= get_errno(readv(arg1
, vec
, count
));
6340 unlock_iovec(vec
, arg2
, count
, 1);
6343 case TARGET_NR_writev
:
6348 vec
= alloca(count
* sizeof(struct iovec
));
6349 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
6351 ret
= get_errno(writev(arg1
, vec
, count
));
6352 unlock_iovec(vec
, arg2
, count
, 0);
6355 case TARGET_NR_getsid
:
6356 ret
= get_errno(getsid(arg1
));
6358 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6359 case TARGET_NR_fdatasync
:
6360 ret
= get_errno(fdatasync(arg1
));
6363 case TARGET_NR__sysctl
:
6364 /* We don't implement this, but ENOTDIR is always a safe
6366 ret
= -TARGET_ENOTDIR
;
6368 case TARGET_NR_sched_getaffinity
:
6370 unsigned int mask_size
;
6371 unsigned long *mask
;
6374 * sched_getaffinity needs multiples of ulong, so need to take
6375 * care of mismatches between target ulong and host ulong sizes.
6377 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6378 ret
= -TARGET_EINVAL
;
6381 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6383 mask
= alloca(mask_size
);
6384 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
6386 if (!is_error(ret
)) {
6388 /* Zero out any extra space kernel didn't fill */
6389 unsigned long zero
= arg2
- ret
;
6392 if (copy_to_user(arg3
+ zero
, p
, zero
)) {
6397 if (copy_to_user(arg3
, mask
, arg2
)) {
6404 case TARGET_NR_sched_setaffinity
:
6406 unsigned int mask_size
;
6407 unsigned long *mask
;
6410 * sched_setaffinity needs multiples of ulong, so need to take
6411 * care of mismatches between target ulong and host ulong sizes.
6413 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6414 ret
= -TARGET_EINVAL
;
6417 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6419 mask
= alloca(mask_size
);
6420 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
6423 memcpy(mask
, p
, arg2
);
6424 unlock_user_struct(p
, arg2
, 0);
6426 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
6429 case TARGET_NR_sched_setparam
:
6431 struct sched_param
*target_schp
;
6432 struct sched_param schp
;
6434 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
6436 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6437 unlock_user_struct(target_schp
, arg2
, 0);
6438 ret
= get_errno(sched_setparam(arg1
, &schp
));
6441 case TARGET_NR_sched_getparam
:
6443 struct sched_param
*target_schp
;
6444 struct sched_param schp
;
6445 ret
= get_errno(sched_getparam(arg1
, &schp
));
6446 if (!is_error(ret
)) {
6447 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
6449 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
6450 unlock_user_struct(target_schp
, arg2
, 1);
6454 case TARGET_NR_sched_setscheduler
:
6456 struct sched_param
*target_schp
;
6457 struct sched_param schp
;
6458 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
6460 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6461 unlock_user_struct(target_schp
, arg3
, 0);
6462 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
6465 case TARGET_NR_sched_getscheduler
:
6466 ret
= get_errno(sched_getscheduler(arg1
));
6468 case TARGET_NR_sched_yield
:
6469 ret
= get_errno(sched_yield());
6471 case TARGET_NR_sched_get_priority_max
:
6472 ret
= get_errno(sched_get_priority_max(arg1
));
6474 case TARGET_NR_sched_get_priority_min
:
6475 ret
= get_errno(sched_get_priority_min(arg1
));
6477 case TARGET_NR_sched_rr_get_interval
:
6480 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
6481 if (!is_error(ret
)) {
6482 host_to_target_timespec(arg2
, &ts
);
6486 case TARGET_NR_nanosleep
:
6488 struct timespec req
, rem
;
6489 target_to_host_timespec(&req
, arg1
);
6490 ret
= get_errno(nanosleep(&req
, &rem
));
6491 if (is_error(ret
) && arg2
) {
6492 host_to_target_timespec(arg2
, &rem
);
6496 #ifdef TARGET_NR_query_module
6497 case TARGET_NR_query_module
:
6500 #ifdef TARGET_NR_nfsservctl
6501 case TARGET_NR_nfsservctl
:
6504 case TARGET_NR_prctl
:
6507 case PR_GET_PDEATHSIG
:
6510 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
6511 if (!is_error(ret
) && arg2
6512 && put_user_ual(deathsig
, arg2
))
6517 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
6521 #ifdef TARGET_NR_arch_prctl
6522 case TARGET_NR_arch_prctl
:
6523 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6524 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
6530 #ifdef TARGET_NR_pread
6531 case TARGET_NR_pread
:
6533 if (((CPUARMState
*)cpu_env
)->eabi
)
6536 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6538 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
6539 unlock_user(p
, arg2
, ret
);
6541 case TARGET_NR_pwrite
:
6543 if (((CPUARMState
*)cpu_env
)->eabi
)
6546 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6548 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
6549 unlock_user(p
, arg2
, 0);
6552 #ifdef TARGET_NR_pread64
6553 case TARGET_NR_pread64
:
6554 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6556 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6557 unlock_user(p
, arg2
, ret
);
6559 case TARGET_NR_pwrite64
:
6560 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6562 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6563 unlock_user(p
, arg2
, 0);
6566 case TARGET_NR_getcwd
:
6567 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
6569 ret
= get_errno(sys_getcwd1(p
, arg2
));
6570 unlock_user(p
, arg1
, ret
);
6572 case TARGET_NR_capget
:
6574 case TARGET_NR_capset
:
6576 case TARGET_NR_sigaltstack
:
6577 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6578 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6579 defined(TARGET_M68K)
6580 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUState
*)cpu_env
));
6585 case TARGET_NR_sendfile
:
6587 #ifdef TARGET_NR_getpmsg
6588 case TARGET_NR_getpmsg
:
6591 #ifdef TARGET_NR_putpmsg
6592 case TARGET_NR_putpmsg
:
6595 #ifdef TARGET_NR_vfork
6596 case TARGET_NR_vfork
:
6597 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
6601 #ifdef TARGET_NR_ugetrlimit
6602 case TARGET_NR_ugetrlimit
:
6605 ret
= get_errno(getrlimit(arg1
, &rlim
));
6606 if (!is_error(ret
)) {
6607 struct target_rlimit
*target_rlim
;
6608 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6610 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6611 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6612 unlock_user_struct(target_rlim
, arg2
, 1);
6617 #ifdef TARGET_NR_truncate64
6618 case TARGET_NR_truncate64
:
6619 if (!(p
= lock_user_string(arg1
)))
6621 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
6622 unlock_user(p
, arg1
, 0);
6625 #ifdef TARGET_NR_ftruncate64
6626 case TARGET_NR_ftruncate64
:
6627 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
6630 #ifdef TARGET_NR_stat64
6631 case TARGET_NR_stat64
:
6632 if (!(p
= lock_user_string(arg1
)))
6634 ret
= get_errno(stat(path(p
), &st
));
6635 unlock_user(p
, arg1
, 0);
6637 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6640 #ifdef TARGET_NR_lstat64
6641 case TARGET_NR_lstat64
:
6642 if (!(p
= lock_user_string(arg1
)))
6644 ret
= get_errno(lstat(path(p
), &st
));
6645 unlock_user(p
, arg1
, 0);
6647 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6650 #ifdef TARGET_NR_fstat64
6651 case TARGET_NR_fstat64
:
6652 ret
= get_errno(fstat(arg1
, &st
));
6654 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6657 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6658 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6659 #ifdef TARGET_NR_fstatat64
6660 case TARGET_NR_fstatat64
:
6662 #ifdef TARGET_NR_newfstatat
6663 case TARGET_NR_newfstatat
:
6665 if (!(p
= lock_user_string(arg2
)))
6667 #ifdef __NR_fstatat64
6668 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
6670 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
6673 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
6677 case TARGET_NR_lchown
:
6678 if (!(p
= lock_user_string(arg1
)))
6680 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6681 unlock_user(p
, arg1
, 0);
6683 case TARGET_NR_getuid
:
6684 ret
= get_errno(high2lowuid(getuid()));
6686 case TARGET_NR_getgid
:
6687 ret
= get_errno(high2lowgid(getgid()));
6689 case TARGET_NR_geteuid
:
6690 ret
= get_errno(high2lowuid(geteuid()));
6692 case TARGET_NR_getegid
:
6693 ret
= get_errno(high2lowgid(getegid()));
6695 case TARGET_NR_setreuid
:
6696 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
6698 case TARGET_NR_setregid
:
6699 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
6701 case TARGET_NR_getgroups
:
6703 int gidsetsize
= arg1
;
6704 uint16_t *target_grouplist
;
6708 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6709 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6710 if (gidsetsize
== 0)
6712 if (!is_error(ret
)) {
6713 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
6714 if (!target_grouplist
)
6716 for(i
= 0;i
< ret
; i
++)
6717 target_grouplist
[i
] = tswap16(grouplist
[i
]);
6718 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
6722 case TARGET_NR_setgroups
:
6724 int gidsetsize
= arg1
;
6725 uint16_t *target_grouplist
;
6729 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6730 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
6731 if (!target_grouplist
) {
6732 ret
= -TARGET_EFAULT
;
6735 for(i
= 0;i
< gidsetsize
; i
++)
6736 grouplist
[i
] = tswap16(target_grouplist
[i
]);
6737 unlock_user(target_grouplist
, arg2
, 0);
6738 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6741 case TARGET_NR_fchown
:
6742 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
6744 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6745 case TARGET_NR_fchownat
:
6746 if (!(p
= lock_user_string(arg2
)))
6748 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
6749 unlock_user(p
, arg2
, 0);
6752 #ifdef TARGET_NR_setresuid
6753 case TARGET_NR_setresuid
:
6754 ret
= get_errno(setresuid(low2highuid(arg1
),
6756 low2highuid(arg3
)));
6759 #ifdef TARGET_NR_getresuid
6760 case TARGET_NR_getresuid
:
6762 uid_t ruid
, euid
, suid
;
6763 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6764 if (!is_error(ret
)) {
6765 if (put_user_u16(high2lowuid(ruid
), arg1
)
6766 || put_user_u16(high2lowuid(euid
), arg2
)
6767 || put_user_u16(high2lowuid(suid
), arg3
))
6773 #ifdef TARGET_NR_getresgid
6774 case TARGET_NR_setresgid
:
6775 ret
= get_errno(setresgid(low2highgid(arg1
),
6777 low2highgid(arg3
)));
6780 #ifdef TARGET_NR_getresgid
6781 case TARGET_NR_getresgid
:
6783 gid_t rgid
, egid
, sgid
;
6784 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6785 if (!is_error(ret
)) {
6786 if (put_user_u16(high2lowgid(rgid
), arg1
)
6787 || put_user_u16(high2lowgid(egid
), arg2
)
6788 || put_user_u16(high2lowgid(sgid
), arg3
))
6794 case TARGET_NR_chown
:
6795 if (!(p
= lock_user_string(arg1
)))
6797 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6798 unlock_user(p
, arg1
, 0);
6800 case TARGET_NR_setuid
:
6801 ret
= get_errno(setuid(low2highuid(arg1
)));
6803 case TARGET_NR_setgid
:
6804 ret
= get_errno(setgid(low2highgid(arg1
)));
6806 case TARGET_NR_setfsuid
:
6807 ret
= get_errno(setfsuid(arg1
));
6809 case TARGET_NR_setfsgid
:
6810 ret
= get_errno(setfsgid(arg1
));
6812 #endif /* USE_UID16 */
6814 #ifdef TARGET_NR_lchown32
6815 case TARGET_NR_lchown32
:
6816 if (!(p
= lock_user_string(arg1
)))
6818 ret
= get_errno(lchown(p
, arg2
, arg3
));
6819 unlock_user(p
, arg1
, 0);
6822 #ifdef TARGET_NR_getuid32
6823 case TARGET_NR_getuid32
:
6824 ret
= get_errno(getuid());
6828 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6829 /* Alpha specific */
6830 case TARGET_NR_getxuid
:
6834 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
6836 ret
= get_errno(getuid());
6839 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6840 /* Alpha specific */
6841 case TARGET_NR_getxgid
:
6845 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
6847 ret
= get_errno(getgid());
6850 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
6851 /* Alpha specific */
6852 case TARGET_NR_osf_getsysinfo
:
6853 ret
= -TARGET_EOPNOTSUPP
;
6855 case TARGET_GSI_IEEE_FP_CONTROL
:
6857 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
6859 /* Copied from linux ieee_fpcr_to_swcr. */
6860 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
6861 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
6862 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
6863 | SWCR_TRAP_ENABLE_DZE
6864 | SWCR_TRAP_ENABLE_OVF
);
6865 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
6866 | SWCR_TRAP_ENABLE_INE
);
6867 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
6868 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
6870 if (put_user_u64 (swcr
, arg2
))
6876 /* case GSI_IEEE_STATE_AT_SIGNAL:
6877 -- Not implemented in linux kernel.
6879 -- Retrieves current unaligned access state; not much used.
6881 -- Retrieves implver information; surely not used.
6883 -- Grabs a copy of the HWRPB; surely not used.
6888 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
6889 /* Alpha specific */
6890 case TARGET_NR_osf_setsysinfo
:
6891 ret
= -TARGET_EOPNOTSUPP
;
6893 case TARGET_SSI_IEEE_FP_CONTROL
:
6894 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
6896 uint64_t swcr
, fpcr
, orig_fpcr
;
6898 if (get_user_u64 (swcr
, arg2
))
6900 orig_fpcr
= cpu_alpha_load_fpcr (cpu_env
);
6901 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
6903 /* Copied from linux ieee_swcr_to_fpcr. */
6904 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
6905 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
6906 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
6907 | SWCR_TRAP_ENABLE_DZE
6908 | SWCR_TRAP_ENABLE_OVF
)) << 48;
6909 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
6910 | SWCR_TRAP_ENABLE_INE
)) << 57;
6911 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
6912 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
6914 cpu_alpha_store_fpcr (cpu_env
, fpcr
);
6917 if (arg1
== TARGET_SSI_IEEE_RAISE_EXCEPTION
) {
6918 /* Old exceptions are not signaled. */
6919 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
6921 /* If any exceptions set by this call, and are unmasked,
6928 /* case SSI_NVPAIRS:
6929 -- Used with SSIN_UACPROC to enable unaligned accesses.
6930 case SSI_IEEE_STATE_AT_SIGNAL:
6931 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
6932 -- Not implemented in linux kernel
6937 #ifdef TARGET_NR_osf_sigprocmask
6938 /* Alpha specific. */
6939 case TARGET_NR_osf_sigprocmask
:
6943 sigset_t set
, oldset
;
6946 case TARGET_SIG_BLOCK
:
6949 case TARGET_SIG_UNBLOCK
:
6952 case TARGET_SIG_SETMASK
:
6956 ret
= -TARGET_EINVAL
;
6960 target_to_host_old_sigset(&set
, &mask
);
6961 sigprocmask(arg1
, &set
, &oldset
);
6962 host_to_target_old_sigset(&mask
, &oldset
);
6968 #ifdef TARGET_NR_getgid32
6969 case TARGET_NR_getgid32
:
6970 ret
= get_errno(getgid());
6973 #ifdef TARGET_NR_geteuid32
6974 case TARGET_NR_geteuid32
:
6975 ret
= get_errno(geteuid());
6978 #ifdef TARGET_NR_getegid32
6979 case TARGET_NR_getegid32
:
6980 ret
= get_errno(getegid());
6983 #ifdef TARGET_NR_setreuid32
6984 case TARGET_NR_setreuid32
:
6985 ret
= get_errno(setreuid(arg1
, arg2
));
6988 #ifdef TARGET_NR_setregid32
6989 case TARGET_NR_setregid32
:
6990 ret
= get_errno(setregid(arg1
, arg2
));
6993 #ifdef TARGET_NR_getgroups32
6994 case TARGET_NR_getgroups32
:
6996 int gidsetsize
= arg1
;
6997 uint32_t *target_grouplist
;
7001 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7002 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7003 if (gidsetsize
== 0)
7005 if (!is_error(ret
)) {
7006 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
7007 if (!target_grouplist
) {
7008 ret
= -TARGET_EFAULT
;
7011 for(i
= 0;i
< ret
; i
++)
7012 target_grouplist
[i
] = tswap32(grouplist
[i
]);
7013 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
7018 #ifdef TARGET_NR_setgroups32
7019 case TARGET_NR_setgroups32
:
7021 int gidsetsize
= arg1
;
7022 uint32_t *target_grouplist
;
7026 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7027 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
7028 if (!target_grouplist
) {
7029 ret
= -TARGET_EFAULT
;
7032 for(i
= 0;i
< gidsetsize
; i
++)
7033 grouplist
[i
] = tswap32(target_grouplist
[i
]);
7034 unlock_user(target_grouplist
, arg2
, 0);
7035 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7039 #ifdef TARGET_NR_fchown32
7040 case TARGET_NR_fchown32
:
7041 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
7044 #ifdef TARGET_NR_setresuid32
7045 case TARGET_NR_setresuid32
:
7046 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
7049 #ifdef TARGET_NR_getresuid32
7050 case TARGET_NR_getresuid32
:
7052 uid_t ruid
, euid
, suid
;
7053 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7054 if (!is_error(ret
)) {
7055 if (put_user_u32(ruid
, arg1
)
7056 || put_user_u32(euid
, arg2
)
7057 || put_user_u32(suid
, arg3
))
7063 #ifdef TARGET_NR_setresgid32
7064 case TARGET_NR_setresgid32
:
7065 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
7068 #ifdef TARGET_NR_getresgid32
7069 case TARGET_NR_getresgid32
:
7071 gid_t rgid
, egid
, sgid
;
7072 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7073 if (!is_error(ret
)) {
7074 if (put_user_u32(rgid
, arg1
)
7075 || put_user_u32(egid
, arg2
)
7076 || put_user_u32(sgid
, arg3
))
7082 #ifdef TARGET_NR_chown32
7083 case TARGET_NR_chown32
:
7084 if (!(p
= lock_user_string(arg1
)))
7086 ret
= get_errno(chown(p
, arg2
, arg3
));
7087 unlock_user(p
, arg1
, 0);
7090 #ifdef TARGET_NR_setuid32
7091 case TARGET_NR_setuid32
:
7092 ret
= get_errno(setuid(arg1
));
7095 #ifdef TARGET_NR_setgid32
7096 case TARGET_NR_setgid32
:
7097 ret
= get_errno(setgid(arg1
));
7100 #ifdef TARGET_NR_setfsuid32
7101 case TARGET_NR_setfsuid32
:
7102 ret
= get_errno(setfsuid(arg1
));
7105 #ifdef TARGET_NR_setfsgid32
7106 case TARGET_NR_setfsgid32
:
7107 ret
= get_errno(setfsgid(arg1
));
7111 case TARGET_NR_pivot_root
:
7113 #ifdef TARGET_NR_mincore
7114 case TARGET_NR_mincore
:
7117 ret
= -TARGET_EFAULT
;
7118 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
7120 if (!(p
= lock_user_string(arg3
)))
7122 ret
= get_errno(mincore(a
, arg2
, p
));
7123 unlock_user(p
, arg3
, ret
);
7125 unlock_user(a
, arg1
, 0);
7129 #ifdef TARGET_NR_arm_fadvise64_64
7130 case TARGET_NR_arm_fadvise64_64
:
7133 * arm_fadvise64_64 looks like fadvise64_64 but
7134 * with different argument order
7142 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7143 #ifdef TARGET_NR_fadvise64_64
7144 case TARGET_NR_fadvise64_64
:
7146 #ifdef TARGET_NR_fadvise64
7147 case TARGET_NR_fadvise64
:
7151 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
7152 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
7153 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
7154 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
7158 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
7161 #ifdef TARGET_NR_madvise
7162 case TARGET_NR_madvise
:
7163 /* A straight passthrough may not be safe because qemu sometimes
7164 turns private flie-backed mappings into anonymous mappings.
7165 This will break MADV_DONTNEED.
7166 This is a hint, so ignoring and returning success is ok. */
7170 #if TARGET_ABI_BITS == 32
7171 case TARGET_NR_fcntl64
:
7175 struct target_flock64
*target_fl
;
7177 struct target_eabi_flock64
*target_efl
;
7180 cmd
= target_to_host_fcntl_cmd(arg2
);
7181 if (cmd
== -TARGET_EINVAL
)
7185 case TARGET_F_GETLK64
:
7187 if (((CPUARMState
*)cpu_env
)->eabi
) {
7188 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
7190 fl
.l_type
= tswap16(target_efl
->l_type
);
7191 fl
.l_whence
= tswap16(target_efl
->l_whence
);
7192 fl
.l_start
= tswap64(target_efl
->l_start
);
7193 fl
.l_len
= tswap64(target_efl
->l_len
);
7194 fl
.l_pid
= tswap32(target_efl
->l_pid
);
7195 unlock_user_struct(target_efl
, arg3
, 0);
7199 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
7201 fl
.l_type
= tswap16(target_fl
->l_type
);
7202 fl
.l_whence
= tswap16(target_fl
->l_whence
);
7203 fl
.l_start
= tswap64(target_fl
->l_start
);
7204 fl
.l_len
= tswap64(target_fl
->l_len
);
7205 fl
.l_pid
= tswap32(target_fl
->l_pid
);
7206 unlock_user_struct(target_fl
, arg3
, 0);
7208 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
7211 if (((CPUARMState
*)cpu_env
)->eabi
) {
7212 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
7214 target_efl
->l_type
= tswap16(fl
.l_type
);
7215 target_efl
->l_whence
= tswap16(fl
.l_whence
);
7216 target_efl
->l_start
= tswap64(fl
.l_start
);
7217 target_efl
->l_len
= tswap64(fl
.l_len
);
7218 target_efl
->l_pid
= tswap32(fl
.l_pid
);
7219 unlock_user_struct(target_efl
, arg3
, 1);
7223 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
7225 target_fl
->l_type
= tswap16(fl
.l_type
);
7226 target_fl
->l_whence
= tswap16(fl
.l_whence
);
7227 target_fl
->l_start
= tswap64(fl
.l_start
);
7228 target_fl
->l_len
= tswap64(fl
.l_len
);
7229 target_fl
->l_pid
= tswap32(fl
.l_pid
);
7230 unlock_user_struct(target_fl
, arg3
, 1);
7235 case TARGET_F_SETLK64
:
7236 case TARGET_F_SETLKW64
:
7238 if (((CPUARMState
*)cpu_env
)->eabi
) {
7239 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
7241 fl
.l_type
= tswap16(target_efl
->l_type
);
7242 fl
.l_whence
= tswap16(target_efl
->l_whence
);
7243 fl
.l_start
= tswap64(target_efl
->l_start
);
7244 fl
.l_len
= tswap64(target_efl
->l_len
);
7245 fl
.l_pid
= tswap32(target_efl
->l_pid
);
7246 unlock_user_struct(target_efl
, arg3
, 0);
7250 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
7252 fl
.l_type
= tswap16(target_fl
->l_type
);
7253 fl
.l_whence
= tswap16(target_fl
->l_whence
);
7254 fl
.l_start
= tswap64(target_fl
->l_start
);
7255 fl
.l_len
= tswap64(target_fl
->l_len
);
7256 fl
.l_pid
= tswap32(target_fl
->l_pid
);
7257 unlock_user_struct(target_fl
, arg3
, 0);
7259 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
7262 ret
= do_fcntl(arg1
, arg2
, arg3
);
7268 #ifdef TARGET_NR_cacheflush
7269 case TARGET_NR_cacheflush
:
7270 /* self-modifying code is handled automatically, so nothing needed */
7274 #ifdef TARGET_NR_security
7275 case TARGET_NR_security
:
7278 #ifdef TARGET_NR_getpagesize
7279 case TARGET_NR_getpagesize
:
7280 ret
= TARGET_PAGE_SIZE
;
7283 case TARGET_NR_gettid
:
7284 ret
= get_errno(gettid());
7286 #ifdef TARGET_NR_readahead
7287 case TARGET_NR_readahead
:
7288 #if TARGET_ABI_BITS == 32
7290 if (((CPUARMState
*)cpu_env
)->eabi
)
7297 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
7299 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
7303 #ifdef TARGET_NR_setxattr
7304 case TARGET_NR_setxattr
:
7305 case TARGET_NR_lsetxattr
:
7306 case TARGET_NR_fsetxattr
:
7307 case TARGET_NR_getxattr
:
7308 case TARGET_NR_lgetxattr
:
7309 case TARGET_NR_fgetxattr
:
7310 case TARGET_NR_listxattr
:
7311 case TARGET_NR_llistxattr
:
7312 case TARGET_NR_flistxattr
:
7313 case TARGET_NR_removexattr
:
7314 case TARGET_NR_lremovexattr
:
7315 case TARGET_NR_fremovexattr
:
7316 ret
= -TARGET_EOPNOTSUPP
;
7319 #ifdef TARGET_NR_set_thread_area
7320 case TARGET_NR_set_thread_area
:
7321 #if defined(TARGET_MIPS)
7322 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
7325 #elif defined(TARGET_CRIS)
7327 ret
= -TARGET_EINVAL
;
7329 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
7333 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7334 ret
= do_set_thread_area(cpu_env
, arg1
);
7337 goto unimplemented_nowarn
;
7340 #ifdef TARGET_NR_get_thread_area
7341 case TARGET_NR_get_thread_area
:
7342 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7343 ret
= do_get_thread_area(cpu_env
, arg1
);
7345 goto unimplemented_nowarn
;
7348 #ifdef TARGET_NR_getdomainname
7349 case TARGET_NR_getdomainname
:
7350 goto unimplemented_nowarn
;
7353 #ifdef TARGET_NR_clock_gettime
7354 case TARGET_NR_clock_gettime
:
7357 ret
= get_errno(clock_gettime(arg1
, &ts
));
7358 if (!is_error(ret
)) {
7359 host_to_target_timespec(arg2
, &ts
);
7364 #ifdef TARGET_NR_clock_getres
7365 case TARGET_NR_clock_getres
:
7368 ret
= get_errno(clock_getres(arg1
, &ts
));
7369 if (!is_error(ret
)) {
7370 host_to_target_timespec(arg2
, &ts
);
7375 #ifdef TARGET_NR_clock_nanosleep
7376 case TARGET_NR_clock_nanosleep
:
7379 target_to_host_timespec(&ts
, arg3
);
7380 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
7382 host_to_target_timespec(arg4
, &ts
);
7387 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7388 case TARGET_NR_set_tid_address
:
7389 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
7393 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7394 case TARGET_NR_tkill
:
7395 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
7399 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7400 case TARGET_NR_tgkill
:
7401 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
7402 target_to_host_signal(arg3
)));
7406 #ifdef TARGET_NR_set_robust_list
7407 case TARGET_NR_set_robust_list
:
7408 goto unimplemented_nowarn
;
7411 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7412 case TARGET_NR_utimensat
:
7414 struct timespec
*tsp
, ts
[2];
7418 target_to_host_timespec(ts
, arg3
);
7419 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
7423 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
7425 if (!(p
= lock_user_string(arg2
))) {
7426 ret
= -TARGET_EFAULT
;
7429 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
7430 unlock_user(p
, arg2
, 0);
7435 #if defined(CONFIG_USE_NPTL)
7436 case TARGET_NR_futex
:
7437 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7440 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7441 case TARGET_NR_inotify_init
:
7442 ret
= get_errno(sys_inotify_init());
7445 #ifdef CONFIG_INOTIFY1
7446 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7447 case TARGET_NR_inotify_init1
:
7448 ret
= get_errno(sys_inotify_init1(arg1
));
7452 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7453 case TARGET_NR_inotify_add_watch
:
7454 p
= lock_user_string(arg2
);
7455 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
7456 unlock_user(p
, arg2
, 0);
7459 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7460 case TARGET_NR_inotify_rm_watch
:
7461 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
7465 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7466 case TARGET_NR_mq_open
:
7468 struct mq_attr posix_mq_attr
;
7470 p
= lock_user_string(arg1
- 1);
7472 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
7473 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
7474 unlock_user (p
, arg1
, 0);
7478 case TARGET_NR_mq_unlink
:
7479 p
= lock_user_string(arg1
- 1);
7480 ret
= get_errno(mq_unlink(p
));
7481 unlock_user (p
, arg1
, 0);
7484 case TARGET_NR_mq_timedsend
:
7488 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
7490 target_to_host_timespec(&ts
, arg5
);
7491 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
7492 host_to_target_timespec(arg5
, &ts
);
7495 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
7496 unlock_user (p
, arg2
, arg3
);
7500 case TARGET_NR_mq_timedreceive
:
7505 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
7507 target_to_host_timespec(&ts
, arg5
);
7508 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
7509 host_to_target_timespec(arg5
, &ts
);
7512 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
7513 unlock_user (p
, arg2
, arg3
);
7515 put_user_u32(prio
, arg4
);
7519 /* Not implemented for now... */
7520 /* case TARGET_NR_mq_notify: */
7523 case TARGET_NR_mq_getsetattr
:
7525 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
7528 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
7529 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
7532 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
7533 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
7540 #ifdef CONFIG_SPLICE
7541 #ifdef TARGET_NR_tee
7544 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
7548 #ifdef TARGET_NR_splice
7549 case TARGET_NR_splice
:
7551 loff_t loff_in
, loff_out
;
7552 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
7554 get_user_u64(loff_in
, arg2
);
7555 ploff_in
= &loff_in
;
7558 get_user_u64(loff_out
, arg2
);
7559 ploff_out
= &loff_out
;
7561 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
7565 #ifdef TARGET_NR_vmsplice
7566 case TARGET_NR_vmsplice
:
7571 vec
= alloca(count
* sizeof(struct iovec
));
7572 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
7574 ret
= get_errno(vmsplice(arg1
, vec
, count
, arg4
));
7575 unlock_iovec(vec
, arg2
, count
, 0);
7579 #endif /* CONFIG_SPLICE */
7580 #ifdef CONFIG_EVENTFD
7581 #if defined(TARGET_NR_eventfd)
7582 case TARGET_NR_eventfd
:
7583 ret
= get_errno(eventfd(arg1
, 0));
7586 #if defined(TARGET_NR_eventfd2)
7587 case TARGET_NR_eventfd2
:
7588 ret
= get_errno(eventfd(arg1
, arg2
));
7591 #endif /* CONFIG_EVENTFD */
7592 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7593 case TARGET_NR_fallocate
:
7594 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
7597 #if defined(CONFIG_SYNC_FILE_RANGE)
7598 #if defined(TARGET_NR_sync_file_range)
7599 case TARGET_NR_sync_file_range
:
7600 #if TARGET_ABI_BITS == 32
7601 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
7602 target_offset64(arg4
, arg5
), arg6
));
7604 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
7608 #if defined(TARGET_NR_sync_file_range2)
7609 case TARGET_NR_sync_file_range2
:
7610 /* This is like sync_file_range but the arguments are reordered */
7611 #if TARGET_ABI_BITS == 32
7612 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
7613 target_offset64(arg5
, arg6
), arg2
));
7615 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
7620 #if defined(CONFIG_EPOLL)
7621 #if defined(TARGET_NR_epoll_create)
7622 case TARGET_NR_epoll_create
:
7623 ret
= get_errno(epoll_create(arg1
));
7626 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7627 case TARGET_NR_epoll_create1
:
7628 ret
= get_errno(epoll_create1(arg1
));
7631 #if defined(TARGET_NR_epoll_ctl)
7632 case TARGET_NR_epoll_ctl
:
7634 struct epoll_event ep
;
7635 struct epoll_event
*epp
= 0;
7637 struct target_epoll_event
*target_ep
;
7638 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
7641 ep
.events
= tswap32(target_ep
->events
);
7642 /* The epoll_data_t union is just opaque data to the kernel,
7643 * so we transfer all 64 bits across and need not worry what
7644 * actual data type it is.
7646 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
7647 unlock_user_struct(target_ep
, arg4
, 0);
7650 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
7655 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7656 #define IMPLEMENT_EPOLL_PWAIT
7658 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7659 #if defined(TARGET_NR_epoll_wait)
7660 case TARGET_NR_epoll_wait
:
7662 #if defined(IMPLEMENT_EPOLL_PWAIT)
7663 case TARGET_NR_epoll_pwait
:
7666 struct target_epoll_event
*target_ep
;
7667 struct epoll_event
*ep
;
7669 int maxevents
= arg3
;
7672 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
7673 maxevents
* sizeof(struct target_epoll_event
), 1);
7678 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
7681 #if defined(IMPLEMENT_EPOLL_PWAIT)
7682 case TARGET_NR_epoll_pwait
:
7684 target_sigset_t
*target_set
;
7685 sigset_t _set
, *set
= &_set
;
7688 target_set
= lock_user(VERIFY_READ
, arg5
,
7689 sizeof(target_sigset_t
), 1);
7691 unlock_user(target_ep
, arg2
, 0);
7694 target_to_host_sigset(set
, target_set
);
7695 unlock_user(target_set
, arg5
, 0);
7700 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
7704 #if defined(TARGET_NR_epoll_wait)
7705 case TARGET_NR_epoll_wait
:
7706 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
7710 ret
= -TARGET_ENOSYS
;
7712 if (!is_error(ret
)) {
7714 for (i
= 0; i
< ret
; i
++) {
7715 target_ep
[i
].events
= tswap32(ep
[i
].events
);
7716 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
7719 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
7726 gemu_log("qemu: Unsupported syscall: %d\n", num
);
7727 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
7728 unimplemented_nowarn
:
7730 ret
= -TARGET_ENOSYS
;
7735 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
7738 print_syscall_ret(num
, ret
);
7741 ret
= -TARGET_EFAULT
;