4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
31 #include <sys/types.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
45 int __clone2(int (*fn
)(void *), void *child_stack_base
,
46 size_t stack_size
, int flags
, void *arg
, ...);
48 #include <sys/socket.h>
52 #include <sys/times.h>
55 #include <sys/statfs.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <qemu-common.h>
67 #include <sys/eventfd.h>
70 #define termios host_termios
71 #define winsize host_winsize
72 #define termio host_termio
73 #define sgttyb host_sgttyb /* same as target */
74 #define tchars host_tchars /* same as target */
75 #define ltchars host_ltchars /* same as target */
77 #include <linux/termios.h>
78 #include <linux/unistd.h>
79 #include <linux/utsname.h>
80 #include <linux/cdrom.h>
81 #include <linux/hdreg.h>
82 #include <linux/soundcard.h>
84 #include <linux/mtio.h>
86 #if defined(CONFIG_FIEMAP)
87 #include <linux/fiemap.h>
91 #include "linux_loop.h"
92 #include "cpu-uname.h"
95 #include "qemu-common.h"
97 #if defined(CONFIG_USE_NPTL)
98 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
99 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
101 /* XXX: Hardcode the above values. */
102 #define CLONE_NPTL_FLAGS2 0
107 //#include <linux/msdos_fs.h>
108 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
109 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
120 #define _syscall0(type,name) \
121 static type name (void) \
123 return syscall(__NR_##name); \
126 #define _syscall1(type,name,type1,arg1) \
127 static type name (type1 arg1) \
129 return syscall(__NR_##name, arg1); \
132 #define _syscall2(type,name,type1,arg1,type2,arg2) \
133 static type name (type1 arg1,type2 arg2) \
135 return syscall(__NR_##name, arg1, arg2); \
138 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
139 static type name (type1 arg1,type2 arg2,type3 arg3) \
141 return syscall(__NR_##name, arg1, arg2, arg3); \
144 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
145 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
147 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
150 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
152 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
154 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
158 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
159 type5,arg5,type6,arg6) \
160 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
167 #define __NR_sys_uname __NR_uname
168 #define __NR_sys_faccessat __NR_faccessat
169 #define __NR_sys_fchmodat __NR_fchmodat
170 #define __NR_sys_fchownat __NR_fchownat
171 #define __NR_sys_fstatat64 __NR_fstatat64
172 #define __NR_sys_futimesat __NR_futimesat
173 #define __NR_sys_getcwd1 __NR_getcwd
174 #define __NR_sys_getdents __NR_getdents
175 #define __NR_sys_getdents64 __NR_getdents64
176 #define __NR_sys_getpriority __NR_getpriority
177 #define __NR_sys_linkat __NR_linkat
178 #define __NR_sys_mkdirat __NR_mkdirat
179 #define __NR_sys_mknodat __NR_mknodat
180 #define __NR_sys_newfstatat __NR_newfstatat
181 #define __NR_sys_openat __NR_openat
182 #define __NR_sys_readlinkat __NR_readlinkat
183 #define __NR_sys_renameat __NR_renameat
184 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
185 #define __NR_sys_symlinkat __NR_symlinkat
186 #define __NR_sys_syslog __NR_syslog
187 #define __NR_sys_tgkill __NR_tgkill
188 #define __NR_sys_tkill __NR_tkill
189 #define __NR_sys_unlinkat __NR_unlinkat
190 #define __NR_sys_utimensat __NR_utimensat
191 #define __NR_sys_futex __NR_futex
192 #define __NR_sys_inotify_init __NR_inotify_init
193 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
194 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
196 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
197 #define __NR__llseek __NR_lseek
201 _syscall0(int, gettid
)
203 /* This is a replacement for the host gettid() and must return a host
205 static int gettid(void) {
209 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
210 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
211 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
213 _syscall2(int, sys_getpriority
, int, which
, int, who
);
214 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
215 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
216 loff_t
*, res
, uint
, wh
);
218 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
219 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
220 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
221 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
223 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
224 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
226 #ifdef __NR_exit_group
227 _syscall1(int,exit_group
,int,error_code
)
229 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
230 _syscall1(int,set_tid_address
,int *,tidptr
)
232 #if defined(CONFIG_USE_NPTL)
233 #if defined(TARGET_NR_futex) && defined(__NR_futex)
234 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
235 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
238 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
239 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
240 unsigned long *, user_mask_ptr
);
241 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
242 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
243 unsigned long *, user_mask_ptr
);
245 static bitmask_transtbl fcntl_flags_tbl
[] = {
246 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
247 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
248 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
249 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
250 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
251 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
252 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
253 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
254 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
255 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
256 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
257 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
258 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
259 #if defined(O_DIRECT)
260 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
265 #define COPY_UTSNAME_FIELD(dest, src) \
267 /* __NEW_UTS_LEN doesn't include terminating null */ \
268 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
269 (dest)[__NEW_UTS_LEN] = '\0'; \
272 static int sys_uname(struct new_utsname
*buf
)
274 struct utsname uts_buf
;
276 if (uname(&uts_buf
) < 0)
280 * Just in case these have some differences, we
281 * translate utsname to new_utsname (which is the
282 * struct linux kernel uses).
285 bzero(buf
, sizeof (*buf
));
286 COPY_UTSNAME_FIELD(buf
->sysname
, uts_buf
.sysname
);
287 COPY_UTSNAME_FIELD(buf
->nodename
, uts_buf
.nodename
);
288 COPY_UTSNAME_FIELD(buf
->release
, uts_buf
.release
);
289 COPY_UTSNAME_FIELD(buf
->version
, uts_buf
.version
);
290 COPY_UTSNAME_FIELD(buf
->machine
, uts_buf
.machine
);
292 COPY_UTSNAME_FIELD(buf
->domainname
, uts_buf
.domainname
);
296 #undef COPY_UTSNAME_FIELD
299 static int sys_getcwd1(char *buf
, size_t size
)
301 if (getcwd(buf
, size
) == NULL
) {
302 /* getcwd() sets errno */
305 return strlen(buf
)+1;
310 * Host system seems to have atfile syscall stubs available. We
311 * now enable them one by one as specified by target syscall_nr.h.
314 #ifdef TARGET_NR_faccessat
315 static int sys_faccessat(int dirfd
, const char *pathname
, int mode
)
317 return (faccessat(dirfd
, pathname
, mode
, 0));
320 #ifdef TARGET_NR_fchmodat
321 static int sys_fchmodat(int dirfd
, const char *pathname
, mode_t mode
)
323 return (fchmodat(dirfd
, pathname
, mode
, 0));
326 #if defined(TARGET_NR_fchownat) && defined(USE_UID16)
327 static int sys_fchownat(int dirfd
, const char *pathname
, uid_t owner
,
328 gid_t group
, int flags
)
330 return (fchownat(dirfd
, pathname
, owner
, group
, flags
));
333 #ifdef __NR_fstatat64
334 static int sys_fstatat64(int dirfd
, const char *pathname
, struct stat
*buf
,
337 return (fstatat(dirfd
, pathname
, buf
, flags
));
340 #ifdef __NR_newfstatat
341 static int sys_newfstatat(int dirfd
, const char *pathname
, struct stat
*buf
,
344 return (fstatat(dirfd
, pathname
, buf
, flags
));
347 #ifdef TARGET_NR_futimesat
348 static int sys_futimesat(int dirfd
, const char *pathname
,
349 const struct timeval times
[2])
351 return (futimesat(dirfd
, pathname
, times
));
354 #ifdef TARGET_NR_linkat
355 static int sys_linkat(int olddirfd
, const char *oldpath
,
356 int newdirfd
, const char *newpath
, int flags
)
358 return (linkat(olddirfd
, oldpath
, newdirfd
, newpath
, flags
));
361 #ifdef TARGET_NR_mkdirat
362 static int sys_mkdirat(int dirfd
, const char *pathname
, mode_t mode
)
364 return (mkdirat(dirfd
, pathname
, mode
));
367 #ifdef TARGET_NR_mknodat
368 static int sys_mknodat(int dirfd
, const char *pathname
, mode_t mode
,
371 return (mknodat(dirfd
, pathname
, mode
, dev
));
374 #ifdef TARGET_NR_openat
375 static int sys_openat(int dirfd
, const char *pathname
, int flags
, ...)
378 * open(2) has extra parameter 'mode' when called with
381 if ((flags
& O_CREAT
) != 0) {
386 * Get the 'mode' parameter and translate it to
390 mode
= va_arg(ap
, mode_t
);
391 mode
= target_to_host_bitmask(mode
, fcntl_flags_tbl
);
394 return (openat(dirfd
, pathname
, flags
, mode
));
396 return (openat(dirfd
, pathname
, flags
));
399 #ifdef TARGET_NR_readlinkat
400 static int sys_readlinkat(int dirfd
, const char *pathname
, char *buf
, size_t bufsiz
)
402 return (readlinkat(dirfd
, pathname
, buf
, bufsiz
));
405 #ifdef TARGET_NR_renameat
406 static int sys_renameat(int olddirfd
, const char *oldpath
,
407 int newdirfd
, const char *newpath
)
409 return (renameat(olddirfd
, oldpath
, newdirfd
, newpath
));
412 #ifdef TARGET_NR_symlinkat
413 static int sys_symlinkat(const char *oldpath
, int newdirfd
, const char *newpath
)
415 return (symlinkat(oldpath
, newdirfd
, newpath
));
418 #ifdef TARGET_NR_unlinkat
419 static int sys_unlinkat(int dirfd
, const char *pathname
, int flags
)
421 return (unlinkat(dirfd
, pathname
, flags
));
424 #else /* !CONFIG_ATFILE */
427 * Try direct syscalls instead
429 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
430 _syscall3(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
)
432 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
433 _syscall3(int,sys_fchmodat
,int,dirfd
,const char *,pathname
, mode_t
,mode
)
435 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
436 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
437 uid_t
,owner
,gid_t
,group
,int,flags
)
439 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
440 defined(__NR_fstatat64)
441 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
442 struct stat
*,buf
,int,flags
)
444 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
445 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
446 const struct timeval
*,times
)
448 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
449 defined(__NR_newfstatat)
450 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
451 struct stat
*,buf
,int,flags
)
453 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
454 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
455 int,newdirfd
,const char *,newpath
,int,flags
)
457 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
458 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
460 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
461 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
462 mode_t
,mode
,dev_t
,dev
)
464 #if defined(TARGET_NR_openat) && defined(__NR_openat)
465 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
467 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
468 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
469 char *,buf
,size_t,bufsize
)
471 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
472 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
473 int,newdirfd
,const char *,newpath
)
475 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
476 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
477 int,newdirfd
,const char *,newpath
)
479 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
480 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
483 #endif /* CONFIG_ATFILE */
485 #ifdef CONFIG_UTIMENSAT
486 static int sys_utimensat(int dirfd
, const char *pathname
,
487 const struct timespec times
[2], int flags
)
489 if (pathname
== NULL
)
490 return futimens(dirfd
, times
);
492 return utimensat(dirfd
, pathname
, times
, flags
);
495 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
496 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
497 const struct timespec
*,tsp
,int,flags
)
499 #endif /* CONFIG_UTIMENSAT */
501 #ifdef CONFIG_INOTIFY
502 #include <sys/inotify.h>
504 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
505 static int sys_inotify_init(void)
507 return (inotify_init());
510 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
511 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
513 return (inotify_add_watch(fd
, pathname
, mask
));
516 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
517 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
519 return (inotify_rm_watch(fd
, wd
));
522 #ifdef CONFIG_INOTIFY1
523 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
524 static int sys_inotify_init1(int flags
)
526 return (inotify_init1(flags
));
531 /* Userspace can usually survive runtime without inotify */
532 #undef TARGET_NR_inotify_init
533 #undef TARGET_NR_inotify_init1
534 #undef TARGET_NR_inotify_add_watch
535 #undef TARGET_NR_inotify_rm_watch
536 #endif /* CONFIG_INOTIFY */
538 #if defined(TARGET_NR_ppoll)
540 # define __NR_ppoll -1
542 #define __NR_sys_ppoll __NR_ppoll
543 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
544 struct timespec
*, timeout
, const __sigset_t
*, sigmask
,
548 extern int personality(int);
549 extern int flock(int, int);
550 extern int setfsuid(int);
551 extern int setfsgid(int);
552 extern int setgroups(int, gid_t
*);
554 #define ERRNO_TABLE_SIZE 1200
556 /* target_to_host_errno_table[] is initialized from
557 * host_to_target_errno_table[] in syscall_init(). */
558 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
562 * This list is the union of errno values overridden in asm-<arch>/errno.h
563 * minus the errnos that are not actually generic to all archs.
565 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
566 [EIDRM
] = TARGET_EIDRM
,
567 [ECHRNG
] = TARGET_ECHRNG
,
568 [EL2NSYNC
] = TARGET_EL2NSYNC
,
569 [EL3HLT
] = TARGET_EL3HLT
,
570 [EL3RST
] = TARGET_EL3RST
,
571 [ELNRNG
] = TARGET_ELNRNG
,
572 [EUNATCH
] = TARGET_EUNATCH
,
573 [ENOCSI
] = TARGET_ENOCSI
,
574 [EL2HLT
] = TARGET_EL2HLT
,
575 [EDEADLK
] = TARGET_EDEADLK
,
576 [ENOLCK
] = TARGET_ENOLCK
,
577 [EBADE
] = TARGET_EBADE
,
578 [EBADR
] = TARGET_EBADR
,
579 [EXFULL
] = TARGET_EXFULL
,
580 [ENOANO
] = TARGET_ENOANO
,
581 [EBADRQC
] = TARGET_EBADRQC
,
582 [EBADSLT
] = TARGET_EBADSLT
,
583 [EBFONT
] = TARGET_EBFONT
,
584 [ENOSTR
] = TARGET_ENOSTR
,
585 [ENODATA
] = TARGET_ENODATA
,
586 [ETIME
] = TARGET_ETIME
,
587 [ENOSR
] = TARGET_ENOSR
,
588 [ENONET
] = TARGET_ENONET
,
589 [ENOPKG
] = TARGET_ENOPKG
,
590 [EREMOTE
] = TARGET_EREMOTE
,
591 [ENOLINK
] = TARGET_ENOLINK
,
592 [EADV
] = TARGET_EADV
,
593 [ESRMNT
] = TARGET_ESRMNT
,
594 [ECOMM
] = TARGET_ECOMM
,
595 [EPROTO
] = TARGET_EPROTO
,
596 [EDOTDOT
] = TARGET_EDOTDOT
,
597 [EMULTIHOP
] = TARGET_EMULTIHOP
,
598 [EBADMSG
] = TARGET_EBADMSG
,
599 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
600 [EOVERFLOW
] = TARGET_EOVERFLOW
,
601 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
602 [EBADFD
] = TARGET_EBADFD
,
603 [EREMCHG
] = TARGET_EREMCHG
,
604 [ELIBACC
] = TARGET_ELIBACC
,
605 [ELIBBAD
] = TARGET_ELIBBAD
,
606 [ELIBSCN
] = TARGET_ELIBSCN
,
607 [ELIBMAX
] = TARGET_ELIBMAX
,
608 [ELIBEXEC
] = TARGET_ELIBEXEC
,
609 [EILSEQ
] = TARGET_EILSEQ
,
610 [ENOSYS
] = TARGET_ENOSYS
,
611 [ELOOP
] = TARGET_ELOOP
,
612 [ERESTART
] = TARGET_ERESTART
,
613 [ESTRPIPE
] = TARGET_ESTRPIPE
,
614 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
615 [EUSERS
] = TARGET_EUSERS
,
616 [ENOTSOCK
] = TARGET_ENOTSOCK
,
617 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
618 [EMSGSIZE
] = TARGET_EMSGSIZE
,
619 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
620 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
621 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
622 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
623 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
624 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
625 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
626 [EADDRINUSE
] = TARGET_EADDRINUSE
,
627 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
628 [ENETDOWN
] = TARGET_ENETDOWN
,
629 [ENETUNREACH
] = TARGET_ENETUNREACH
,
630 [ENETRESET
] = TARGET_ENETRESET
,
631 [ECONNABORTED
] = TARGET_ECONNABORTED
,
632 [ECONNRESET
] = TARGET_ECONNRESET
,
633 [ENOBUFS
] = TARGET_ENOBUFS
,
634 [EISCONN
] = TARGET_EISCONN
,
635 [ENOTCONN
] = TARGET_ENOTCONN
,
636 [EUCLEAN
] = TARGET_EUCLEAN
,
637 [ENOTNAM
] = TARGET_ENOTNAM
,
638 [ENAVAIL
] = TARGET_ENAVAIL
,
639 [EISNAM
] = TARGET_EISNAM
,
640 [EREMOTEIO
] = TARGET_EREMOTEIO
,
641 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
642 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
643 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
644 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
645 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
646 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
647 [EALREADY
] = TARGET_EALREADY
,
648 [EINPROGRESS
] = TARGET_EINPROGRESS
,
649 [ESTALE
] = TARGET_ESTALE
,
650 [ECANCELED
] = TARGET_ECANCELED
,
651 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
652 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
654 [ENOKEY
] = TARGET_ENOKEY
,
657 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
660 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
663 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
666 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
668 #ifdef ENOTRECOVERABLE
669 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
673 static inline int host_to_target_errno(int err
)
675 if(host_to_target_errno_table
[err
])
676 return host_to_target_errno_table
[err
];
680 static inline int target_to_host_errno(int err
)
682 if (target_to_host_errno_table
[err
])
683 return target_to_host_errno_table
[err
];
687 static inline abi_long
get_errno(abi_long ret
)
690 return -host_to_target_errno(errno
);
695 static inline int is_error(abi_long ret
)
697 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
700 char *target_strerror(int err
)
702 return strerror(target_to_host_errno(err
));
705 static abi_ulong target_brk
;
706 static abi_ulong target_original_brk
;
708 void target_set_brk(abi_ulong new_brk
)
710 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
713 /* do_brk() must return target values and target errnos. */
714 abi_long
do_brk(abi_ulong new_brk
)
717 abi_long mapped_addr
;
722 if (new_brk
< target_original_brk
)
725 brk_page
= HOST_PAGE_ALIGN(target_brk
);
727 /* If the new brk is less than this, set it and we're done... */
728 if (new_brk
< brk_page
) {
729 target_brk
= new_brk
;
733 /* We need to allocate more memory after the brk... */
734 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
+ 1);
735 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
736 PROT_READ
|PROT_WRITE
,
737 MAP_ANON
|MAP_FIXED
|MAP_PRIVATE
, 0, 0));
739 #if defined(TARGET_ALPHA)
740 /* We (partially) emulate OSF/1 on Alpha, which requires we
741 return a proper errno, not an unchanged brk value. */
742 if (is_error(mapped_addr
)) {
743 return -TARGET_ENOMEM
;
747 if (!is_error(mapped_addr
)) {
748 target_brk
= new_brk
;
753 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
754 abi_ulong target_fds_addr
,
758 abi_ulong b
, *target_fds
;
760 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
761 if (!(target_fds
= lock_user(VERIFY_READ
,
763 sizeof(abi_ulong
) * nw
,
765 return -TARGET_EFAULT
;
769 for (i
= 0; i
< nw
; i
++) {
770 /* grab the abi_ulong */
771 __get_user(b
, &target_fds
[i
]);
772 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
773 /* check the bit inside the abi_ulong */
780 unlock_user(target_fds
, target_fds_addr
, 0);
785 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
791 abi_ulong
*target_fds
;
793 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
794 if (!(target_fds
= lock_user(VERIFY_WRITE
,
796 sizeof(abi_ulong
) * nw
,
798 return -TARGET_EFAULT
;
801 for (i
= 0; i
< nw
; i
++) {
803 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
804 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
807 __put_user(v
, &target_fds
[i
]);
810 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
815 #if defined(__alpha__)
821 static inline abi_long
host_to_target_clock_t(long ticks
)
823 #if HOST_HZ == TARGET_HZ
826 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
830 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
831 const struct rusage
*rusage
)
833 struct target_rusage
*target_rusage
;
835 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
836 return -TARGET_EFAULT
;
837 target_rusage
->ru_utime
.tv_sec
= tswapl(rusage
->ru_utime
.tv_sec
);
838 target_rusage
->ru_utime
.tv_usec
= tswapl(rusage
->ru_utime
.tv_usec
);
839 target_rusage
->ru_stime
.tv_sec
= tswapl(rusage
->ru_stime
.tv_sec
);
840 target_rusage
->ru_stime
.tv_usec
= tswapl(rusage
->ru_stime
.tv_usec
);
841 target_rusage
->ru_maxrss
= tswapl(rusage
->ru_maxrss
);
842 target_rusage
->ru_ixrss
= tswapl(rusage
->ru_ixrss
);
843 target_rusage
->ru_idrss
= tswapl(rusage
->ru_idrss
);
844 target_rusage
->ru_isrss
= tswapl(rusage
->ru_isrss
);
845 target_rusage
->ru_minflt
= tswapl(rusage
->ru_minflt
);
846 target_rusage
->ru_majflt
= tswapl(rusage
->ru_majflt
);
847 target_rusage
->ru_nswap
= tswapl(rusage
->ru_nswap
);
848 target_rusage
->ru_inblock
= tswapl(rusage
->ru_inblock
);
849 target_rusage
->ru_oublock
= tswapl(rusage
->ru_oublock
);
850 target_rusage
->ru_msgsnd
= tswapl(rusage
->ru_msgsnd
);
851 target_rusage
->ru_msgrcv
= tswapl(rusage
->ru_msgrcv
);
852 target_rusage
->ru_nsignals
= tswapl(rusage
->ru_nsignals
);
853 target_rusage
->ru_nvcsw
= tswapl(rusage
->ru_nvcsw
);
854 target_rusage
->ru_nivcsw
= tswapl(rusage
->ru_nivcsw
);
855 unlock_user_struct(target_rusage
, target_addr
, 1);
860 static inline rlim_t
target_to_host_rlim(target_ulong target_rlim
)
862 if (target_rlim
== TARGET_RLIM_INFINITY
)
863 return RLIM_INFINITY
;
865 return tswapl(target_rlim
);
868 static inline target_ulong
host_to_target_rlim(rlim_t rlim
)
870 if (rlim
== RLIM_INFINITY
|| rlim
!= (target_long
)rlim
)
871 return TARGET_RLIM_INFINITY
;
876 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
877 abi_ulong target_tv_addr
)
879 struct target_timeval
*target_tv
;
881 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
882 return -TARGET_EFAULT
;
884 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
885 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
887 unlock_user_struct(target_tv
, target_tv_addr
, 0);
892 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
893 const struct timeval
*tv
)
895 struct target_timeval
*target_tv
;
897 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
898 return -TARGET_EFAULT
;
900 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
901 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
903 unlock_user_struct(target_tv
, target_tv_addr
, 1);
908 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
911 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
912 abi_ulong target_mq_attr_addr
)
914 struct target_mq_attr
*target_mq_attr
;
916 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
917 target_mq_attr_addr
, 1))
918 return -TARGET_EFAULT
;
920 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
921 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
922 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
923 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
925 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
930 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
931 const struct mq_attr
*attr
)
933 struct target_mq_attr
*target_mq_attr
;
935 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
936 target_mq_attr_addr
, 0))
937 return -TARGET_EFAULT
;
939 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
940 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
941 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
942 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
944 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
950 /* do_select() must return target values and target errnos. */
951 static abi_long
do_select(int n
,
952 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
953 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
955 fd_set rfds
, wfds
, efds
;
956 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
957 struct timeval tv
, *tv_ptr
;
961 if (copy_from_user_fdset(&rfds
, rfd_addr
, n
))
962 return -TARGET_EFAULT
;
968 if (copy_from_user_fdset(&wfds
, wfd_addr
, n
))
969 return -TARGET_EFAULT
;
975 if (copy_from_user_fdset(&efds
, efd_addr
, n
))
976 return -TARGET_EFAULT
;
982 if (target_tv_addr
) {
983 if (copy_from_user_timeval(&tv
, target_tv_addr
))
984 return -TARGET_EFAULT
;
990 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
992 if (!is_error(ret
)) {
993 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
994 return -TARGET_EFAULT
;
995 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
996 return -TARGET_EFAULT
;
997 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
998 return -TARGET_EFAULT
;
1000 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1001 return -TARGET_EFAULT
;
1007 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1010 return pipe2(host_pipe
, flags
);
1016 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1017 int flags
, int is_pipe2
)
1021 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1024 return get_errno(ret
);
1026 /* Several targets have special calling conventions for the original
1027 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1029 #if defined(TARGET_ALPHA)
1030 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1031 return host_pipe
[0];
1032 #elif defined(TARGET_MIPS)
1033 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1034 return host_pipe
[0];
1035 #elif defined(TARGET_SH4)
1036 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1037 return host_pipe
[0];
1041 if (put_user_s32(host_pipe
[0], pipedes
)
1042 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1043 return -TARGET_EFAULT
;
1044 return get_errno(ret
);
1047 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1048 abi_ulong target_addr
,
1051 struct target_ip_mreqn
*target_smreqn
;
1053 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1055 return -TARGET_EFAULT
;
1056 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1057 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1058 if (len
== sizeof(struct target_ip_mreqn
))
1059 mreqn
->imr_ifindex
= tswapl(target_smreqn
->imr_ifindex
);
1060 unlock_user(target_smreqn
, target_addr
, 0);
1065 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1066 abi_ulong target_addr
,
1069 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1070 sa_family_t sa_family
;
1071 struct target_sockaddr
*target_saddr
;
1073 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1075 return -TARGET_EFAULT
;
1077 sa_family
= tswap16(target_saddr
->sa_family
);
1079 /* Oops. The caller might send a incomplete sun_path; sun_path
1080 * must be terminated by \0 (see the manual page), but
1081 * unfortunately it is quite common to specify sockaddr_un
1082 * length as "strlen(x->sun_path)" while it should be
1083 * "strlen(...) + 1". We'll fix that here if needed.
1084 * Linux kernel has a similar feature.
1087 if (sa_family
== AF_UNIX
) {
1088 if (len
< unix_maxlen
&& len
> 0) {
1089 char *cp
= (char*)target_saddr
;
1091 if ( cp
[len
-1] && !cp
[len
] )
1094 if (len
> unix_maxlen
)
1098 memcpy(addr
, target_saddr
, len
);
1099 addr
->sa_family
= sa_family
;
1100 unlock_user(target_saddr
, target_addr
, 0);
1105 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1106 struct sockaddr
*addr
,
1109 struct target_sockaddr
*target_saddr
;
1111 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1113 return -TARGET_EFAULT
;
1114 memcpy(target_saddr
, addr
, len
);
1115 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1116 unlock_user(target_saddr
, target_addr
, len
);
1121 /* ??? Should this also swap msgh->name? */
1122 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1123 struct target_msghdr
*target_msgh
)
1125 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1126 abi_long msg_controllen
;
1127 abi_ulong target_cmsg_addr
;
1128 struct target_cmsghdr
*target_cmsg
;
1129 socklen_t space
= 0;
1131 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1132 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1134 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1135 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1137 return -TARGET_EFAULT
;
1139 while (cmsg
&& target_cmsg
) {
1140 void *data
= CMSG_DATA(cmsg
);
1141 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1143 int len
= tswapl(target_cmsg
->cmsg_len
)
1144 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1146 space
+= CMSG_SPACE(len
);
1147 if (space
> msgh
->msg_controllen
) {
1148 space
-= CMSG_SPACE(len
);
1149 gemu_log("Host cmsg overflow\n");
1153 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1154 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1155 cmsg
->cmsg_len
= CMSG_LEN(len
);
1157 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1158 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1159 memcpy(data
, target_data
, len
);
1161 int *fd
= (int *)data
;
1162 int *target_fd
= (int *)target_data
;
1163 int i
, numfds
= len
/ sizeof(int);
1165 for (i
= 0; i
< numfds
; i
++)
1166 fd
[i
] = tswap32(target_fd
[i
]);
1169 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1170 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1172 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1174 msgh
->msg_controllen
= space
;
1178 /* ??? Should this also swap msgh->name? */
1179 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1180 struct msghdr
*msgh
)
1182 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1183 abi_long msg_controllen
;
1184 abi_ulong target_cmsg_addr
;
1185 struct target_cmsghdr
*target_cmsg
;
1186 socklen_t space
= 0;
1188 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1189 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1191 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1192 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1194 return -TARGET_EFAULT
;
1196 while (cmsg
&& target_cmsg
) {
1197 void *data
= CMSG_DATA(cmsg
);
1198 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1200 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1202 space
+= TARGET_CMSG_SPACE(len
);
1203 if (space
> msg_controllen
) {
1204 space
-= TARGET_CMSG_SPACE(len
);
1205 gemu_log("Target cmsg overflow\n");
1209 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1210 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1211 target_cmsg
->cmsg_len
= tswapl(TARGET_CMSG_LEN(len
));
1213 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1214 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1215 memcpy(target_data
, data
, len
);
1217 int *fd
= (int *)data
;
1218 int *target_fd
= (int *)target_data
;
1219 int i
, numfds
= len
/ sizeof(int);
1221 for (i
= 0; i
< numfds
; i
++)
1222 target_fd
[i
] = tswap32(fd
[i
]);
1225 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1226 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1228 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1230 target_msgh
->msg_controllen
= tswapl(space
);
1234 /* do_setsockopt() Must return target values and target errnos. */
1235 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1236 abi_ulong optval_addr
, socklen_t optlen
)
1240 struct ip_mreqn
*ip_mreq
;
1241 struct ip_mreq_source
*ip_mreq_source
;
1245 /* TCP options all take an 'int' value. */
1246 if (optlen
< sizeof(uint32_t))
1247 return -TARGET_EINVAL
;
1249 if (get_user_u32(val
, optval_addr
))
1250 return -TARGET_EFAULT
;
1251 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1258 case IP_ROUTER_ALERT
:
1262 case IP_MTU_DISCOVER
:
1268 case IP_MULTICAST_TTL
:
1269 case IP_MULTICAST_LOOP
:
1271 if (optlen
>= sizeof(uint32_t)) {
1272 if (get_user_u32(val
, optval_addr
))
1273 return -TARGET_EFAULT
;
1274 } else if (optlen
>= 1) {
1275 if (get_user_u8(val
, optval_addr
))
1276 return -TARGET_EFAULT
;
1278 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1280 case IP_ADD_MEMBERSHIP
:
1281 case IP_DROP_MEMBERSHIP
:
1282 if (optlen
< sizeof (struct target_ip_mreq
) ||
1283 optlen
> sizeof (struct target_ip_mreqn
))
1284 return -TARGET_EINVAL
;
1286 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1287 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1288 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1291 case IP_BLOCK_SOURCE
:
1292 case IP_UNBLOCK_SOURCE
:
1293 case IP_ADD_SOURCE_MEMBERSHIP
:
1294 case IP_DROP_SOURCE_MEMBERSHIP
:
1295 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1296 return -TARGET_EINVAL
;
1298 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1299 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1300 unlock_user (ip_mreq_source
, optval_addr
, 0);
1307 case TARGET_SOL_SOCKET
:
1309 /* Options with 'int' argument. */
1310 case TARGET_SO_DEBUG
:
1313 case TARGET_SO_REUSEADDR
:
1314 optname
= SO_REUSEADDR
;
1316 case TARGET_SO_TYPE
:
1319 case TARGET_SO_ERROR
:
1322 case TARGET_SO_DONTROUTE
:
1323 optname
= SO_DONTROUTE
;
1325 case TARGET_SO_BROADCAST
:
1326 optname
= SO_BROADCAST
;
1328 case TARGET_SO_SNDBUF
:
1329 optname
= SO_SNDBUF
;
1331 case TARGET_SO_RCVBUF
:
1332 optname
= SO_RCVBUF
;
1334 case TARGET_SO_KEEPALIVE
:
1335 optname
= SO_KEEPALIVE
;
1337 case TARGET_SO_OOBINLINE
:
1338 optname
= SO_OOBINLINE
;
1340 case TARGET_SO_NO_CHECK
:
1341 optname
= SO_NO_CHECK
;
1343 case TARGET_SO_PRIORITY
:
1344 optname
= SO_PRIORITY
;
1347 case TARGET_SO_BSDCOMPAT
:
1348 optname
= SO_BSDCOMPAT
;
1351 case TARGET_SO_PASSCRED
:
1352 optname
= SO_PASSCRED
;
1354 case TARGET_SO_TIMESTAMP
:
1355 optname
= SO_TIMESTAMP
;
1357 case TARGET_SO_RCVLOWAT
:
1358 optname
= SO_RCVLOWAT
;
1360 case TARGET_SO_RCVTIMEO
:
1361 optname
= SO_RCVTIMEO
;
1363 case TARGET_SO_SNDTIMEO
:
1364 optname
= SO_SNDTIMEO
;
1370 if (optlen
< sizeof(uint32_t))
1371 return -TARGET_EINVAL
;
1373 if (get_user_u32(val
, optval_addr
))
1374 return -TARGET_EFAULT
;
1375 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1379 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level
, optname
);
1380 ret
= -TARGET_ENOPROTOOPT
;
1385 /* do_getsockopt() Must return target values and target errnos. */
1386 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1387 abi_ulong optval_addr
, abi_ulong optlen
)
1394 case TARGET_SOL_SOCKET
:
1397 /* These don't just return a single integer */
1398 case TARGET_SO_LINGER
:
1399 case TARGET_SO_RCVTIMEO
:
1400 case TARGET_SO_SNDTIMEO
:
1401 case TARGET_SO_PEERCRED
:
1402 case TARGET_SO_PEERNAME
:
1404 /* Options with 'int' argument. */
1405 case TARGET_SO_DEBUG
:
1408 case TARGET_SO_REUSEADDR
:
1409 optname
= SO_REUSEADDR
;
1411 case TARGET_SO_TYPE
:
1414 case TARGET_SO_ERROR
:
1417 case TARGET_SO_DONTROUTE
:
1418 optname
= SO_DONTROUTE
;
1420 case TARGET_SO_BROADCAST
:
1421 optname
= SO_BROADCAST
;
1423 case TARGET_SO_SNDBUF
:
1424 optname
= SO_SNDBUF
;
1426 case TARGET_SO_RCVBUF
:
1427 optname
= SO_RCVBUF
;
1429 case TARGET_SO_KEEPALIVE
:
1430 optname
= SO_KEEPALIVE
;
1432 case TARGET_SO_OOBINLINE
:
1433 optname
= SO_OOBINLINE
;
1435 case TARGET_SO_NO_CHECK
:
1436 optname
= SO_NO_CHECK
;
1438 case TARGET_SO_PRIORITY
:
1439 optname
= SO_PRIORITY
;
1442 case TARGET_SO_BSDCOMPAT
:
1443 optname
= SO_BSDCOMPAT
;
1446 case TARGET_SO_PASSCRED
:
1447 optname
= SO_PASSCRED
;
1449 case TARGET_SO_TIMESTAMP
:
1450 optname
= SO_TIMESTAMP
;
1452 case TARGET_SO_RCVLOWAT
:
1453 optname
= SO_RCVLOWAT
;
1460 /* TCP options all take an 'int' value. */
1462 if (get_user_u32(len
, optlen
))
1463 return -TARGET_EFAULT
;
1465 return -TARGET_EINVAL
;
1467 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1473 if (put_user_u32(val
, optval_addr
))
1474 return -TARGET_EFAULT
;
1476 if (put_user_u8(val
, optval_addr
))
1477 return -TARGET_EFAULT
;
1479 if (put_user_u32(len
, optlen
))
1480 return -TARGET_EFAULT
;
1487 case IP_ROUTER_ALERT
:
1491 case IP_MTU_DISCOVER
:
1497 case IP_MULTICAST_TTL
:
1498 case IP_MULTICAST_LOOP
:
1499 if (get_user_u32(len
, optlen
))
1500 return -TARGET_EFAULT
;
1502 return -TARGET_EINVAL
;
1504 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1507 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1509 if (put_user_u32(len
, optlen
)
1510 || put_user_u8(val
, optval_addr
))
1511 return -TARGET_EFAULT
;
1513 if (len
> sizeof(int))
1515 if (put_user_u32(len
, optlen
)
1516 || put_user_u32(val
, optval_addr
))
1517 return -TARGET_EFAULT
;
1521 ret
= -TARGET_ENOPROTOOPT
;
1527 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1529 ret
= -TARGET_EOPNOTSUPP
;
1536 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1537 * other lock functions have a return code of 0 for failure.
1539 static abi_long
lock_iovec(int type
, struct iovec
*vec
, abi_ulong target_addr
,
1540 int count
, int copy
)
1542 struct target_iovec
*target_vec
;
1546 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1548 return -TARGET_EFAULT
;
1549 for(i
= 0;i
< count
; i
++) {
1550 base
= tswapl(target_vec
[i
].iov_base
);
1551 vec
[i
].iov_len
= tswapl(target_vec
[i
].iov_len
);
1552 if (vec
[i
].iov_len
!= 0) {
1553 vec
[i
].iov_base
= lock_user(type
, base
, vec
[i
].iov_len
, copy
);
1554 /* Don't check lock_user return value. We must call writev even
1555 if a element has invalid base address. */
1557 /* zero length pointer is ignored */
1558 vec
[i
].iov_base
= NULL
;
1561 unlock_user (target_vec
, target_addr
, 0);
1565 static abi_long
unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1566 int count
, int copy
)
1568 struct target_iovec
*target_vec
;
1572 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1574 return -TARGET_EFAULT
;
1575 for(i
= 0;i
< count
; i
++) {
1576 if (target_vec
[i
].iov_base
) {
1577 base
= tswapl(target_vec
[i
].iov_base
);
1578 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1581 unlock_user (target_vec
, target_addr
, 0);
1586 /* do_socket() Must return target values and target errnos. */
1587 static abi_long
do_socket(int domain
, int type
, int protocol
)
1589 #if defined(TARGET_MIPS)
1591 case TARGET_SOCK_DGRAM
:
1594 case TARGET_SOCK_STREAM
:
1597 case TARGET_SOCK_RAW
:
1600 case TARGET_SOCK_RDM
:
1603 case TARGET_SOCK_SEQPACKET
:
1604 type
= SOCK_SEQPACKET
;
1606 case TARGET_SOCK_PACKET
:
1611 if (domain
== PF_NETLINK
)
1612 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1613 return get_errno(socket(domain
, type
, protocol
));
1616 /* do_bind() Must return target values and target errnos. */
1617 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1623 if ((int)addrlen
< 0) {
1624 return -TARGET_EINVAL
;
1627 addr
= alloca(addrlen
+1);
1629 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1633 return get_errno(bind(sockfd
, addr
, addrlen
));
1636 /* do_connect() Must return target values and target errnos. */
1637 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1643 if ((int)addrlen
< 0) {
1644 return -TARGET_EINVAL
;
1647 addr
= alloca(addrlen
);
1649 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1653 return get_errno(connect(sockfd
, addr
, addrlen
));
1656 /* do_sendrecvmsg() Must return target values and target errnos. */
1657 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1658 int flags
, int send
)
1661 struct target_msghdr
*msgp
;
1665 abi_ulong target_vec
;
1668 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1672 return -TARGET_EFAULT
;
1673 if (msgp
->msg_name
) {
1674 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1675 msg
.msg_name
= alloca(msg
.msg_namelen
);
1676 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapl(msgp
->msg_name
),
1679 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1683 msg
.msg_name
= NULL
;
1684 msg
.msg_namelen
= 0;
1686 msg
.msg_controllen
= 2 * tswapl(msgp
->msg_controllen
);
1687 msg
.msg_control
= alloca(msg
.msg_controllen
);
1688 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1690 count
= tswapl(msgp
->msg_iovlen
);
1691 vec
= alloca(count
* sizeof(struct iovec
));
1692 target_vec
= tswapl(msgp
->msg_iov
);
1693 lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
, vec
, target_vec
, count
, send
);
1694 msg
.msg_iovlen
= count
;
1698 ret
= target_to_host_cmsg(&msg
, msgp
);
1700 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1702 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1703 if (!is_error(ret
)) {
1705 ret
= host_to_target_cmsg(msgp
, &msg
);
1710 unlock_iovec(vec
, target_vec
, count
, !send
);
1711 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1715 /* do_accept() Must return target values and target errnos. */
1716 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
1717 abi_ulong target_addrlen_addr
)
1723 if (target_addr
== 0)
1724 return get_errno(accept(fd
, NULL
, NULL
));
1726 /* linux returns EINVAL if addrlen pointer is invalid */
1727 if (get_user_u32(addrlen
, target_addrlen_addr
))
1728 return -TARGET_EINVAL
;
1730 if ((int)addrlen
< 0) {
1731 return -TARGET_EINVAL
;
1734 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1735 return -TARGET_EINVAL
;
1737 addr
= alloca(addrlen
);
1739 ret
= get_errno(accept(fd
, addr
, &addrlen
));
1740 if (!is_error(ret
)) {
1741 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1742 if (put_user_u32(addrlen
, target_addrlen_addr
))
1743 ret
= -TARGET_EFAULT
;
1748 /* do_getpeername() Must return target values and target errnos. */
1749 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
1750 abi_ulong target_addrlen_addr
)
1756 if (get_user_u32(addrlen
, target_addrlen_addr
))
1757 return -TARGET_EFAULT
;
1759 if ((int)addrlen
< 0) {
1760 return -TARGET_EINVAL
;
1763 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1764 return -TARGET_EFAULT
;
1766 addr
= alloca(addrlen
);
1768 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
1769 if (!is_error(ret
)) {
1770 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1771 if (put_user_u32(addrlen
, target_addrlen_addr
))
1772 ret
= -TARGET_EFAULT
;
1777 /* do_getsockname() Must return target values and target errnos. */
1778 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
1779 abi_ulong target_addrlen_addr
)
1785 if (get_user_u32(addrlen
, target_addrlen_addr
))
1786 return -TARGET_EFAULT
;
1788 if ((int)addrlen
< 0) {
1789 return -TARGET_EINVAL
;
1792 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1793 return -TARGET_EFAULT
;
1795 addr
= alloca(addrlen
);
1797 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
1798 if (!is_error(ret
)) {
1799 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1800 if (put_user_u32(addrlen
, target_addrlen_addr
))
1801 ret
= -TARGET_EFAULT
;
1806 /* do_socketpair() Must return target values and target errnos. */
1807 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
1808 abi_ulong target_tab_addr
)
1813 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
1814 if (!is_error(ret
)) {
1815 if (put_user_s32(tab
[0], target_tab_addr
)
1816 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
1817 ret
= -TARGET_EFAULT
;
1822 /* do_sendto() Must return target values and target errnos. */
1823 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
1824 abi_ulong target_addr
, socklen_t addrlen
)
1830 if ((int)addrlen
< 0) {
1831 return -TARGET_EINVAL
;
1834 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
1836 return -TARGET_EFAULT
;
1838 addr
= alloca(addrlen
);
1839 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1841 unlock_user(host_msg
, msg
, 0);
1844 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
1846 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
1848 unlock_user(host_msg
, msg
, 0);
1852 /* do_recvfrom() Must return target values and target errnos. */
1853 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
1854 abi_ulong target_addr
,
1855 abi_ulong target_addrlen
)
1862 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
1864 return -TARGET_EFAULT
;
1866 if (get_user_u32(addrlen
, target_addrlen
)) {
1867 ret
= -TARGET_EFAULT
;
1870 if ((int)addrlen
< 0) {
1871 ret
= -TARGET_EINVAL
;
1874 addr
= alloca(addrlen
);
1875 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
1877 addr
= NULL
; /* To keep compiler quiet. */
1878 ret
= get_errno(recv(fd
, host_msg
, len
, flags
));
1880 if (!is_error(ret
)) {
1882 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1883 if (put_user_u32(addrlen
, target_addrlen
)) {
1884 ret
= -TARGET_EFAULT
;
1888 unlock_user(host_msg
, msg
, len
);
1891 unlock_user(host_msg
, msg
, 0);
1896 #ifdef TARGET_NR_socketcall
1897 /* do_socketcall() Must return target values and target errnos. */
1898 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
1901 const int n
= sizeof(abi_ulong
);
1906 abi_ulong domain
, type
, protocol
;
1908 if (get_user_ual(domain
, vptr
)
1909 || get_user_ual(type
, vptr
+ n
)
1910 || get_user_ual(protocol
, vptr
+ 2 * n
))
1911 return -TARGET_EFAULT
;
1913 ret
= do_socket(domain
, type
, protocol
);
1919 abi_ulong target_addr
;
1922 if (get_user_ual(sockfd
, vptr
)
1923 || get_user_ual(target_addr
, vptr
+ n
)
1924 || get_user_ual(addrlen
, vptr
+ 2 * n
))
1925 return -TARGET_EFAULT
;
1927 ret
= do_bind(sockfd
, target_addr
, addrlen
);
1930 case SOCKOP_connect
:
1933 abi_ulong target_addr
;
1936 if (get_user_ual(sockfd
, vptr
)
1937 || get_user_ual(target_addr
, vptr
+ n
)
1938 || get_user_ual(addrlen
, vptr
+ 2 * n
))
1939 return -TARGET_EFAULT
;
1941 ret
= do_connect(sockfd
, target_addr
, addrlen
);
1946 abi_ulong sockfd
, backlog
;
1948 if (get_user_ual(sockfd
, vptr
)
1949 || get_user_ual(backlog
, vptr
+ n
))
1950 return -TARGET_EFAULT
;
1952 ret
= get_errno(listen(sockfd
, backlog
));
1958 abi_ulong target_addr
, target_addrlen
;
1960 if (get_user_ual(sockfd
, vptr
)
1961 || get_user_ual(target_addr
, vptr
+ n
)
1962 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1963 return -TARGET_EFAULT
;
1965 ret
= do_accept(sockfd
, target_addr
, target_addrlen
);
1968 case SOCKOP_getsockname
:
1971 abi_ulong target_addr
, target_addrlen
;
1973 if (get_user_ual(sockfd
, vptr
)
1974 || get_user_ual(target_addr
, vptr
+ n
)
1975 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1976 return -TARGET_EFAULT
;
1978 ret
= do_getsockname(sockfd
, target_addr
, target_addrlen
);
1981 case SOCKOP_getpeername
:
1984 abi_ulong target_addr
, target_addrlen
;
1986 if (get_user_ual(sockfd
, vptr
)
1987 || get_user_ual(target_addr
, vptr
+ n
)
1988 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1989 return -TARGET_EFAULT
;
1991 ret
= do_getpeername(sockfd
, target_addr
, target_addrlen
);
1994 case SOCKOP_socketpair
:
1996 abi_ulong domain
, type
, protocol
;
1999 if (get_user_ual(domain
, vptr
)
2000 || get_user_ual(type
, vptr
+ n
)
2001 || get_user_ual(protocol
, vptr
+ 2 * n
)
2002 || get_user_ual(tab
, vptr
+ 3 * n
))
2003 return -TARGET_EFAULT
;
2005 ret
= do_socketpair(domain
, type
, protocol
, tab
);
2015 if (get_user_ual(sockfd
, vptr
)
2016 || get_user_ual(msg
, vptr
+ n
)
2017 || get_user_ual(len
, vptr
+ 2 * n
)
2018 || get_user_ual(flags
, vptr
+ 3 * n
))
2019 return -TARGET_EFAULT
;
2021 ret
= do_sendto(sockfd
, msg
, len
, flags
, 0, 0);
2031 if (get_user_ual(sockfd
, vptr
)
2032 || get_user_ual(msg
, vptr
+ n
)
2033 || get_user_ual(len
, vptr
+ 2 * n
)
2034 || get_user_ual(flags
, vptr
+ 3 * n
))
2035 return -TARGET_EFAULT
;
2037 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, 0, 0);
2049 if (get_user_ual(sockfd
, vptr
)
2050 || get_user_ual(msg
, vptr
+ n
)
2051 || get_user_ual(len
, vptr
+ 2 * n
)
2052 || get_user_ual(flags
, vptr
+ 3 * n
)
2053 || get_user_ual(addr
, vptr
+ 4 * n
)
2054 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2055 return -TARGET_EFAULT
;
2057 ret
= do_sendto(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2060 case SOCKOP_recvfrom
:
2069 if (get_user_ual(sockfd
, vptr
)
2070 || get_user_ual(msg
, vptr
+ n
)
2071 || get_user_ual(len
, vptr
+ 2 * n
)
2072 || get_user_ual(flags
, vptr
+ 3 * n
)
2073 || get_user_ual(addr
, vptr
+ 4 * n
)
2074 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2075 return -TARGET_EFAULT
;
2077 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2080 case SOCKOP_shutdown
:
2082 abi_ulong sockfd
, how
;
2084 if (get_user_ual(sockfd
, vptr
)
2085 || get_user_ual(how
, vptr
+ n
))
2086 return -TARGET_EFAULT
;
2088 ret
= get_errno(shutdown(sockfd
, how
));
2091 case SOCKOP_sendmsg
:
2092 case SOCKOP_recvmsg
:
2095 abi_ulong target_msg
;
2098 if (get_user_ual(fd
, vptr
)
2099 || get_user_ual(target_msg
, vptr
+ n
)
2100 || get_user_ual(flags
, vptr
+ 2 * n
))
2101 return -TARGET_EFAULT
;
2103 ret
= do_sendrecvmsg(fd
, target_msg
, flags
,
2104 (num
== SOCKOP_sendmsg
));
2107 case SOCKOP_setsockopt
:
2115 if (get_user_ual(sockfd
, vptr
)
2116 || get_user_ual(level
, vptr
+ n
)
2117 || get_user_ual(optname
, vptr
+ 2 * n
)
2118 || get_user_ual(optval
, vptr
+ 3 * n
)
2119 || get_user_ual(optlen
, vptr
+ 4 * n
))
2120 return -TARGET_EFAULT
;
2122 ret
= do_setsockopt(sockfd
, level
, optname
, optval
, optlen
);
2125 case SOCKOP_getsockopt
:
2133 if (get_user_ual(sockfd
, vptr
)
2134 || get_user_ual(level
, vptr
+ n
)
2135 || get_user_ual(optname
, vptr
+ 2 * n
)
2136 || get_user_ual(optval
, vptr
+ 3 * n
)
2137 || get_user_ual(optlen
, vptr
+ 4 * n
))
2138 return -TARGET_EFAULT
;
2140 ret
= do_getsockopt(sockfd
, level
, optname
, optval
, optlen
);
2144 gemu_log("Unsupported socketcall: %d\n", num
);
2145 ret
= -TARGET_ENOSYS
;
2152 #define N_SHM_REGIONS 32
2154 static struct shm_region
{
2157 } shm_regions
[N_SHM_REGIONS
];
2159 struct target_ipc_perm
2166 unsigned short int mode
;
2167 unsigned short int __pad1
;
2168 unsigned short int __seq
;
2169 unsigned short int __pad2
;
2170 abi_ulong __unused1
;
2171 abi_ulong __unused2
;
2174 struct target_semid_ds
2176 struct target_ipc_perm sem_perm
;
2177 abi_ulong sem_otime
;
2178 abi_ulong __unused1
;
2179 abi_ulong sem_ctime
;
2180 abi_ulong __unused2
;
2181 abi_ulong sem_nsems
;
2182 abi_ulong __unused3
;
2183 abi_ulong __unused4
;
2186 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2187 abi_ulong target_addr
)
2189 struct target_ipc_perm
*target_ip
;
2190 struct target_semid_ds
*target_sd
;
2192 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2193 return -TARGET_EFAULT
;
2194 target_ip
= &(target_sd
->sem_perm
);
2195 host_ip
->__key
= tswapl(target_ip
->__key
);
2196 host_ip
->uid
= tswapl(target_ip
->uid
);
2197 host_ip
->gid
= tswapl(target_ip
->gid
);
2198 host_ip
->cuid
= tswapl(target_ip
->cuid
);
2199 host_ip
->cgid
= tswapl(target_ip
->cgid
);
2200 host_ip
->mode
= tswapl(target_ip
->mode
);
2201 unlock_user_struct(target_sd
, target_addr
, 0);
2205 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2206 struct ipc_perm
*host_ip
)
2208 struct target_ipc_perm
*target_ip
;
2209 struct target_semid_ds
*target_sd
;
2211 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2212 return -TARGET_EFAULT
;
2213 target_ip
= &(target_sd
->sem_perm
);
2214 target_ip
->__key
= tswapl(host_ip
->__key
);
2215 target_ip
->uid
= tswapl(host_ip
->uid
);
2216 target_ip
->gid
= tswapl(host_ip
->gid
);
2217 target_ip
->cuid
= tswapl(host_ip
->cuid
);
2218 target_ip
->cgid
= tswapl(host_ip
->cgid
);
2219 target_ip
->mode
= tswapl(host_ip
->mode
);
2220 unlock_user_struct(target_sd
, target_addr
, 1);
2224 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2225 abi_ulong target_addr
)
2227 struct target_semid_ds
*target_sd
;
2229 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2230 return -TARGET_EFAULT
;
2231 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2232 return -TARGET_EFAULT
;
2233 host_sd
->sem_nsems
= tswapl(target_sd
->sem_nsems
);
2234 host_sd
->sem_otime
= tswapl(target_sd
->sem_otime
);
2235 host_sd
->sem_ctime
= tswapl(target_sd
->sem_ctime
);
2236 unlock_user_struct(target_sd
, target_addr
, 0);
2240 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2241 struct semid_ds
*host_sd
)
2243 struct target_semid_ds
*target_sd
;
2245 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2246 return -TARGET_EFAULT
;
2247 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2248 return -TARGET_EFAULT
;;
2249 target_sd
->sem_nsems
= tswapl(host_sd
->sem_nsems
);
2250 target_sd
->sem_otime
= tswapl(host_sd
->sem_otime
);
2251 target_sd
->sem_ctime
= tswapl(host_sd
->sem_ctime
);
2252 unlock_user_struct(target_sd
, target_addr
, 1);
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                               struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};

static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                                abi_ulong target_addr)
{
    int nsems, i, ret;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                                unsigned short **host_array)
{
    int nsems, i, ret;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    switch (cmd) {
    case GETVAL:
    case SETVAL:
        arg.val = tswapl(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswapl(arg.val);
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
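
/* Illustrative sketch (added; not part of the original source): a guest's
 * semctl(id, 0, SETVAL, val) reaches this helper through do_ipc() with the
 * fourth argument reinterpreted as union target_semun, so only the member
 * selected by cmd needs byte swapping:
 *
 *     union target_semun su = { .val = guest_val };   // guest byte order
 *     ret = do_semctl(id, 0, SETVAL, su);             // tswapl()s .val only
 *
 * Member names follow the unions defined above; the exact call path is an
 * assumption drawn from the surrounding code.
 */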
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                              abi_ulong target_addr,
                                              unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);
    return 0;
}
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return semop(semid, sops, nsops);
}
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                                abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapl(target_md->msg_stime);
    host_md->msg_rtime = tswapl(target_md->msg_rtime);
    host_md->msg_ctime = tswapl(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapl(target_md->msg_qnum);
    host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
    host_md->msg_lspid = tswapl(target_md->msg_lspid);
    host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                                struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapl(host_md->msg_stime);
    target_md->msg_rtime = tswapl(host_md->msg_rtime);
    target_md->msg_ctime = tswapl(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapl(host_md->msg_qnum);
    target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
    target_md->msg_lspid = tswapl(host_md->msg_lspid);
    target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                               struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapl(host_mb->mtype);
    free(host_mb);

end:
    unlock_user_struct(target_mb, msgp, 1);
    return ret;
}
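
/* Illustrative note (added; not original code): the guest message buffer is
 * an abi_long mtype followed immediately by the mtext bytes, which is why
 * the text is re-locked at msgp + sizeof(abi_ulong) above. A guest-side use
 * would look roughly like:
 *
 *     struct { long mtype; char mtext[64]; } m = { 1, "hello" };
 *     msgsnd(id, &m, 6, 0);       // handled by do_msgsnd()
 *     msgrcv(id, &m, 64, 1, 0);   // handled by do_msgrcv()
 *
 * The buffer size and message type here are arbitrary example values.
 */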
struct target_shmid_ds
{
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;
    abi_ulong shm_atime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong shm_dtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong shm_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong shm_cpid;
    abi_ulong shm_lpid;
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;
};
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                                abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                                struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                               struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                                struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    return raddr;
}
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
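
/* Usage note (added; not original code): shm_regions[] is a small fixed
 * table pairing each attached guest address with its segment size, so that
 * do_shmdt() can undo the page flags set by do_shmat(). Conceptually:
 *
 *     raddr = do_shmat(id, 0, 0);   // records { raddr, segsz } in a free slot
 *     do_shmdt(raddr);              // finds the slot, clears the PAGE_* flags
 *
 * N_SHM_REGIONS bounds how many segments can be tracked at once.
 */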
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;

    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, abi_long cmd, abi_long arg);

struct IOCTLEntry {
    unsigned int target_cmd;
    unsigned int host_cmd;
    const char *name;
    int access;
    do_ioctl_fn *do_ioctl;
    const argtype arg_type[5];
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))
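
/* Worked example (added for illustration; not in the original): on a host
 * where sizeof(struct fiemap) == 32 and sizeof(struct fiemap_extent) == 56,
 * the bound is roughly (0xffffffff - 32) / 56, i.e. about 76 million
 * extents, so fm_extent_count * sizeof(struct fiemap_extent) cannot wrap a
 * 32-bit size below. The struct sizes are typical values, not guaranteed
 * constants.
 */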
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        free(fm);
    }
    return ret;
}
#endif
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
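
/* Expansion sketch (added for illustration): an entry such as
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * would expand to
 *
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 *
 * i.e. target command, host command, printable name, access direction,
 * optional special handler and the thunk argument description. TIOCGWINSZ
 * is only an assumed example of what ioctls.h contains.
 */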
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* int argument */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { 0, 0, 0, 0 }
};
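
/* Usage note (added; not original code): each bitmask_transtbl row is
 * { target_mask, target_bits, host_mask, host_bits }. A translation such as
 *
 *     host_flags = target_to_host_bitmask(target_flags, mmap_flags_tbl);
 *
 * walks the rows and, for every row whose target bits match under the
 * target mask, ORs in the corresponding host bits; rows such as the
 * TARGET_NLDLY/NL0..NL1 pairs above handle multi-valued fields. This
 * describes how the helper is used here, not its full implementation.
 */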
3380 #if defined(TARGET_I386)
3382 /* NOTE: there is really one LDT for all the threads */
3383 static uint8_t *ldt_table
;
3385 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3392 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3393 if (size
> bytecount
)
3395 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3397 return -TARGET_EFAULT
;
3398 /* ??? Should this by byteswapped? */
3399 memcpy(p
, ldt_table
, size
);
3400 unlock_user(p
, ptr
, size
);
3404 /* XXX: add locking support */
3405 static abi_long
write_ldt(CPUX86State
*env
,
3406 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3408 struct target_modify_ldt_ldt_s ldt_info
;
3409 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3410 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3411 int seg_not_present
, useable
, lm
;
3412 uint32_t *lp
, entry_1
, entry_2
;
3414 if (bytecount
!= sizeof(ldt_info
))
3415 return -TARGET_EINVAL
;
3416 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3417 return -TARGET_EFAULT
;
3418 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3419 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3420 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3421 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3422 unlock_user_struct(target_ldt_info
, ptr
, 0);
3424 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3425 return -TARGET_EINVAL
;
3426 seg_32bit
= ldt_info
.flags
& 1;
3427 contents
= (ldt_info
.flags
>> 1) & 3;
3428 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3429 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3430 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3431 useable
= (ldt_info
.flags
>> 6) & 1;
3435 lm
= (ldt_info
.flags
>> 7) & 1;
3437 if (contents
== 3) {
3439 return -TARGET_EINVAL
;
3440 if (seg_not_present
== 0)
3441 return -TARGET_EINVAL
;
3443 /* allocate the LDT */
3445 env
->ldt
.base
= target_mmap(0,
3446 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3447 PROT_READ
|PROT_WRITE
,
3448 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3449 if (env
->ldt
.base
== -1)
3450 return -TARGET_ENOMEM
;
3451 memset(g2h(env
->ldt
.base
), 0,
3452 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3453 env
->ldt
.limit
= 0xffff;
3454 ldt_table
= g2h(env
->ldt
.base
);
3457 /* NOTE: same code as Linux kernel */
3458 /* Allow LDTs to be cleared by the user. */
3459 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3462 read_exec_only
== 1 &&
3464 limit_in_pages
== 0 &&
3465 seg_not_present
== 1 &&
3473 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3474 (ldt_info
.limit
& 0x0ffff);
3475 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3476 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3477 (ldt_info
.limit
& 0xf0000) |
3478 ((read_exec_only
^ 1) << 9) |
3480 ((seg_not_present
^ 1) << 15) |
3482 (limit_in_pages
<< 23) |
3486 entry_2
|= (useable
<< 20);
3488 /* Install the new entry ... */
3490 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3491 lp
[0] = tswap32(entry_1
);
3492 lp
[1] = tswap32(entry_2
);
3496 /* specific and weird i386 syscalls */
3497 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3498 unsigned long bytecount
)
3504 ret
= read_ldt(ptr
, bytecount
);
3507 ret
= write_ldt(env
, ptr
, bytecount
, 1);
3510 ret
= write_ldt(env
, ptr
, bytecount
, 0);
3513 ret
= -TARGET_ENOSYS
;
3519 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3520 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3522 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3523 struct target_modify_ldt_ldt_s ldt_info
;
3524 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3525 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3526 int seg_not_present
, useable
, lm
;
3527 uint32_t *lp
, entry_1
, entry_2
;
3530 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3531 if (!target_ldt_info
)
3532 return -TARGET_EFAULT
;
3533 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3534 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3535 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3536 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3537 if (ldt_info
.entry_number
== -1) {
3538 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
3539 if (gdt_table
[i
] == 0) {
3540 ldt_info
.entry_number
= i
;
3541 target_ldt_info
->entry_number
= tswap32(i
);
3546 unlock_user_struct(target_ldt_info
, ptr
, 1);
3548 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
3549 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
3550 return -TARGET_EINVAL
;
3551 seg_32bit
= ldt_info
.flags
& 1;
3552 contents
= (ldt_info
.flags
>> 1) & 3;
3553 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3554 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3555 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3556 useable
= (ldt_info
.flags
>> 6) & 1;
3560 lm
= (ldt_info
.flags
>> 7) & 1;
3563 if (contents
== 3) {
3564 if (seg_not_present
== 0)
3565 return -TARGET_EINVAL
;
3568 /* NOTE: same code as Linux kernel */
3569 /* Allow LDTs to be cleared by the user. */
3570 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3571 if ((contents
== 0 &&
3572 read_exec_only
== 1 &&
3574 limit_in_pages
== 0 &&
3575 seg_not_present
== 1 &&
3583 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3584 (ldt_info
.limit
& 0x0ffff);
3585 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3586 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3587 (ldt_info
.limit
& 0xf0000) |
3588 ((read_exec_only
^ 1) << 9) |
3590 ((seg_not_present
^ 1) << 15) |
3592 (limit_in_pages
<< 23) |
3597 /* Install the new entry ... */
3599 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
3600 lp
[0] = tswap32(entry_1
);
3601 lp
[1] = tswap32(entry_2
);
3605 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3607 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3608 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3609 uint32_t base_addr
, limit
, flags
;
3610 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
3611 int seg_not_present
, useable
, lm
;
3612 uint32_t *lp
, entry_1
, entry_2
;
3614 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3615 if (!target_ldt_info
)
3616 return -TARGET_EFAULT
;
3617 idx
= tswap32(target_ldt_info
->entry_number
);
3618 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
3619 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
3620 unlock_user_struct(target_ldt_info
, ptr
, 1);
3621 return -TARGET_EINVAL
;
3623 lp
= (uint32_t *)(gdt_table
+ idx
);
3624 entry_1
= tswap32(lp
[0]);
3625 entry_2
= tswap32(lp
[1]);
3627 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
3628 contents
= (entry_2
>> 10) & 3;
3629 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
3630 seg_32bit
= (entry_2
>> 22) & 1;
3631 limit_in_pages
= (entry_2
>> 23) & 1;
3632 useable
= (entry_2
>> 20) & 1;
3636 lm
= (entry_2
>> 21) & 1;
3638 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
3639 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
3640 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
3641 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
3642 base_addr
= (entry_1
>> 16) |
3643 (entry_2
& 0xff000000) |
3644 ((entry_2
& 0xff) << 16);
3645 target_ldt_info
->base_addr
= tswapl(base_addr
);
3646 target_ldt_info
->limit
= tswap32(limit
);
3647 target_ldt_info
->flags
= tswap32(flags
);
3648 unlock_user_struct(target_ldt_info
, ptr
, 1);
3651 #endif /* TARGET_I386 && TARGET_ABI32 */
3653 #ifndef TARGET_ABI32
3654 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
3661 case TARGET_ARCH_SET_GS
:
3662 case TARGET_ARCH_SET_FS
:
3663 if (code
== TARGET_ARCH_SET_GS
)
3667 cpu_x86_load_seg(env
, idx
, 0);
3668 env
->segs
[idx
].base
= addr
;
3670 case TARGET_ARCH_GET_GS
:
3671 case TARGET_ARCH_GET_FS
:
3672 if (code
== TARGET_ARCH_GET_GS
)
3676 val
= env
->segs
[idx
].base
;
3677 if (put_user(val
, addr
, abi_ulong
))
3678 return -TARGET_EFAULT
;
3681 ret
= -TARGET_EINVAL
;
3688 #endif /* defined(TARGET_I386) */
3690 #if defined(CONFIG_USE_NPTL)
3692 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
3694 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
3697 pthread_mutex_t mutex
;
3698 pthread_cond_t cond
;
3701 abi_ulong child_tidptr
;
3702 abi_ulong parent_tidptr
;
3706 static void *clone_func(void *arg
)
3708 new_thread_info
*info
= arg
;
3714 ts
= (TaskState
*)thread_env
->opaque
;
3715 info
->tid
= gettid();
3716 env
->host_tid
= info
->tid
;
3718 if (info
->child_tidptr
)
3719 put_user_u32(info
->tid
, info
->child_tidptr
);
3720 if (info
->parent_tidptr
)
3721 put_user_u32(info
->tid
, info
->parent_tidptr
);
3722 /* Enable signals. */
3723 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
3724 /* Signal to the parent that we're ready. */
3725 pthread_mutex_lock(&info
->mutex
);
3726 pthread_cond_broadcast(&info
->cond
);
3727 pthread_mutex_unlock(&info
->mutex
);
3728 /* Wait until the parent has finshed initializing the tls state. */
3729 pthread_mutex_lock(&clone_lock
);
3730 pthread_mutex_unlock(&clone_lock
);
3736 /* this stack is the equivalent of the kernel stack associated with a
3738 #define NEW_STACK_SIZE 8192
3740 static int clone_func(void *arg
)
3742 CPUState
*env
= arg
;
3749 /* do_fork() Must return host values and target errnos (unlike most
3750 do_*() functions). */
3751 static int do_fork(CPUState
*env
, unsigned int flags
, abi_ulong newsp
,
3752 abi_ulong parent_tidptr
, target_ulong newtls
,
3753 abi_ulong child_tidptr
)
3758 #if defined(CONFIG_USE_NPTL)
3759 unsigned int nptl_flags
;
3765 /* Emulate vfork() with fork() */
3766 if (flags
& CLONE_VFORK
)
3767 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
3769 if (flags
& CLONE_VM
) {
3770 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
3771 #if defined(CONFIG_USE_NPTL)
3772 new_thread_info info
;
3773 pthread_attr_t attr
;
3775 ts
= qemu_mallocz(sizeof(TaskState
));
3776 init_task_state(ts
);
3777 /* we create a new CPU instance. */
3778 new_env
= cpu_copy(env
);
3779 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3782 /* Init regs that differ from the parent. */
3783 cpu_clone_regs(new_env
, newsp
);
3784 new_env
->opaque
= ts
;
3785 ts
->bprm
= parent_ts
->bprm
;
3786 ts
->info
= parent_ts
->info
;
3787 #if defined(CONFIG_USE_NPTL)
3789 flags
&= ~CLONE_NPTL_FLAGS2
;
3791 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
3792 ts
->child_tidptr
= child_tidptr
;
3795 if (nptl_flags
& CLONE_SETTLS
)
3796 cpu_set_tls (new_env
, newtls
);
3798 /* Grab a mutex so that thread setup appears atomic. */
3799 pthread_mutex_lock(&clone_lock
);
3801 memset(&info
, 0, sizeof(info
));
3802 pthread_mutex_init(&info
.mutex
, NULL
);
3803 pthread_mutex_lock(&info
.mutex
);
3804 pthread_cond_init(&info
.cond
, NULL
);
3806 if (nptl_flags
& CLONE_CHILD_SETTID
)
3807 info
.child_tidptr
= child_tidptr
;
3808 if (nptl_flags
& CLONE_PARENT_SETTID
)
3809 info
.parent_tidptr
= parent_tidptr
;
3811 ret
= pthread_attr_init(&attr
);
3812 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
3813 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
3814 /* It is not safe to deliver signals until the child has finished
3815 initializing, so temporarily block all signals. */
3816 sigfillset(&sigmask
);
3817 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
3819 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
3820 /* TODO: Free new CPU state if thread creation failed. */
3822 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
3823 pthread_attr_destroy(&attr
);
3825 /* Wait for the child to initialize. */
3826 pthread_cond_wait(&info
.cond
, &info
.mutex
);
3828 if (flags
& CLONE_PARENT_SETTID
)
3829 put_user_u32(ret
, parent_tidptr
);
3833 pthread_mutex_unlock(&info
.mutex
);
3834 pthread_cond_destroy(&info
.cond
);
3835 pthread_mutex_destroy(&info
.mutex
);
3836 pthread_mutex_unlock(&clone_lock
);
3838 if (flags
& CLONE_NPTL_FLAGS2
)
3840 /* This is probably going to die very quickly, but do it anyway. */
3841 new_stack
= qemu_mallocz (NEW_STACK_SIZE
);
3843 ret
= __clone2(clone_func
, new_stack
, NEW_STACK_SIZE
, flags
, new_env
);
3845 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
3849 /* if no CLONE_VM, we consider it is a fork */
3850 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
3855 /* Child Process. */
3856 cpu_clone_regs(env
, newsp
);
3858 #if defined(CONFIG_USE_NPTL)
3859 /* There is a race condition here. The parent process could
3860 theoretically read the TID in the child process before the child
3861 tid is set. This would require using either ptrace
3862 (not implemented) or having *_tidptr to point at a shared memory
3863 mapping. We can't repeat the spinlock hack used above because
3864 the child process gets its own copy of the lock. */
3865 if (flags
& CLONE_CHILD_SETTID
)
3866 put_user_u32(gettid(), child_tidptr
);
3867 if (flags
& CLONE_PARENT_SETTID
)
3868 put_user_u32(gettid(), parent_tidptr
);
3869 ts
= (TaskState
*)env
->opaque
;
3870 if (flags
& CLONE_SETTLS
)
3871 cpu_set_tls (env
, newtls
);
3872 if (flags
& CLONE_CHILD_CLEARTID
)
3873 ts
->child_tidptr
= child_tidptr
;
3882 /* warning : doesn't handle linux specific flags... */
3883 static int target_to_host_fcntl_cmd(int cmd
)
3886 case TARGET_F_DUPFD
:
3887 case TARGET_F_GETFD
:
3888 case TARGET_F_SETFD
:
3889 case TARGET_F_GETFL
:
3890 case TARGET_F_SETFL
:
3892 case TARGET_F_GETLK
:
3894 case TARGET_F_SETLK
:
3896 case TARGET_F_SETLKW
:
3898 case TARGET_F_GETOWN
:
3900 case TARGET_F_SETOWN
:
3902 case TARGET_F_GETSIG
:
3904 case TARGET_F_SETSIG
:
3906 #if TARGET_ABI_BITS == 32
3907 case TARGET_F_GETLK64
:
3909 case TARGET_F_SETLK64
:
3911 case TARGET_F_SETLKW64
:
3914 case TARGET_F_SETLEASE
:
3916 case TARGET_F_GETLEASE
:
3918 #ifdef F_DUPFD_CLOEXEC
3919 case TARGET_F_DUPFD_CLOEXEC
:
3920 return F_DUPFD_CLOEXEC
;
3922 case TARGET_F_NOTIFY
:
3925 return -TARGET_EINVAL
;
3927 return -TARGET_EINVAL
;
3930 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
3933 struct target_flock
*target_fl
;
3934 struct flock64 fl64
;
3935 struct target_flock64
*target_fl64
;
3937 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
3939 if (host_cmd
== -TARGET_EINVAL
)
3943 case TARGET_F_GETLK
:
3944 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3945 return -TARGET_EFAULT
;
3946 fl
.l_type
= tswap16(target_fl
->l_type
);
3947 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3948 fl
.l_start
= tswapl(target_fl
->l_start
);
3949 fl
.l_len
= tswapl(target_fl
->l_len
);
3950 fl
.l_pid
= tswap32(target_fl
->l_pid
);
3951 unlock_user_struct(target_fl
, arg
, 0);
3952 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
3954 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
3955 return -TARGET_EFAULT
;
3956 target_fl
->l_type
= tswap16(fl
.l_type
);
3957 target_fl
->l_whence
= tswap16(fl
.l_whence
);
3958 target_fl
->l_start
= tswapl(fl
.l_start
);
3959 target_fl
->l_len
= tswapl(fl
.l_len
);
3960 target_fl
->l_pid
= tswap32(fl
.l_pid
);
3961 unlock_user_struct(target_fl
, arg
, 1);
3965 case TARGET_F_SETLK
:
3966 case TARGET_F_SETLKW
:
3967 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3968 return -TARGET_EFAULT
;
3969 fl
.l_type
= tswap16(target_fl
->l_type
);
3970 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3971 fl
.l_start
= tswapl(target_fl
->l_start
);
3972 fl
.l_len
= tswapl(target_fl
->l_len
);
3973 fl
.l_pid
= tswap32(target_fl
->l_pid
);
3974 unlock_user_struct(target_fl
, arg
, 0);
3975 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
3978 case TARGET_F_GETLK64
:
3979 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
3980 return -TARGET_EFAULT
;
3981 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
3982 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
3983 fl64
.l_start
= tswapl(target_fl64
->l_start
);
3984 fl64
.l_len
= tswapl(target_fl64
->l_len
);
3985 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
3986 unlock_user_struct(target_fl64
, arg
, 0);
3987 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
3989 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
3990 return -TARGET_EFAULT
;
3991 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
3992 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
3993 target_fl64
->l_start
= tswapl(fl64
.l_start
);
3994 target_fl64
->l_len
= tswapl(fl64
.l_len
);
3995 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
3996 unlock_user_struct(target_fl64
, arg
, 1);
3999 case TARGET_F_SETLK64
:
4000 case TARGET_F_SETLKW64
:
4001 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4002 return -TARGET_EFAULT
;
4003 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4004 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4005 fl64
.l_start
= tswapl(target_fl64
->l_start
);
4006 fl64
.l_len
= tswapl(target_fl64
->l_len
);
4007 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4008 unlock_user_struct(target_fl64
, arg
, 0);
4009 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4012 case TARGET_F_GETFL
:
4013 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4015 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4019 case TARGET_F_SETFL
:
4020 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4023 case TARGET_F_SETOWN
:
4024 case TARGET_F_GETOWN
:
4025 case TARGET_F_SETSIG
:
4026 case TARGET_F_GETSIG
:
4027 case TARGET_F_SETLEASE
:
4028 case TARGET_F_GETLEASE
:
4029 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4033 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4041 static inline int high2lowuid(int uid
)
4049 static inline int high2lowgid(int gid
)
4057 static inline int low2highuid(int uid
)
4059 if ((int16_t)uid
== -1)
4065 static inline int low2highgid(int gid
)
4067 if ((int16_t)gid
== -1)
4073 #endif /* USE_UID16 */
4075 void syscall_init(void)
4078 const argtype
*arg_type
;
4082 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4083 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4084 #include "syscall_types.h"
4086 #undef STRUCT_SPECIAL
4088 /* we patch the ioctl size if necessary. We rely on the fact that
4089 no ioctl has all the bits at '1' in the size field */
4091 while (ie
->target_cmd
!= 0) {
4092 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4093 TARGET_IOC_SIZEMASK
) {
4094 arg_type
= ie
->arg_type
;
4095 if (arg_type
[0] != TYPE_PTR
) {
4096 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4101 size
= thunk_type_size(arg_type
, 0);
4102 ie
->target_cmd
= (ie
->target_cmd
&
4103 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4104 (size
<< TARGET_IOC_SIZESHIFT
);
4107 /* Build target_to_host_errno_table[] table from
4108 * host_to_target_errno_table[]. */
4109 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
4110 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4112 /* automatic consistency check if same arch */
4113 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4114 (defined(__x86_64__) && defined(TARGET_X86_64))
4115 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4116 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4117 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
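
/* Worked example (added): a 32-bit guest passing offset 0x123456789 to
 * truncate64 splits it across two registers. On a little-endian ABI,
 * word0 = 0x23456789 and word1 = 0x00000001, so
 * target_offset64(word0, word1) = ((uint64_t)word1 << 32) | word0
 * reassembles 0x0000000123456789; big-endian targets swap the roles of the
 * two words. The concrete value is only an illustration.
 */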
#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi)
      {
        arg2 = arg3;
        arg3 = arg4;
      }
#endif
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi)
      {
        arg2 = arg3;
        arg3 = arg4;
      }
#endif
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                                abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapl(target_ts->tv_sec);
    host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                                struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapl(host_ts->tv_sec);
    target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
4200 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4201 static inline abi_long
host_to_target_stat64(void *cpu_env
,
4202 abi_ulong target_addr
,
4203 struct stat
*host_st
)
4206 if (((CPUARMState
*)cpu_env
)->eabi
) {
4207 struct target_eabi_stat64
*target_st
;
4209 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4210 return -TARGET_EFAULT
;
4211 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
4212 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4213 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4214 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4215 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4217 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4218 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4219 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4220 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4221 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4222 __put_user(host_st
->st_size
, &target_st
->st_size
);
4223 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4224 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4225 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4226 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4227 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4228 unlock_user_struct(target_st
, target_addr
, 1);
4232 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4233 struct target_stat
*target_st
;
4235 struct target_stat64
*target_st
;
4238 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4239 return -TARGET_EFAULT
;
4240 memset(target_st
, 0, sizeof(*target_st
));
4241 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4242 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4243 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4244 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4246 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4247 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4248 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4249 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4250 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4251 /* XXX: better use of kernel struct */
4252 __put_user(host_st
->st_size
, &target_st
->st_size
);
4253 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4254 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4255 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4256 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4257 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4258 unlock_user_struct(target_st
, target_addr
, 1);
4265 #if defined(CONFIG_USE_NPTL)
4266 /* ??? Using host futex calls even when target atomic operations
4267 are not really atomic probably breaks things. However implementing
4268 futexes locally would make futexes shared between multiple processes
4269 tricky. However they're probably useless because guest atomic
4270 operations won't work either. */
4271 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
4272 target_ulong uaddr2
, int val3
)
4274 struct timespec ts
, *pts
;
4277 /* ??? We assume FUTEX_* constants are the same on both host
4279 #ifdef FUTEX_CMD_MASK
4280 base_op
= op
& FUTEX_CMD_MASK
;
4288 target_to_host_timespec(pts
, timeout
);
4292 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
4295 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4297 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4299 case FUTEX_CMP_REQUEUE
:
4301 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4302 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4303 But the prototype takes a `struct timespec *'; insert casts
4304 to satisfy the compiler. We do not need to tswap TIMEOUT
4305 since it's not compared to guest memory. */
4306 pts
= (struct timespec
*)(uintptr_t) timeout
;
4307 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
4309 (base_op
== FUTEX_CMP_REQUEUE
4313 return -TARGET_ENOSYS
;
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
4332 int get_osversion(void)
4334 static int osversion
;
4335 struct new_utsname buf
;
4340 if (qemu_uname_release
&& *qemu_uname_release
) {
4341 s
= qemu_uname_release
;
4343 if (sys_uname(&buf
))
4348 for (i
= 0; i
< 3; i
++) {
4350 while (*s
>= '0' && *s
<= '9') {
4355 tmp
= (tmp
<< 8) + n
;
4363 /* do_syscall() should always have a single exit point at the end so
4364 that actions, such as logging of syscall results, can be performed.
4365 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4366 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
4367 abi_long arg2
, abi_long arg3
, abi_long arg4
,
4368 abi_long arg5
, abi_long arg6
)
4376 gemu_log("syscall %d", num
);
4379 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
    case TARGET_NR_exit:
#ifdef CONFIG_USE_NPTL
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */
        /* FIXME: This probably breaks if a signal arrives.  We should probably
           be disabling signals.  */
        if (first_cpu->next_cpu) {
            while (p && p != (CPUState *)cpu_env) {
                lastp = &p->next_cpu;
            /* If we didn't find the CPU for this thread then something is
            /* Remove the CPU from the list.  */
            *lastp = p->next_cpu;
            ts = ((CPUState *)cpu_env)->opaque;
            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
        gdb_exit(cpu_env, arg1);
        ret = 0; /* avoid warning */
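        /* The cases below follow a common pattern: lock_user()/lock_user_string()
           validates and maps the guest pointer arguments, the host syscall result
           is wrapped with get_errno(), and unlock_user() releases the mapping (its
           last argument is the number of bytes to copy back to guest memory,
           0 meaning none). */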
    case TARGET_NR_read:
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(read(arg1, p, arg3));
        unlock_user(p, arg2, ret);
    case TARGET_NR_write:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(write(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(open(path(p),
                             target_to_host_bitmask(arg2, fcntl_flags_tbl),
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_openat) && defined(__NR_openat)
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_openat(arg1,
                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
        unlock_user(p, arg2, 0);
    case TARGET_NR_close:
        ret = get_errno(close(arg1));
    case TARGET_NR_fork:
        ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        ret = get_errno(waitpid(arg1, &status, arg3));
        if (!is_error(ret) && arg2
            && put_user_s32(host_to_target_waitstatus(status), arg2))
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        ret = get_errno(waitid(arg1, arg2, &info, arg4));
        if (!is_error(ret) && arg3 && info.si_pid != 0) {
            if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
            host_to_target_siginfo(p, &info);
            unlock_user(p, arg3, sizeof(target_siginfo_t));
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(creat(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_link:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg2);
            ret = -TARGET_EFAULT;
            ret = get_errno(link(p, p2));
        unlock_user(p2, arg2, 0);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
    case TARGET_NR_linkat:
        p = lock_user_string(arg2);
        p2 = lock_user_string(arg4);
            ret = -TARGET_EFAULT;
            ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
        unlock_user(p, arg2, 0);
        unlock_user(p2, arg4, 0);
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_execve:
        char **argp, **envp;
        abi_ulong guest_argp;
        abi_ulong guest_envp;
        for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
            if (get_user_ual(addr, gp))
        for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
            if (get_user_ual(addr, gp))
        argp = alloca((argc + 1) * sizeof(void *));
        envp = alloca((envc + 1) * sizeof(void *));
        for (gp = guest_argp, q = argp; gp;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp))
            if (!(*q = lock_user_string(addr)))
        for (gp = guest_envp, q = envp; gp;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp))
            if (!(*q = lock_user_string(addr)))
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(execve(p, argp, envp));
        unlock_user(p, arg1, 0);
        ret = -TARGET_EFAULT;
        for (gp = guest_argp, q = argp; *q;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp)
            unlock_user(*q, addr, 0);
        for (gp = guest_envp, q = envp; *q;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp)
            unlock_user(*q, addr, 0);
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        ret = get_errno(time(&host_time));
            && put_user_sal(host_time, arg1))
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_break
    case TARGET_NR_break:
#ifdef TARGET_NR_oldstat
    case TARGET_NR_oldstat:
    case TARGET_NR_lseek:
        ret = get_errno(lseek(arg1, arg2, arg3));
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
        ret = get_errno(getpid());
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        ret = get_errno(getpid());
    case TARGET_NR_mount:
            /* need to look at the data field */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            p3 = lock_user_string(arg3);
            if (!p || !p2 || !p3)
                ret = -TARGET_EFAULT;
                /* FIXME - arg5 should be locked, but it isn't clear how to
                 * do that since it's not guaranteed to be a NULL-terminated
                    ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
                    ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
            unlock_user(p, arg1, 0);
            unlock_user(p2, arg2, 0);
            unlock_user(p3, arg3, 0);
#ifdef TARGET_NR_umount
    case TARGET_NR_umount:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
            if (get_user_sal(host_time, arg1))
            ret = get_errno(stime(&host_time));
    case TARGET_NR_ptrace:
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
#ifdef TARGET_NR_oldfstat
    case TARGET_NR_oldfstat:
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        ret = get_errno(pause());
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        struct utimbuf tbuf, *host_tbuf;
        struct target_utimbuf *target_tbuf;
            if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
            tbuf.actime = tswapl(target_tbuf->actime);
            tbuf.modtime = tswapl(target_tbuf->modtime);
            unlock_user_struct(target_tbuf, arg2, 0);
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(utime(p, host_tbuf));
        unlock_user(p, arg1, 0);
    case TARGET_NR_utimes:
        struct timeval *tvp, tv[2];
            if (copy_from_user_timeval(&tv[0], arg2)
                || copy_from_user_timeval(&tv[1],
                                          arg2 + sizeof(struct target_timeval)))
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(utimes(p, tvp));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
    case TARGET_NR_futimesat:
        struct timeval *tvp, tv[2];
            if (copy_from_user_timeval(&tv[0], arg3)
                || copy_from_user_timeval(&tv[1],
                                          arg3 + sizeof(struct target_timeval)))
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_futimesat(arg1, path(p), tvp));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_stty
    case TARGET_NR_stty:
#ifdef TARGET_NR_gtty
    case TARGET_NR_gtty:
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_faccessat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        ret = get_errno(nice(arg1));
#ifdef TARGET_NR_ftime
    case TARGET_NR_ftime:
    case TARGET_NR_sync:
    case TARGET_NR_kill:
        ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
    case TARGET_NR_rename:
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
                ret = -TARGET_EFAULT;
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
    case TARGET_NR_renameat:
            p = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
                ret = -TARGET_EFAULT;
                ret = get_errno(sys_renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        ret = get_errno(dup(arg1));
    case TARGET_NR_pipe:
        ret = do_pipe(cpu_env, arg1, 0, 0);
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        ret = do_pipe(cpu_env, arg1, arg2, 1);
    case TARGET_NR_times:
        struct target_tms *tmsp;
        ret = get_errno(times(&tms));
            tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
            tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
            tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
            tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
            tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
        ret = host_to_target_clock_t(ret);
#ifdef TARGET_NR_prof
    case TARGET_NR_prof:
#ifdef TARGET_NR_signal
    case TARGET_NR_signal:
    case TARGET_NR_acct:
            ret = get_errno(acct(NULL));
            if (!(p = lock_user_string(arg1)))
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
#ifdef TARGET_NR_umount2 /* not on alpha */
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_lock
    case TARGET_NR_lock:
    case TARGET_NR_ioctl:
        ret = do_ioctl(arg1, arg2, arg3);
    case TARGET_NR_fcntl:
        ret = do_fcntl(arg1, arg2, arg3);
#ifdef TARGET_NR_mpx
    case TARGET_NR_setpgid:
        ret = get_errno(setpgid(arg1, arg2));
#ifdef TARGET_NR_ulimit
    case TARGET_NR_ulimit:
#ifdef TARGET_NR_oldolduname
    case TARGET_NR_oldolduname:
    case TARGET_NR_umask:
        ret = get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
    case TARGET_NR_ustat:
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
        ret = get_errno(dup3(arg1, arg2, arg3));
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        ret = get_errno(getppid());
    case TARGET_NR_getpgrp:
        ret = get_errno(getpgrp());
    case TARGET_NR_setsid:
        ret = get_errno(setsid());
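    /* The sigaction/rt_sigaction cases that follow convert between the
       target's struct sigaction layout and the host's.  Alpha and MIPS use
       their own variants because the size and placement of sa_mask (and the
       handling of sa_restorer) differ from the generic old_sigaction layout. */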
5006 #ifdef TARGET_NR_sigaction
5007 case TARGET_NR_sigaction
:
5009 #if defined(TARGET_ALPHA)
5010 struct target_sigaction act
, oact
, *pact
= 0;
5011 struct target_old_sigaction
*old_act
;
5013 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5015 act
._sa_handler
= old_act
->_sa_handler
;
5016 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5017 act
.sa_flags
= old_act
->sa_flags
;
5018 act
.sa_restorer
= 0;
5019 unlock_user_struct(old_act
, arg2
, 0);
5022 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5023 if (!is_error(ret
) && arg3
) {
5024 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5026 old_act
->_sa_handler
= oact
._sa_handler
;
5027 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5028 old_act
->sa_flags
= oact
.sa_flags
;
5029 unlock_user_struct(old_act
, arg3
, 1);
5031 #elif defined(TARGET_MIPS)
5032 struct target_sigaction act
, oact
, *pact
, *old_act
;
5035 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5037 act
._sa_handler
= old_act
->_sa_handler
;
5038 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5039 act
.sa_flags
= old_act
->sa_flags
;
5040 unlock_user_struct(old_act
, arg2
, 0);
5046 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5048 if (!is_error(ret
) && arg3
) {
5049 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5051 old_act
->_sa_handler
= oact
._sa_handler
;
5052 old_act
->sa_flags
= oact
.sa_flags
;
5053 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5054 old_act
->sa_mask
.sig
[1] = 0;
5055 old_act
->sa_mask
.sig
[2] = 0;
5056 old_act
->sa_mask
.sig
[3] = 0;
5057 unlock_user_struct(old_act
, arg3
, 1);
5060 struct target_old_sigaction
*old_act
;
5061 struct target_sigaction act
, oact
, *pact
;
5063 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5065 act
._sa_handler
= old_act
->_sa_handler
;
5066 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5067 act
.sa_flags
= old_act
->sa_flags
;
5068 act
.sa_restorer
= old_act
->sa_restorer
;
5069 unlock_user_struct(old_act
, arg2
, 0);
5074 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5075 if (!is_error(ret
) && arg3
) {
5076 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5078 old_act
->_sa_handler
= oact
._sa_handler
;
5079 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5080 old_act
->sa_flags
= oact
.sa_flags
;
5081 old_act
->sa_restorer
= oact
.sa_restorer
;
5082 unlock_user_struct(old_act
, arg3
, 1);
5088 case TARGET_NR_rt_sigaction
:
5090 #if defined(TARGET_ALPHA)
5091 struct target_sigaction act
, oact
, *pact
= 0;
5092 struct target_rt_sigaction
*rt_act
;
5093 /* ??? arg4 == sizeof(sigset_t). */
5095 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5097 act
._sa_handler
= rt_act
->_sa_handler
;
5098 act
.sa_mask
= rt_act
->sa_mask
;
5099 act
.sa_flags
= rt_act
->sa_flags
;
5100 act
.sa_restorer
= arg5
;
5101 unlock_user_struct(rt_act
, arg2
, 0);
5104 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5105 if (!is_error(ret
) && arg3
) {
5106 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5108 rt_act
->_sa_handler
= oact
._sa_handler
;
5109 rt_act
->sa_mask
= oact
.sa_mask
;
5110 rt_act
->sa_flags
= oact
.sa_flags
;
5111 unlock_user_struct(rt_act
, arg3
, 1);
5114 struct target_sigaction
*act
;
5115 struct target_sigaction
*oact
;
5118 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5123 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5124 ret
= -TARGET_EFAULT
;
5125 goto rt_sigaction_fail
;
5129 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5132 unlock_user_struct(act
, arg2
, 0);
5134 unlock_user_struct(oact
, arg3
, 1);
5138 #ifdef TARGET_NR_sgetmask /* not on alpha */
5139 case TARGET_NR_sgetmask
:
5142 abi_ulong target_set
;
5143 sigprocmask(0, NULL
, &cur_set
);
5144 host_to_target_old_sigset(&target_set
, &cur_set
);
5149 #ifdef TARGET_NR_ssetmask /* not on alpha */
5150 case TARGET_NR_ssetmask
:
5152 sigset_t set
, oset
, cur_set
;
5153 abi_ulong target_set
= arg1
;
5154 sigprocmask(0, NULL
, &cur_set
);
5155 target_to_host_old_sigset(&set
, &target_set
);
5156 sigorset(&set
, &set
, &cur_set
);
5157 sigprocmask(SIG_SETMASK
, &set
, &oset
);
5158 host_to_target_old_sigset(&target_set
, &oset
);
5163 #ifdef TARGET_NR_sigprocmask
5164 case TARGET_NR_sigprocmask
:
5166 #if defined(TARGET_ALPHA)
5167 sigset_t set
, oldset
;
5172 case TARGET_SIG_BLOCK
:
5175 case TARGET_SIG_UNBLOCK
:
5178 case TARGET_SIG_SETMASK
:
5182 ret
= -TARGET_EINVAL
;
5186 target_to_host_old_sigset(&set
, &mask
);
5188 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
5190 if (!is_error(ret
)) {
5191 host_to_target_old_sigset(&mask
, &oldset
);
            ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5196 sigset_t set
, oldset
, *set_ptr
;
5201 case TARGET_SIG_BLOCK
:
5204 case TARGET_SIG_UNBLOCK
:
5207 case TARGET_SIG_SETMASK
:
5211 ret
= -TARGET_EINVAL
;
5214 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5216 target_to_host_old_sigset(&set
, p
);
5217 unlock_user(p
, arg2
, 0);
5223 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5224 if (!is_error(ret
) && arg3
) {
5225 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5227 host_to_target_old_sigset(p
, &oldset
);
5228 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5234 case TARGET_NR_rt_sigprocmask
:
5237 sigset_t set
, oldset
, *set_ptr
;
5241 case TARGET_SIG_BLOCK
:
5244 case TARGET_SIG_UNBLOCK
:
5247 case TARGET_SIG_SETMASK
:
5251 ret
= -TARGET_EINVAL
;
5254 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5256 target_to_host_sigset(&set
, p
);
5257 unlock_user(p
, arg2
, 0);
5263 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5264 if (!is_error(ret
) && arg3
) {
5265 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5267 host_to_target_sigset(p
, &oldset
);
5268 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5272 #ifdef TARGET_NR_sigpending
5273 case TARGET_NR_sigpending
:
5276 ret
= get_errno(sigpending(&set
));
5277 if (!is_error(ret
)) {
5278 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5280 host_to_target_old_sigset(p
, &set
);
5281 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5286 case TARGET_NR_rt_sigpending
:
5289 ret
= get_errno(sigpending(&set
));
5290 if (!is_error(ret
)) {
5291 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5293 host_to_target_sigset(p
, &set
);
5294 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5298 #ifdef TARGET_NR_sigsuspend
5299 case TARGET_NR_sigsuspend
:
5302 #if defined(TARGET_ALPHA)
5303 abi_ulong mask
= arg1
;
5304 target_to_host_old_sigset(&set
, &mask
);
5306 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5308 target_to_host_old_sigset(&set
, p
);
5309 unlock_user(p
, arg1
, 0);
5311 ret
= get_errno(sigsuspend(&set
));
5315 case TARGET_NR_rt_sigsuspend
:
5318 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5320 target_to_host_sigset(&set
, p
);
5321 unlock_user(p
, arg1
, 0);
5322 ret
= get_errno(sigsuspend(&set
));
5325 case TARGET_NR_rt_sigtimedwait
:
5328 struct timespec uts
, *puts
;
5331 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5333 target_to_host_sigset(&set
, p
);
5334 unlock_user(p
, arg1
, 0);
5337 target_to_host_timespec(puts
, arg3
);
5341 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
5342 if (!is_error(ret
) && arg2
) {
5343 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
5345 host_to_target_siginfo(p
, &uinfo
);
5346 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
5350 case TARGET_NR_rt_sigqueueinfo
:
5353 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
5355 target_to_host_siginfo(&uinfo
, p
);
5356 unlock_user(p
, arg1
, 0);
5357 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_sigreturn(cpu_env);
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_rt_sigreturn(cpu_env);
5370 case TARGET_NR_sethostname
:
5371 if (!(p
= lock_user_string(arg1
)))
5373 ret
= get_errno(sethostname(p
, arg2
));
5374 unlock_user(p
, arg1
, 0);
5376 case TARGET_NR_setrlimit
:
5378 int resource
= arg1
;
5379 struct target_rlimit
*target_rlim
;
5381 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
5383 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
5384 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
5385 unlock_user_struct(target_rlim
, arg2
, 0);
5386 ret
= get_errno(setrlimit(resource
, &rlim
));
5389 case TARGET_NR_getrlimit
:
5391 int resource
= arg1
;
5392 struct target_rlimit
*target_rlim
;
5395 ret
= get_errno(getrlimit(resource
, &rlim
));
5396 if (!is_error(ret
)) {
5397 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5399 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
5400 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
5401 unlock_user_struct(target_rlim
, arg2
, 1);
5405 case TARGET_NR_getrusage
:
5407 struct rusage rusage
;
5408 ret
= get_errno(getrusage(arg1
, &rusage
));
5409 if (!is_error(ret
)) {
5410 host_to_target_rusage(arg2
, &rusage
);
5414 case TARGET_NR_gettimeofday
:
5417 ret
= get_errno(gettimeofday(&tv
, NULL
));
5418 if (!is_error(ret
)) {
5419 if (copy_to_user_timeval(arg1
, &tv
))
5424 case TARGET_NR_settimeofday
:
5427 if (copy_from_user_timeval(&tv
, arg1
))
5429 ret
= get_errno(settimeofday(&tv
, NULL
));
5432 #ifdef TARGET_NR_select
5433 case TARGET_NR_select
:
5435 struct target_sel_arg_struct
*sel
;
5436 abi_ulong inp
, outp
, exp
, tvp
;
5439 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
5441 nsel
= tswapl(sel
->n
);
5442 inp
= tswapl(sel
->inp
);
5443 outp
= tswapl(sel
->outp
);
5444 exp
= tswapl(sel
->exp
);
5445 tvp
= tswapl(sel
->tvp
);
5446 unlock_user_struct(sel
, arg1
, 0);
5447 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
5451 #ifdef TARGET_NR_pselect6
5452 case TARGET_NR_pselect6
:
5453 goto unimplemented_nowarn
;
5455 case TARGET_NR_symlink
:
5458 p
= lock_user_string(arg1
);
5459 p2
= lock_user_string(arg2
);
5461 ret
= -TARGET_EFAULT
;
5463 ret
= get_errno(symlink(p
, p2
));
5464 unlock_user(p2
, arg2
, 0);
5465 unlock_user(p
, arg1
, 0);
5468 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5469 case TARGET_NR_symlinkat
:
5472 p
= lock_user_string(arg1
);
5473 p2
= lock_user_string(arg3
);
5475 ret
= -TARGET_EFAULT
;
5477 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
5478 unlock_user(p2
, arg3
, 0);
5479 unlock_user(p
, arg1
, 0);
#ifdef TARGET_NR_oldlstat
    case TARGET_NR_oldlstat:
    case TARGET_NR_readlink:
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                ret = -TARGET_EFAULT;
                if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
                    char real[PATH_MAX];
                    temp = realpath(exec_path, real);
                    ret = (temp == NULL) ? get_errno(-1) : strlen(real);
                    snprintf((char *)p2, arg3, "%s", real);
                    ret = get_errno(readlink(path(p), p2, arg3));
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
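    /* The /proc/self/exe special case above reports the path of the emulated
       binary (exec_path) rather than the QEMU executable, so guest programs
       that inspect their own path see the expected answer. */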
5508 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5509 case TARGET_NR_readlinkat
:
5512 p
= lock_user_string(arg2
);
5513 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
5515 ret
= -TARGET_EFAULT
;
5517 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
5518 unlock_user(p2
, arg3
, ret
);
5519 unlock_user(p
, arg2
, 0);
5523 #ifdef TARGET_NR_uselib
5524 case TARGET_NR_uselib
:
5527 #ifdef TARGET_NR_swapon
5528 case TARGET_NR_swapon
:
5529 if (!(p
= lock_user_string(arg1
)))
5531 ret
= get_errno(swapon(p
, arg2
));
5532 unlock_user(p
, arg1
, 0);
5535 case TARGET_NR_reboot
:
5537 #ifdef TARGET_NR_readdir
5538 case TARGET_NR_readdir
:
5541 #ifdef TARGET_NR_mmap
5542 case TARGET_NR_mmap
:
5543 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE)
5546 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
5547 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
5555 unlock_user(v
, arg1
, 0);
5556 ret
= get_errno(target_mmap(v1
, v2
, v3
,
5557 target_to_host_bitmask(v4
, mmap_flags_tbl
),
5561 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5562 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5568 #ifdef TARGET_NR_mmap2
5569 case TARGET_NR_mmap2
:
5571 #define MMAP_SHIFT 12
5573 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5574 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5576 arg6
<< MMAP_SHIFT
));
5579 case TARGET_NR_munmap
:
5580 ret
= get_errno(target_munmap(arg1
, arg2
));
5582 case TARGET_NR_mprotect
:
5584 TaskState
*ts
= ((CPUState
*)cpu_env
)->opaque
;
5585 /* Special hack to detect libc making the stack executable. */
5586 if ((arg3
& PROT_GROWSDOWN
)
5587 && arg1
>= ts
->info
->stack_limit
5588 && arg1
<= ts
->info
->start_stack
) {
5589 arg3
&= ~PROT_GROWSDOWN
;
5590 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
5591 arg1
= ts
->info
->stack_limit
;
5594 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
5596 #ifdef TARGET_NR_mremap
5597 case TARGET_NR_mremap
:
5598 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
5601 /* ??? msync/mlock/munlock are broken for softmmu. */
5602 #ifdef TARGET_NR_msync
5603 case TARGET_NR_msync
:
5604 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
5607 #ifdef TARGET_NR_mlock
5608 case TARGET_NR_mlock
:
5609 ret
= get_errno(mlock(g2h(arg1
), arg2
));
5612 #ifdef TARGET_NR_munlock
5613 case TARGET_NR_munlock
:
5614 ret
= get_errno(munlock(g2h(arg1
), arg2
));
5617 #ifdef TARGET_NR_mlockall
5618 case TARGET_NR_mlockall
:
5619 ret
= get_errno(mlockall(arg1
));
5622 #ifdef TARGET_NR_munlockall
5623 case TARGET_NR_munlockall
:
5624 ret
= get_errno(munlockall());
5627 case TARGET_NR_truncate
:
5628 if (!(p
= lock_user_string(arg1
)))
5630 ret
= get_errno(truncate(p
, arg2
));
5631 unlock_user(p
, arg1
, 0);
5633 case TARGET_NR_ftruncate
:
5634 ret
= get_errno(ftruncate(arg1
, arg2
));
5636 case TARGET_NR_fchmod
:
5637 ret
= get_errno(fchmod(arg1
, arg2
));
5639 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5640 case TARGET_NR_fchmodat
:
5641 if (!(p
= lock_user_string(arg2
)))
5643 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
5644 unlock_user(p
, arg2
, 0);
5647 case TARGET_NR_getpriority
:
5648 /* libc does special remapping of the return value of
5649 * sys_getpriority() so it's just easiest to call
5650 * sys_getpriority() directly rather than through libc. */
5651 ret
= get_errno(sys_getpriority(arg1
, arg2
));
5653 case TARGET_NR_setpriority
:
5654 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
5656 #ifdef TARGET_NR_profil
5657 case TARGET_NR_profil
:
5660 case TARGET_NR_statfs
:
5661 if (!(p
= lock_user_string(arg1
)))
5663 ret
= get_errno(statfs(path(p
), &stfs
));
5664 unlock_user(p
, arg1
, 0);
5666 if (!is_error(ret
)) {
5667 struct target_statfs
*target_stfs
;
5669 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
5671 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5672 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5673 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5674 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5675 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5676 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5677 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5678 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5679 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5680 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5681 unlock_user_struct(target_stfs
, arg2
, 1);
5684 case TARGET_NR_fstatfs
:
5685 ret
= get_errno(fstatfs(arg1
, &stfs
));
5686 goto convert_statfs
;
5687 #ifdef TARGET_NR_statfs64
5688 case TARGET_NR_statfs64
:
5689 if (!(p
= lock_user_string(arg1
)))
5691 ret
= get_errno(statfs(path(p
), &stfs
));
5692 unlock_user(p
, arg1
, 0);
5694 if (!is_error(ret
)) {
5695 struct target_statfs64
*target_stfs
;
5697 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
5699 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5700 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5701 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5702 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5703 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5704 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5705 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5706 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5707 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5708 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5709 unlock_user_struct(target_stfs
, arg3
, 1);
5712 case TARGET_NR_fstatfs64
:
5713 ret
= get_errno(fstatfs(arg1
, &stfs
));
5714 goto convert_statfs64
;
5716 #ifdef TARGET_NR_ioperm
5717 case TARGET_NR_ioperm
:
5720 #ifdef TARGET_NR_socketcall
5721 case TARGET_NR_socketcall
:
5722 ret
= do_socketcall(arg1
, arg2
);
5725 #ifdef TARGET_NR_accept
5726 case TARGET_NR_accept
:
5727 ret
= do_accept(arg1
, arg2
, arg3
);
5730 #ifdef TARGET_NR_bind
5731 case TARGET_NR_bind
:
5732 ret
= do_bind(arg1
, arg2
, arg3
);
5735 #ifdef TARGET_NR_connect
5736 case TARGET_NR_connect
:
5737 ret
= do_connect(arg1
, arg2
, arg3
);
5740 #ifdef TARGET_NR_getpeername
5741 case TARGET_NR_getpeername
:
5742 ret
= do_getpeername(arg1
, arg2
, arg3
);
5745 #ifdef TARGET_NR_getsockname
5746 case TARGET_NR_getsockname
:
5747 ret
= do_getsockname(arg1
, arg2
, arg3
);
5750 #ifdef TARGET_NR_getsockopt
5751 case TARGET_NR_getsockopt
:
5752 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
5755 #ifdef TARGET_NR_listen
5756 case TARGET_NR_listen
:
5757 ret
= get_errno(listen(arg1
, arg2
));
5760 #ifdef TARGET_NR_recv
5761 case TARGET_NR_recv
:
5762 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
5765 #ifdef TARGET_NR_recvfrom
5766 case TARGET_NR_recvfrom
:
5767 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5770 #ifdef TARGET_NR_recvmsg
5771 case TARGET_NR_recvmsg
:
5772 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
5775 #ifdef TARGET_NR_send
5776 case TARGET_NR_send
:
5777 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
5780 #ifdef TARGET_NR_sendmsg
5781 case TARGET_NR_sendmsg
:
5782 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
5785 #ifdef TARGET_NR_sendto
5786 case TARGET_NR_sendto
:
5787 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5790 #ifdef TARGET_NR_shutdown
5791 case TARGET_NR_shutdown
:
5792 ret
= get_errno(shutdown(arg1
, arg2
));
5795 #ifdef TARGET_NR_socket
5796 case TARGET_NR_socket
:
5797 ret
= do_socket(arg1
, arg2
, arg3
);
5800 #ifdef TARGET_NR_socketpair
5801 case TARGET_NR_socketpair
:
5802 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
5805 #ifdef TARGET_NR_setsockopt
5806 case TARGET_NR_setsockopt
:
5807 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
5811 case TARGET_NR_syslog
:
5812 if (!(p
= lock_user_string(arg2
)))
5814 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
5815 unlock_user(p
, arg2
, 0);
5818 case TARGET_NR_setitimer
:
5820 struct itimerval value
, ovalue
, *pvalue
;
5824 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
5825 || copy_from_user_timeval(&pvalue
->it_value
,
5826 arg2
+ sizeof(struct target_timeval
)))
5831 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
5832 if (!is_error(ret
) && arg3
) {
5833 if (copy_to_user_timeval(arg3
,
5834 &ovalue
.it_interval
)
5835 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
5841 case TARGET_NR_getitimer
:
5843 struct itimerval value
;
5845 ret
= get_errno(getitimer(arg1
, &value
));
5846 if (!is_error(ret
) && arg2
) {
5847 if (copy_to_user_timeval(arg2
,
5849 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
5855 case TARGET_NR_stat
:
5856 if (!(p
= lock_user_string(arg1
)))
5858 ret
= get_errno(stat(path(p
), &st
));
5859 unlock_user(p
, arg1
, 0);
5861 case TARGET_NR_lstat
:
5862 if (!(p
= lock_user_string(arg1
)))
5864 ret
= get_errno(lstat(path(p
), &st
));
5865 unlock_user(p
, arg1
, 0);
5867 case TARGET_NR_fstat
:
5869 ret
= get_errno(fstat(arg1
, &st
));
5871 if (!is_error(ret
)) {
5872 struct target_stat
*target_st
;
5874 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
5876 memset(target_st
, 0, sizeof(*target_st
));
5877 __put_user(st
.st_dev
, &target_st
->st_dev
);
5878 __put_user(st
.st_ino
, &target_st
->st_ino
);
5879 __put_user(st
.st_mode
, &target_st
->st_mode
);
5880 __put_user(st
.st_uid
, &target_st
->st_uid
);
5881 __put_user(st
.st_gid
, &target_st
->st_gid
);
5882 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
5883 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
5884 __put_user(st
.st_size
, &target_st
->st_size
);
5885 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
5886 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
5887 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
5888 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
5889 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
5890 unlock_user_struct(target_st
, arg2
, 1);
5894 #ifdef TARGET_NR_olduname
5895 case TARGET_NR_olduname
:
5898 #ifdef TARGET_NR_iopl
5899 case TARGET_NR_iopl
:
5902 case TARGET_NR_vhangup
:
5903 ret
= get_errno(vhangup());
5905 #ifdef TARGET_NR_idle
5906 case TARGET_NR_idle
:
5909 #ifdef TARGET_NR_syscall
5910 case TARGET_NR_syscall
:
5911 ret
= do_syscall(cpu_env
,arg1
& 0xffff,arg2
,arg3
,arg4
,arg5
,arg6
,0);
5914 case TARGET_NR_wait4
:
5917 abi_long status_ptr
= arg2
;
5918 struct rusage rusage
, *rusage_ptr
;
5919 abi_ulong target_rusage
= arg4
;
5921 rusage_ptr
= &rusage
;
5924 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
5925 if (!is_error(ret
)) {
5927 status
= host_to_target_waitstatus(status
);
5928 if (put_user_s32(status
, status_ptr
))
5932 host_to_target_rusage(target_rusage
, &rusage
);
5936 #ifdef TARGET_NR_swapoff
5937 case TARGET_NR_swapoff
:
5938 if (!(p
= lock_user_string(arg1
)))
5940 ret
= get_errno(swapoff(p
));
5941 unlock_user(p
, arg1
, 0);
5944 case TARGET_NR_sysinfo
:
5946 struct target_sysinfo
*target_value
;
5947 struct sysinfo value
;
5948 ret
= get_errno(sysinfo(&value
));
5949 if (!is_error(ret
) && arg1
)
5951 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
5953 __put_user(value
.uptime
, &target_value
->uptime
);
5954 __put_user(value
.loads
[0], &target_value
->loads
[0]);
5955 __put_user(value
.loads
[1], &target_value
->loads
[1]);
5956 __put_user(value
.loads
[2], &target_value
->loads
[2]);
5957 __put_user(value
.totalram
, &target_value
->totalram
);
5958 __put_user(value
.freeram
, &target_value
->freeram
);
5959 __put_user(value
.sharedram
, &target_value
->sharedram
);
5960 __put_user(value
.bufferram
, &target_value
->bufferram
);
5961 __put_user(value
.totalswap
, &target_value
->totalswap
);
5962 __put_user(value
.freeswap
, &target_value
->freeswap
);
5963 __put_user(value
.procs
, &target_value
->procs
);
5964 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
5965 __put_user(value
.freehigh
, &target_value
->freehigh
);
5966 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
5967 unlock_user_struct(target_value
, arg1
, 1);
5971 #ifdef TARGET_NR_ipc
5973 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5976 #ifdef TARGET_NR_semget
5977 case TARGET_NR_semget
:
5978 ret
= get_errno(semget(arg1
, arg2
, arg3
));
5981 #ifdef TARGET_NR_semop
5982 case TARGET_NR_semop
:
5983 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
5986 #ifdef TARGET_NR_semctl
5987 case TARGET_NR_semctl
:
5988 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
5991 #ifdef TARGET_NR_msgctl
5992 case TARGET_NR_msgctl
:
5993 ret
= do_msgctl(arg1
, arg2
, arg3
);
5996 #ifdef TARGET_NR_msgget
5997 case TARGET_NR_msgget
:
5998 ret
= get_errno(msgget(arg1
, arg2
));
6001 #ifdef TARGET_NR_msgrcv
6002 case TARGET_NR_msgrcv
:
6003 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
6006 #ifdef TARGET_NR_msgsnd
6007 case TARGET_NR_msgsnd
:
6008 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
6011 #ifdef TARGET_NR_shmget
6012 case TARGET_NR_shmget
:
6013 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
6016 #ifdef TARGET_NR_shmctl
6017 case TARGET_NR_shmctl
:
6018 ret
= do_shmctl(arg1
, arg2
, arg3
);
6021 #ifdef TARGET_NR_shmat
6022 case TARGET_NR_shmat
:
6023 ret
= do_shmat(arg1
, arg2
, arg3
);
6026 #ifdef TARGET_NR_shmdt
6027 case TARGET_NR_shmdt
:
6028 ret
= do_shmdt(arg1
);
6031 case TARGET_NR_fsync
:
6032 ret
= get_errno(fsync(arg1
));
6034 case TARGET_NR_clone
:
6035 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6036 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
6037 #elif defined(TARGET_CRIS)
6038 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
6040 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
6043 #ifdef __NR_exit_group
6044 /* new thread calls */
6045 case TARGET_NR_exit_group
:
6049 gdb_exit(cpu_env
, arg1
);
6050 ret
= get_errno(exit_group(arg1
));
6053 case TARGET_NR_setdomainname
:
6054 if (!(p
= lock_user_string(arg1
)))
6056 ret
= get_errno(setdomainname(p
, arg2
));
6057 unlock_user(p
, arg1
, 0);
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
            struct new_utsname *buf;
            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                strcpy(buf->machine, cpu_to_uname_machine(cpu_env));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release)
                    strcpy(buf->release, qemu_uname_release);
            unlock_user_struct(buf, arg1, 1);
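    /* Replacing buf->machine with cpu_to_uname_machine() makes the guest see
       the emulated architecture's uname machine string (rather than the
       host's), which matters for guests that dispatch on uname output. */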
6079 case TARGET_NR_modify_ldt
:
6080 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
6082 #if !defined(TARGET_X86_64)
6083 case TARGET_NR_vm86old
:
6085 case TARGET_NR_vm86
:
6086 ret
= do_vm86(cpu_env
, arg1
, arg2
);
6090 case TARGET_NR_adjtimex
:
6092 #ifdef TARGET_NR_create_module
6093 case TARGET_NR_create_module
:
6095 case TARGET_NR_init_module
:
6096 case TARGET_NR_delete_module
:
6097 #ifdef TARGET_NR_get_kernel_syms
6098 case TARGET_NR_get_kernel_syms
:
6101 case TARGET_NR_quotactl
:
6103 case TARGET_NR_getpgid
:
6104 ret
= get_errno(getpgid(arg1
));
6106 case TARGET_NR_fchdir
:
6107 ret
= get_errno(fchdir(arg1
));
6109 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6110 case TARGET_NR_bdflush
:
6113 #ifdef TARGET_NR_sysfs
6114 case TARGET_NR_sysfs
:
6117 case TARGET_NR_personality
:
6118 ret
= get_errno(personality(arg1
));
6120 #ifdef TARGET_NR_afs_syscall
6121 case TARGET_NR_afs_syscall
:
6124 #ifdef TARGET_NR__llseek /* Not on alpha */
6125 case TARGET_NR__llseek
:
6127 #if !defined(__NR_llseek)
6128 ret
= get_errno(lseek(arg1
, ((uint64_t )arg2
<< 32) | arg3
, arg5
));
6129 if (put_user_s64(ret
, arg4
))
6133 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
6134 if (put_user_s64(res
, arg4
))
6140 case TARGET_NR_getdents
:
6141 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6143 struct target_dirent
*target_dirp
;
6144 struct linux_dirent
*dirp
;
6145 abi_long count
= arg3
;
6147 dirp
= malloc(count
);
6149 ret
= -TARGET_ENOMEM
;
6153 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6154 if (!is_error(ret
)) {
6155 struct linux_dirent
*de
;
6156 struct target_dirent
*tde
;
6158 int reclen
, treclen
;
6159 int count1
, tnamelen
;
6163 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6167 reclen
= de
->d_reclen
;
6168 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
6169 tde
->d_reclen
= tswap16(treclen
);
6170 tde
->d_ino
= tswapl(de
->d_ino
);
6171 tde
->d_off
= tswapl(de
->d_off
);
6172 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
6175 /* XXX: may not be correct */
6176 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
6177 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6179 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
6183 unlock_user(target_dirp
, arg2
, ret
);
6189 struct linux_dirent
*dirp
;
6190 abi_long count
= arg3
;
6192 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6194 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6195 if (!is_error(ret
)) {
6196 struct linux_dirent
*de
;
6201 reclen
= de
->d_reclen
;
6204 de
->d_reclen
= tswap16(reclen
);
6205 tswapls(&de
->d_ino
);
6206 tswapls(&de
->d_off
);
6207 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6211 unlock_user(dirp
, arg2
, ret
);
6215 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6216 case TARGET_NR_getdents64
:
6218 struct linux_dirent64
*dirp
;
6219 abi_long count
= arg3
;
6220 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6222 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
6223 if (!is_error(ret
)) {
6224 struct linux_dirent64
*de
;
6229 reclen
= de
->d_reclen
;
6232 de
->d_reclen
= tswap16(reclen
);
6233 tswap64s((uint64_t *)&de
->d_ino
);
6234 tswap64s((uint64_t *)&de
->d_off
);
6235 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
6239 unlock_user(dirp
, arg2
, ret
);
6242 #endif /* TARGET_NR_getdents64 */
6243 #ifdef TARGET_NR__newselect
6244 case TARGET_NR__newselect
:
6245 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6248 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6249 # ifdef TARGET_NR_poll
6250 case TARGET_NR_poll
:
6252 # ifdef TARGET_NR_ppoll
6253 case TARGET_NR_ppoll
:
6256 struct target_pollfd
*target_pfd
;
6257 unsigned int nfds
= arg2
;
6262 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
6266 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
6267 for(i
= 0; i
< nfds
; i
++) {
6268 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
6269 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
6272 # ifdef TARGET_NR_ppoll
6273 if (num
== TARGET_NR_ppoll
) {
6274 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
6275 target_sigset_t
*target_set
;
6276 sigset_t _set
, *set
= &_set
;
6279 if (target_to_host_timespec(timeout_ts
, arg3
)) {
6280 unlock_user(target_pfd
, arg1
, 0);
6288 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
6290 unlock_user(target_pfd
, arg1
, 0);
6293 target_to_host_sigset(set
, target_set
);
6298 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
6300 if (!is_error(ret
) && arg3
) {
6301 host_to_target_timespec(arg3
, timeout_ts
);
6304 unlock_user(target_set
, arg4
, 0);
6308 ret
= get_errno(poll(pfd
, nfds
, timeout
));
6310 if (!is_error(ret
)) {
6311 for(i
= 0; i
< nfds
; i
++) {
6312 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
6314 ret
+= nfds
* (sizeof(struct target_pollfd
)
6315 - sizeof(struct pollfd
));
6317 unlock_user(target_pfd
, arg1
, ret
);
6321 case TARGET_NR_flock
:
6322 /* NOTE: the flock constant seems to be the same for every
6324 ret
= get_errno(flock(arg1
, arg2
));
6326 case TARGET_NR_readv
:
6331 vec
= alloca(count
* sizeof(struct iovec
));
6332 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
6334 ret
= get_errno(readv(arg1
, vec
, count
));
6335 unlock_iovec(vec
, arg2
, count
, 1);
6338 case TARGET_NR_writev
:
6343 vec
= alloca(count
* sizeof(struct iovec
));
6344 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
6346 ret
= get_errno(writev(arg1
, vec
, count
));
6347 unlock_iovec(vec
, arg2
, count
, 0);
6350 case TARGET_NR_getsid
:
6351 ret
= get_errno(getsid(arg1
));
6353 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6354 case TARGET_NR_fdatasync
:
6355 ret
= get_errno(fdatasync(arg1
));
6358 case TARGET_NR__sysctl
:
6359 /* We don't implement this, but ENOTDIR is always a safe
6361 ret
= -TARGET_ENOTDIR
;
6363 case TARGET_NR_sched_getaffinity
:
6365 unsigned int mask_size
;
6366 unsigned long *mask
;
6369 * sched_getaffinity needs multiples of ulong, so need to take
6370 * care of mismatches between target ulong and host ulong sizes.
6372 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6373 ret
= -TARGET_EINVAL
;
6376 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6378 mask
= alloca(mask_size
);
6379 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
6381 if (!is_error(ret
)) {
6383 /* Zero out any extra space kernel didn't fill */
6384 unsigned long zero
= arg2
- ret
;
6387 if (copy_to_user(arg3
+ zero
, p
, zero
)) {
6392 if (copy_to_user(arg3
, mask
, arg2
)) {
6399 case TARGET_NR_sched_setaffinity
:
6401 unsigned int mask_size
;
6402 unsigned long *mask
;
6405 * sched_setaffinity needs multiples of ulong, so need to take
6406 * care of mismatches between target ulong and host ulong sizes.
6408 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6409 ret
= -TARGET_EINVAL
;
6412 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6414 mask
= alloca(mask_size
);
6415 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
6418 memcpy(mask
, p
, arg2
);
6419 unlock_user_struct(p
, arg2
, 0);
6421 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
6424 case TARGET_NR_sched_setparam
:
6426 struct sched_param
*target_schp
;
6427 struct sched_param schp
;
6429 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
6431 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6432 unlock_user_struct(target_schp
, arg2
, 0);
6433 ret
= get_errno(sched_setparam(arg1
, &schp
));
6436 case TARGET_NR_sched_getparam
:
6438 struct sched_param
*target_schp
;
6439 struct sched_param schp
;
6440 ret
= get_errno(sched_getparam(arg1
, &schp
));
6441 if (!is_error(ret
)) {
6442 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
6444 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
6445 unlock_user_struct(target_schp
, arg2
, 1);
6449 case TARGET_NR_sched_setscheduler
:
6451 struct sched_param
*target_schp
;
6452 struct sched_param schp
;
6453 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
6455 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6456 unlock_user_struct(target_schp
, arg3
, 0);
6457 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
6460 case TARGET_NR_sched_getscheduler
:
6461 ret
= get_errno(sched_getscheduler(arg1
));
6463 case TARGET_NR_sched_yield
:
6464 ret
= get_errno(sched_yield());
6466 case TARGET_NR_sched_get_priority_max
:
6467 ret
= get_errno(sched_get_priority_max(arg1
));
6469 case TARGET_NR_sched_get_priority_min
:
6470 ret
= get_errno(sched_get_priority_min(arg1
));
6472 case TARGET_NR_sched_rr_get_interval
:
6475 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
6476 if (!is_error(ret
)) {
6477 host_to_target_timespec(arg2
, &ts
);
6481 case TARGET_NR_nanosleep
:
6483 struct timespec req
, rem
;
6484 target_to_host_timespec(&req
, arg1
);
6485 ret
= get_errno(nanosleep(&req
, &rem
));
6486 if (is_error(ret
) && arg2
) {
6487 host_to_target_timespec(arg2
, &rem
);
6491 #ifdef TARGET_NR_query_module
6492 case TARGET_NR_query_module
:
6495 #ifdef TARGET_NR_nfsservctl
6496 case TARGET_NR_nfsservctl
:
6499 case TARGET_NR_prctl
:
6502 case PR_GET_PDEATHSIG
:
6505 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
6506 if (!is_error(ret
) && arg2
6507 && put_user_ual(deathsig
, arg2
))
6512 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
6516 #ifdef TARGET_NR_arch_prctl
6517 case TARGET_NR_arch_prctl
:
6518 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6519 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
6525 #ifdef TARGET_NR_pread
6526 case TARGET_NR_pread
:
6528 if (((CPUARMState
*)cpu_env
)->eabi
)
6531 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6533 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
6534 unlock_user(p
, arg2
, ret
);
6536 case TARGET_NR_pwrite
:
6538 if (((CPUARMState
*)cpu_env
)->eabi
)
6541 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6543 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
6544 unlock_user(p
, arg2
, 0);
6547 #ifdef TARGET_NR_pread64
6548 case TARGET_NR_pread64
:
6549 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6551 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6552 unlock_user(p
, arg2
, ret
);
6554 case TARGET_NR_pwrite64
:
6555 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6557 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6558 unlock_user(p
, arg2
, 0);
6561 case TARGET_NR_getcwd
:
6562 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
6564 ret
= get_errno(sys_getcwd1(p
, arg2
));
6565 unlock_user(p
, arg1
, ret
);
6567 case TARGET_NR_capget
:
6569 case TARGET_NR_capset
:
6571 case TARGET_NR_sigaltstack
:
6572 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6573 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6574 defined(TARGET_M68K)
6575 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUState
*)cpu_env
));
6580 case TARGET_NR_sendfile
:
6582 #ifdef TARGET_NR_getpmsg
6583 case TARGET_NR_getpmsg
:
6586 #ifdef TARGET_NR_putpmsg
6587 case TARGET_NR_putpmsg
:
6590 #ifdef TARGET_NR_vfork
6591 case TARGET_NR_vfork
:
6592 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
6596 #ifdef TARGET_NR_ugetrlimit
6597 case TARGET_NR_ugetrlimit
:
6600 ret
= get_errno(getrlimit(arg1
, &rlim
));
6601 if (!is_error(ret
)) {
6602 struct target_rlimit
*target_rlim
;
6603 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6605 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6606 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6607 unlock_user_struct(target_rlim
, arg2
, 1);
6612 #ifdef TARGET_NR_truncate64
6613 case TARGET_NR_truncate64
:
6614 if (!(p
= lock_user_string(arg1
)))
6616 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
6617 unlock_user(p
, arg1
, 0);
6620 #ifdef TARGET_NR_ftruncate64
6621 case TARGET_NR_ftruncate64
:
6622 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
6625 #ifdef TARGET_NR_stat64
6626 case TARGET_NR_stat64
:
6627 if (!(p
= lock_user_string(arg1
)))
6629 ret
= get_errno(stat(path(p
), &st
));
6630 unlock_user(p
, arg1
, 0);
6632 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6635 #ifdef TARGET_NR_lstat64
6636 case TARGET_NR_lstat64
:
6637 if (!(p
= lock_user_string(arg1
)))
6639 ret
= get_errno(lstat(path(p
), &st
));
6640 unlock_user(p
, arg1
, 0);
6642 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6645 #ifdef TARGET_NR_fstat64
6646 case TARGET_NR_fstat64
:
6647 ret
= get_errno(fstat(arg1
, &st
));
6649 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6652 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6653 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6654 #ifdef TARGET_NR_fstatat64
6655 case TARGET_NR_fstatat64
:
6657 #ifdef TARGET_NR_newfstatat
6658 case TARGET_NR_newfstatat
:
6660 if (!(p
= lock_user_string(arg2
)))
6662 #ifdef __NR_fstatat64
6663 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
6665 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
6668 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
6672 case TARGET_NR_lchown
:
6673 if (!(p
= lock_user_string(arg1
)))
6675 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6676 unlock_user(p
, arg1
, 0);
6678 case TARGET_NR_getuid
:
6679 ret
= get_errno(high2lowuid(getuid()));
6681 case TARGET_NR_getgid
:
6682 ret
= get_errno(high2lowgid(getgid()));
6684 case TARGET_NR_geteuid
:
6685 ret
= get_errno(high2lowuid(geteuid()));
6687 case TARGET_NR_getegid
:
6688 ret
= get_errno(high2lowgid(getegid()));
6690 case TARGET_NR_setreuid
:
6691 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
6693 case TARGET_NR_setregid
:
6694 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
6696 case TARGET_NR_getgroups
:
6698 int gidsetsize
= arg1
;
6699 uint16_t *target_grouplist
;
6703 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6704 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6705 if (gidsetsize
== 0)
6707 if (!is_error(ret
)) {
6708 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
6709 if (!target_grouplist
)
6711 for(i
= 0;i
< ret
; i
++)
6712 target_grouplist
[i
] = tswap16(grouplist
[i
]);
6713 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
6717 case TARGET_NR_setgroups
:
6719 int gidsetsize
= arg1
;
6720 uint16_t *target_grouplist
;
6724 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6725 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
6726 if (!target_grouplist
) {
6727 ret
= -TARGET_EFAULT
;
6730 for(i
= 0;i
< gidsetsize
; i
++)
6731 grouplist
[i
] = tswap16(target_grouplist
[i
]);
6732 unlock_user(target_grouplist
, arg2
, 0);
6733 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6736 case TARGET_NR_fchown
:
6737 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
6739 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6740 case TARGET_NR_fchownat
:
6741 if (!(p
= lock_user_string(arg2
)))
6743 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
6744 unlock_user(p
, arg2
, 0);
6747 #ifdef TARGET_NR_setresuid
6748 case TARGET_NR_setresuid
:
6749 ret
= get_errno(setresuid(low2highuid(arg1
),
6751 low2highuid(arg3
)));
6754 #ifdef TARGET_NR_getresuid
6755 case TARGET_NR_getresuid
:
6757 uid_t ruid
, euid
, suid
;
6758 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6759 if (!is_error(ret
)) {
6760 if (put_user_u16(high2lowuid(ruid
), arg1
)
6761 || put_user_u16(high2lowuid(euid
), arg2
)
6762 || put_user_u16(high2lowuid(suid
), arg3
))
6768 #ifdef TARGET_NR_getresgid
6769 case TARGET_NR_setresgid
:
6770 ret
= get_errno(setresgid(low2highgid(arg1
),
6772 low2highgid(arg3
)));
6775 #ifdef TARGET_NR_getresgid
6776 case TARGET_NR_getresgid
:
6778 gid_t rgid
, egid
, sgid
;
6779 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6780 if (!is_error(ret
)) {
6781 if (put_user_u16(high2lowgid(rgid
), arg1
)
6782 || put_user_u16(high2lowgid(egid
), arg2
)
6783 || put_user_u16(high2lowgid(sgid
), arg3
))
6789 case TARGET_NR_chown
:
6790 if (!(p
= lock_user_string(arg1
)))
6792 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6793 unlock_user(p
, arg1
, 0);
6795 case TARGET_NR_setuid
:
6796 ret
= get_errno(setuid(low2highuid(arg1
)));
6798 case TARGET_NR_setgid
:
6799 ret
= get_errno(setgid(low2highgid(arg1
)));
6801 case TARGET_NR_setfsuid
:
6802 ret
= get_errno(setfsuid(arg1
));
6804 case TARGET_NR_setfsgid
:
6805 ret
= get_errno(setfsgid(arg1
));
6807 #endif /* USE_UID16 */
6809 #ifdef TARGET_NR_lchown32
6810 case TARGET_NR_lchown32
:
6811 if (!(p
= lock_user_string(arg1
)))
6813 ret
= get_errno(lchown(p
, arg2
, arg3
));
6814 unlock_user(p
, arg1
, 0);
6817 #ifdef TARGET_NR_getuid32
6818 case TARGET_NR_getuid32
:
6819 ret
= get_errno(getuid());
6823 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6824 /* Alpha specific */
6825 case TARGET_NR_getxuid
:
6829 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
6831 ret
= get_errno(getuid());
6834 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6835 /* Alpha specific */
6836 case TARGET_NR_getxgid
:
6840 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
6842 ret
= get_errno(getgid());
6845 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
6846 /* Alpha specific */
6847 case TARGET_NR_osf_getsysinfo
:
6848 ret
= -TARGET_EOPNOTSUPP
;
6850 case TARGET_GSI_IEEE_FP_CONTROL
:
6852 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
6854 /* Copied from linux ieee_fpcr_to_swcr. */
6855 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
6856 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
6857 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
6858 | SWCR_TRAP_ENABLE_DZE
6859 | SWCR_TRAP_ENABLE_OVF
);
6860 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
6861 | SWCR_TRAP_ENABLE_INE
);
6862 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
6863 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
6865 if (put_user_u64 (swcr
, arg2
))
6871 /* case GSI_IEEE_STATE_AT_SIGNAL:
6872 -- Not implemented in linux kernel.
6874 -- Retrieves current unaligned access state; not much used.
6876 -- Retrieves implver information; surely not used.
6878 -- Grabs a copy of the HWRPB; surely not used.
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t swcr, fpcr, orig_fpcr;

                if (get_user_u64 (swcr, arg2))
                    goto efault;
                orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
                fpcr = orig_fpcr & FPCR_DYN_MASK;

                /* Copied from linux ieee_swcr_to_fpcr.  */
                fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
                fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                                  | SWCR_TRAP_ENABLE_DZE
                                  | SWCR_TRAP_ENABLE_OVF)) << 48;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                                  | SWCR_TRAP_ENABLE_INE)) << 57;
                fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
                fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;

                cpu_alpha_store_fpcr (cpu_env, fpcr);
                ret = 0;

                if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
                    /* Old exceptions are not signaled.  */
                    fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);

                    /* If any exceptions set by this call, and are unmasked,
                       send a signal.  */
                }
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        break;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            sigprocmask(how, &set, &oldset);
            host_to_target_old_sigset(&mask, &oldset);
            ret = mask;
        }
        break;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        ret = get_errno(getgid());
        break;
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
        break;
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
        break;
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
        break;
#endif
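    /* The 32-bit group-list calls copy between the host's gid_t array and the
       guest's array of 32-bit IDs, byte-swapping each entry as needed. */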
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        break;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        ret = get_errno(fchown(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        ret = get_errno(setresuid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        ret = get_errno(setresgid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
        break;
#endif
    case TARGET_NR_pivot_root:
        goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
                goto efault;
            if (!(p = lock_user_string(arg3)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
            mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        {
            /*
             * arm_fadvise64_64 looks like fadvise64_64 but
             * with different argument order
             */
            abi_long temp;
            temp = arg3;
            arg3 = arg4;
            arg4 = temp;
        }
#endif
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        ret = -posix_fadvise(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        ret = get_errno(0);
        break;
#endif
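    /* fcntl64: on 32-bit targets the guest's struct flock64 layout differs
       from the host's, and ARM EABI adds extra alignment padding, so the
       lock description is converted field by field in both directions. */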
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        struct target_flock64 *target_fl;
#ifdef TARGET_ARM
        struct target_eabi_flock64 *target_efl;
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL)
            return cmd;

        switch (cmd) {
        case TARGET_F_GETLK64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (ret == 0) {
#ifdef TARGET_ARM
                if (((CPUARMState *)cpu_env)->eabi) {
                    if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                        goto efault;
                    target_efl->l_type = tswap16(fl.l_type);
                    target_efl->l_whence = tswap16(fl.l_whence);
                    target_efl->l_start = tswap64(fl.l_start);
                    target_efl->l_len = tswap64(fl.l_len);
                    target_efl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_efl, arg3, 1);
                } else
#endif
                {
                    if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                        goto efault;
                    target_fl->l_type = tswap16(fl.l_type);
                    target_fl->l_whence = tswap16(fl.l_whence);
                    target_fl->l_start = tswap64(fl.l_start);
                    target_fl->l_len = tswap64(fl.l_len);
                    target_fl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_fl, arg3, 1);
                }
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        break;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
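    /* readahead: on 32-bit ABIs the 64-bit file offset is passed as a pair of
       registers (shifted up by one slot on ARM EABI), so it is reassembled
       before calling the host syscall. */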
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
#ifdef TARGET_ARM
        if (((CPUARMState *)cpu_env)->eabi) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
#endif
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
#ifdef TARGET_NR_setxattr
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
    case TARGET_NR_fsetxattr:
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    case TARGET_NR_fgetxattr:
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    case TARGET_NR_flistxattr:
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    case TARGET_NR_fremovexattr:
        ret = -TARGET_EOPNOTSUPP;
        break;
#endif
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif

#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif
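    /* utimensat: a zero timespec pointer from the guest is passed through as
       NULL, which asks the kernel to set both timestamps to the current time;
       likewise a zero pathname selects the plain file-descriptor form. */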
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
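    /* POSIX message queues are passed straight through to the host mqueue
       API; only the mq_attr structures and timespecs are converted between
       target and host layout. */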
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr (&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        break;
    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user (p, arg2, arg3);
        }
        break;
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;
    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        break;
#endif
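    /* splice: the optional in/out offsets are read from guest memory when the
       corresponding pointer argument is non-zero; a zero argument means the
       file's own offset is used. */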
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(vmsplice(arg1, vec, count, arg4));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
        break;
#endif
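    /* sync_file_range: on 32-bit ABIs each 64-bit offset arrives split across
       two registers and is reassembled with target_offset64(); the "2"
       variant takes the same arguments in a different order. */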
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;