/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <qemu-common.h>
#include <sys/eventfd.h>
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#include "linux_loop.h"

#include "qemu-common.h"
#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                  \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                              \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);             \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5,type6,arg6)                                   \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                              \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);       \
}
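/*
 * Illustration (added, not part of the original source): an invocation such
 * as
 *
 *     _syscall2(int, sys_getpriority, int, which, int, who);
 *
 * expands to a thin static wrapper
 *
 *     static int sys_getpriority(int which, int who)
 *     { return syscall(__NR_sys_getpriority, which, who); }
 *
 * so every sys_* helper defined below is just a raw syscall(2) trampoline,
 * named through the __NR_sys_* aliases defined next.
 */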
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
#define __NR__llseek __NR_lseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
#if defined(TARGET_NR__llseek) && !defined (__x86_64__)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
  { 0, 0, 0, 0 }
};
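/*
 * Usage sketch (added for clarity, not in the original source): the table
 * above is consumed by the generic bitmask translation helper, e.g.
 * something like
 *
 *     host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * which rewrites each TARGET_O_* bit pattern into the matching host O_* bit,
 * so open()/fcntl() emulation does not depend on the two ABIs sharing flag
 * values.
 */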
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct the linux kernel uses).
   */

  bzero(buf, sizeof (*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);

  return (0);
}

#undef COPY_UTSNAME_FIELD
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
  return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
  return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat) && defined(USE_UID16)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
    gid_t group, int flags)
{
  return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
    const struct timeval times[2])
{
  return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath, int flags)
{
  return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
  return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
    dev_t dev)
{
  return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, ...)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      va_list ap;
      mode_t mode;

      /*
       * Get the 'mode' parameter and translate it to
       * host mode.
       */
      va_start(ap, flags);
      mode = va_arg(ap, mode_t);
      mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
      va_end(ap);

      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
  return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath)
{
  return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
  return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
  return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT  */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};
/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]           = TARGET_EIDRM,
    [ECHRNG]          = TARGET_ECHRNG,
    [EL2NSYNC]        = TARGET_EL2NSYNC,
    [EL3HLT]          = TARGET_EL3HLT,
    [EL3RST]          = TARGET_EL3RST,
    [ELNRNG]          = TARGET_ELNRNG,
    [EUNATCH]         = TARGET_EUNATCH,
    [ENOCSI]          = TARGET_ENOCSI,
    [EL2HLT]          = TARGET_EL2HLT,
    [EDEADLK]         = TARGET_EDEADLK,
    [ENOLCK]          = TARGET_ENOLCK,
    [EBADE]           = TARGET_EBADE,
    [EBADR]           = TARGET_EBADR,
    [EXFULL]          = TARGET_EXFULL,
    [ENOANO]          = TARGET_ENOANO,
    [EBADRQC]         = TARGET_EBADRQC,
    [EBADSLT]         = TARGET_EBADSLT,
    [EBFONT]          = TARGET_EBFONT,
    [ENOSTR]          = TARGET_ENOSTR,
    [ENODATA]         = TARGET_ENODATA,
    [ETIME]           = TARGET_ETIME,
    [ENOSR]           = TARGET_ENOSR,
    [ENONET]          = TARGET_ENONET,
    [ENOPKG]          = TARGET_ENOPKG,
    [EREMOTE]         = TARGET_EREMOTE,
    [ENOLINK]         = TARGET_ENOLINK,
    [EADV]            = TARGET_EADV,
    [ESRMNT]          = TARGET_ESRMNT,
    [ECOMM]           = TARGET_ECOMM,
    [EPROTO]          = TARGET_EPROTO,
    [EDOTDOT]         = TARGET_EDOTDOT,
    [EMULTIHOP]       = TARGET_EMULTIHOP,
    [EBADMSG]         = TARGET_EBADMSG,
    [ENAMETOOLONG]    = TARGET_ENAMETOOLONG,
    [EOVERFLOW]       = TARGET_EOVERFLOW,
    [ENOTUNIQ]        = TARGET_ENOTUNIQ,
    [EBADFD]          = TARGET_EBADFD,
    [EREMCHG]         = TARGET_EREMCHG,
    [ELIBACC]         = TARGET_ELIBACC,
    [ELIBBAD]         = TARGET_ELIBBAD,
    [ELIBSCN]         = TARGET_ELIBSCN,
    [ELIBMAX]         = TARGET_ELIBMAX,
    [ELIBEXEC]        = TARGET_ELIBEXEC,
    [EILSEQ]          = TARGET_EILSEQ,
    [ENOSYS]          = TARGET_ENOSYS,
    [ELOOP]           = TARGET_ELOOP,
    [ERESTART]        = TARGET_ERESTART,
    [ESTRPIPE]        = TARGET_ESTRPIPE,
    [ENOTEMPTY]       = TARGET_ENOTEMPTY,
    [EUSERS]          = TARGET_EUSERS,
    [ENOTSOCK]        = TARGET_ENOTSOCK,
    [EDESTADDRREQ]    = TARGET_EDESTADDRREQ,
    [EMSGSIZE]        = TARGET_EMSGSIZE,
    [EPROTOTYPE]      = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]     = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]      = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]    = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]    = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]      = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]   = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]        = TARGET_ENETDOWN,
    [ENETUNREACH]     = TARGET_ENETUNREACH,
    [ENETRESET]       = TARGET_ENETRESET,
    [ECONNABORTED]    = TARGET_ECONNABORTED,
    [ECONNRESET]      = TARGET_ECONNRESET,
    [ENOBUFS]         = TARGET_ENOBUFS,
    [EISCONN]         = TARGET_EISCONN,
    [ENOTCONN]        = TARGET_ENOTCONN,
    [EUCLEAN]         = TARGET_EUCLEAN,
    [ENOTNAM]         = TARGET_ENOTNAM,
    [ENAVAIL]         = TARGET_ENAVAIL,
    [EISNAM]          = TARGET_EISNAM,
    [EREMOTEIO]       = TARGET_EREMOTEIO,
    [ESHUTDOWN]       = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]    = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]       = TARGET_ETIMEDOUT,
    [ECONNREFUSED]    = TARGET_ECONNREFUSED,
    [EHOSTDOWN]       = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]    = TARGET_EHOSTUNREACH,
    [EALREADY]        = TARGET_EALREADY,
    [EINPROGRESS]     = TARGET_EINPROGRESS,
    [ESTALE]          = TARGET_ESTALE,
    [ECANCELED]       = TARGET_ECANCELED,
    [ENOMEDIUM]       = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]     = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]          = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]     = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]     = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]    = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]      = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
};
static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
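/*
 * Example of the calling convention (added, illustrative only): a host
 * syscall result is normally wrapped as
 *
 *     ret = get_errno(open(path, flags));
 *     if (is_error(ret))     // ret already holds a negated target errno
 *         return ret;        // e.g. -TARGET_ENOENT
 *
 * so every do_*() handler below can return either a result or a negated
 * target errno in the same abi_long value.
 */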
static abi_ulong target_brk;
static abi_ulong target_original_brk;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
}

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_ulong brk_page;
    abi_long mapped_addr;
    int new_alloc_size;

    if (!new_brk)
        return target_brk;
    if (new_brk < target_original_brk)
        return target_brk;

    brk_page = HOST_PAGE_ALIGN(target_brk);

    /* If the new brk is less than this, set it and we're done... */
    if (new_brk < brk_page) {
        target_brk = new_brk;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));

    if (!is_error(mapped_addr))
        target_brk = new_brk;

    return target_brk;
}
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
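/*
 * Layout note with a small example (added, not original): the target fd_set
 * is handled as nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS abi_ulong
 * words, so n = 70 descriptors on a 32-bit target ABI means 3 words, and
 * bit j of word i corresponds to descriptor k = i * TARGET_ABI_BITS + j in
 * the host fd_set.
 */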
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
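/*
 * Note (added): __get_user()/__put_user() byte-swap each field according to
 * the target ABI, so copy_from_user_timeval()/copy_to_user_timeval() work
 * unchanged for both same-endian and cross-endian guests; e.g. a big-endian
 * guest's tv_sec is swapped into host order before it reaches select().
 */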
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    if (rfd_addr) {
        if (copy_from_user_fdset(&rfds, rfd_addr, n))
            return -TARGET_EFAULT;
        rfds_ptr = &rfds;
    } else {
        rfds_ptr = NULL;
    }
    if (wfd_addr) {
        if (copy_from_user_fdset(&wfds, wfd_addr, n))
            return -TARGET_EFAULT;
        wfds_ptr = &wfds;
    } else {
        wfds_ptr = NULL;
    }
    if (efd_addr) {
        if (copy_from_user_fdset(&efds, efd_addr, n))
            return -TARGET_EFAULT;
        efds_ptr = &efds;
    } else {
        efds_ptr = NULL;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
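/*
 * Sketch of the data flow (added for clarity): guest memory -> host fd_set
 * via copy_from_user_fdset(), then the host select(2), then the surviving
 * bits and the (possibly updated) timeout are copied back with
 * copy_to_user_fdset()/copy_to_user_timeval() only when the host call did
 * not fail.
 */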
static abi_long do_pipe2(int host_pipe[], int flags)
{
    return pipe2(host_pipe, flags);
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, int flags)
{
    int host_pipe[2];
    abi_long ret;

    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
    if (is_error(ret))
        return get_errno(ret);
#if defined(TARGET_MIPS)
    ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
    ret = host_pipe[0];
#elif defined(TARGET_SH4)
    ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
    ret = host_pipe[0];
#else
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
#endif
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
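/*
 * Example of the AF_UNIX fixup above (added, illustrative): a guest that
 * passes addrlen = offsetof(struct sockaddr_un, sun_path) +
 * strlen("/tmp/sock") chops off the terminating '\0'; when the byte at
 * len-1 is non-zero and the following byte is zero, len is bumped by one so
 * the host kernel sees a properly terminated sun_path.
 */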
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* ??? Should this also swap msgh->name?  */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapl(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* ??? Should this also swap msgh->name?  */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapl(space);
    return 0;
}
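/*
 * Added note: only SCM_RIGHTS ancillary data is translated field by field
 * (each file descriptor individually byte-swapped); any other cmsg type is
 * logged as unsupported and copied verbatim, so its payload is not
 * endianness-converted here.
 */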
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;
        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;
        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
            break;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERCRED:
        case TARGET_SO_PEERNAME:
            /* These don't just return a single integer */
            goto unimplemented;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/*
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapl(target_vec[i].iov_base);
        vec[i].iov_len = tswapl(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if an element has an invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}

static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapl(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
    }
    unlock_user (target_vec, target_addr, 0);

    return 0;
}
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
    return get_errno(socket(domain, type, protocol));
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}
/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg() Must return target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            unlock_user_struct(msgp, target_msg, send ? 0 : 1);
            return ret;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapl(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapl(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret))
                ret = len;
        }
    }
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0)
        return -TARGET_EINVAL;

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0)
        return -TARGET_EINVAL;

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0)
        return -TARGET_EINVAL;

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        addr = alloca(addrlen);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    abi_long ret;
    const int n = sizeof(abi_ulong);

    switch(num) {
    case SOCKOP_socket:
        {
            abi_ulong domain, type, protocol;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_socket(domain, type, protocol);
        }
        break;
    case SOCKOP_bind:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_bind(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_connect:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_connect(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_listen:
        {
            abi_ulong sockfd, backlog;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(backlog, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(listen(sockfd, backlog));
        }
        break;
    case SOCKOP_accept:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_accept(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getsockname:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getsockname(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getpeername:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getpeername(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_socketpair:
        {
            abi_ulong domain, type, protocol;
            abi_ulong tab;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n)
                || get_user_ual(tab, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_socketpair(domain, type, protocol, tab);
        }
        break;
    case SOCKOP_send:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_recv:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_sendto:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;
            abi_ulong addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_recvfrom:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;
            abi_ulong addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_shutdown:
        {
            abi_ulong sockfd, how;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(how, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(shutdown(sockfd, how));
        }
        break;
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
        {
            abi_ulong fd;
            abi_ulong target_msg;
            abi_ulong flags;

            if (get_user_ual(fd, vptr)
                || get_user_ual(target_msg, vptr + n)
                || get_user_ual(flags, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_sendrecvmsg(fd, target_msg, flags,
                                 (num == SOCKOP_sendmsg));
        }
        break;
    case SOCKOP_setsockopt:
        {
            abi_ulong sockfd;
            abi_ulong level;
            abi_ulong optname;
            abi_ulong optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_setsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    case SOCKOP_getsockopt:
        {
            abi_ulong sockfd;
            abi_ulong level;
            abi_ulong optname;
            abi_ulong optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_getsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
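/*
 * Added example: for a guest call socketcall(SOCKOP_bind, args) the argument
 * block read above is three consecutive abi_ulong slots at vptr --
 * { sockfd, target_addr, addrlen } -- i.e. get_user_ual(x, vptr + i * n)
 * with n = sizeof(abi_ulong), which is then forwarded to do_bind().
 */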
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
} shm_regions[N_SHM_REGIONS];
struct target_ipc_perm
{
    abi_long __key;
    abi_ulong uid;
    abi_ulong gid;
    abi_ulong cuid;
    abi_ulong cgid;
    unsigned short int mode;
    unsigned short int __pad1;
    unsigned short int __seq;
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
};

struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswapl(target_ip->__key);
    host_ip->uid = tswapl(target_ip->uid);
    host_ip->gid = tswapl(target_ip->gid);
    host_ip->cuid = tswapl(target_ip->cuid);
    host_ip->cgid = tswapl(target_ip->cgid);
    host_ip->mode = tswapl(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapl(host_ip->__key);
    target_ip->uid = tswapl(host_ip->uid);
    target_ip->gid = tswapl(host_ip->gid);
    target_ip->cuid = tswapl(host_ip->cuid);
    target_ip->cgid = tswapl(host_ip->cgid);
    target_ip->mode = tswapl(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
    host_sd->sem_otime = tswapl(target_sd->sem_otime);
    host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
    target_sd->sem_otime = tswapl(host_sd->sem_otime);
    target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                               struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems, i, ret;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems, i, ret;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    switch(cmd) {
    case GETVAL:
    case SETVAL:
        arg.val = tswapl(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswapl(arg.val);
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
2303 struct target_sembuf
{
2304 unsigned short sem_num
;
2309 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2310 abi_ulong target_addr
,
2313 struct target_sembuf
*target_sembuf
;
2316 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2317 nsops
*sizeof(struct target_sembuf
), 1);
2319 return -TARGET_EFAULT
;
2321 for(i
=0; i
<nsops
; i
++) {
2322 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2323 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2324 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2327 unlock_user(target_sembuf
, target_addr
, 0);
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return semop(semid, sops, nsops);
}
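/*
 * Illustration (not built): do_semop() above forwards the guest's request to
 * the host semop(2) after converting the operation array.  The host-side call
 * being emulated looks like the standalone sketch below (the helper name and
 * the chosen semaphore index are made up for the example).
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

static int example_sem_wait_and_post(int semid)
{
    struct sembuf ops[1];

    /* P operation: decrement semaphore 0, blocking until it is positive. */
    ops[0].sem_num = 0;
    ops[0].sem_op = -1;
    ops[0].sem_flg = 0;
    if (semop(semid, ops, 1) < 0)
        return -1;

    /* V operation: increment semaphore 0 again. */
    ops[0].sem_op = 1;
    return semop(semid, ops, 1);
}
#endif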
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapl(target_md->msg_stime);
    host_md->msg_rtime = tswapl(target_md->msg_rtime);
    host_md->msg_ctime = tswapl(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapl(target_md->msg_qnum);
    host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
    host_md->msg_lspid = tswapl(target_md->msg_lspid);
    host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapl(host_md->msg_stime);
    target_md->msg_rtime = tswapl(host_md->msg_rtime);
    target_md->msg_ctime = tswapl(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapl(host_md->msg_qnum);
    target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
    target_md->msg_lspid = tswapl(host_md->msg_lspid);
    target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
struct target_msgbuf {
    abi_ulong mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapl(host_mb->mtype);
    free(host_mb);

end:
    unlock_user_struct(target_mb, msgp, 1);

    return ret;
}
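/*
 * Illustration (not built): System V message buffers start with a long
 * message type followed by the payload, and msgsz counts only the payload
 * bytes, which is why host_mb above is allocated as msgsz + sizeof(long).
 * A host-only sketch (the struct name and payload size are example values):
 */
#if 0
#include <string.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct example_msg {
    long mtype;       /* message type, must be > 0 */
    char mtext[64];   /* payload; msgsz refers to this part only */
};

static int example_send_text(int msqid, const char *text)
{
    struct example_msg m;

    m.mtype = 1;
    strncpy(m.mtext, text, sizeof(m.mtext) - 1);
    m.mtext[sizeof(m.mtext) - 1] = '\0';
    /* msgsz excludes the mtype header. */
    return msgsnd(msqid, &m, strlen(m.mtext) + 1, 0);
}
#endif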
struct target_shmid_ds
{
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;
    abi_ulong shm_atime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong shm_dtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong shm_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong shm_cpid;
    abi_ulong shm_lpid;
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;
};
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_ulong raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    return raddr;
}
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
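/*
 * Illustration (not built): attachments are tracked in a small fixed table so
 * that do_shmdt() can find the segment size again and clear the guest page
 * flags.  A standalone sketch of that slot management (the table size and
 * struct/function names are invented for the example):
 */
#if 0
#define EXAMPLE_N_REGIONS 32

struct example_region {
    unsigned long start;   /* 0 means the slot is free */
    unsigned long size;
};

static struct example_region example_regions[EXAMPLE_N_REGIONS];

static int example_record_attach(unsigned long start, unsigned long size)
{
    int i;

    for (i = 0; i < EXAMPLE_N_REGIONS; i++) {
        if (example_regions[i].start == 0) {
            example_regions[i].start = start;
            example_regions[i].size = size;
            return 0;
        }
    }
    return -1; /* table full */
}

static unsigned long example_record_detach(unsigned long start)
{
    int i;

    for (i = 0; i < EXAMPLE_N_REGIONS; i++) {
        if (example_regions[i].start == start) {
            example_regions[i].start = 0;
            return example_regions[i].size;
        }
    }
    return 0; /* not found */
}
#endif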
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;

    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
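/*
 * Illustration (not built): on targets that funnel all SysV IPC through a
 * single ipc(2) syscall, the operation is selected by the low 16 bits of
 * 'call' while the high 16 bits carry an interface version, which is why
 * do_ipc() above starts by computing 'version = call >> 16'.  A standalone
 * sketch of that decoding (the struct and function names are invented):
 */
#if 0
struct example_ipc_request {
    unsigned int op;       /* which IPC primitive was requested */
    unsigned int version;  /* calling-convention version from the high bits */
};

static struct example_ipc_request example_decode_ipc_call(unsigned int call)
{
    struct example_ipc_request req;

    req.version = call >> 16;   /* high half: version */
    req.op = call & 0xffff;     /* low half: SEMOP, MSGSND, SHMAT, ... */
    return req;
}
#endif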
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry {
    unsigned int target_cmd;
    unsigned int host_cmd;
    const char *name;
    int access;
    const argtype arg_type[5];
} IOCTLEntry;

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096

static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, {  __VA_ARGS__ } },
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* int argument */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
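/*
 * Illustration (not built): on Linux an ioctl request number packs the
 * command, the driver type, the argument size and the transfer direction
 * into one 32-bit value (nr in bits 0-7, type in bits 8-15, size in bits
 * 16-29, direction in bits 30-31 on most architectures).  That size field is
 * what syscall_init() later rewrites for table entries that ask for it.  A
 * sketch of the decoding under those assumptions:
 */
#if 0
struct example_ioctl_fields {
    unsigned nr;
    unsigned type;
    unsigned size;
    unsigned dir;
};

static struct example_ioctl_fields example_decode_ioctl(unsigned int req)
{
    struct example_ioctl_fields f;

    f.nr   = req & 0xff;
    f.type = (req >> 8) & 0xff;
    f.size = (req >> 16) & 0x3fff;
    f.dir  = (req >> 30) & 0x3;
    return f;
}
#endif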
2969 static const bitmask_transtbl iflag_tbl
[] = {
2970 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
2971 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
2972 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
2973 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
2974 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
2975 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
2976 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
2977 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
2978 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
2979 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
2980 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
2981 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
2982 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
2983 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
2987 static const bitmask_transtbl oflag_tbl
[] = {
2988 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
2989 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
2990 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
2991 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
2992 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
2993 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
2994 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
2995 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
2996 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
2997 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
2998 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
2999 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3000 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3001 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3002 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3003 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3004 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3005 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3006 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3007 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3008 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3009 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3010 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3011 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3015 static const bitmask_transtbl cflag_tbl
[] = {
3016 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3017 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3018 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3019 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3020 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3021 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3022 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3023 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3024 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3025 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3026 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3027 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3028 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3029 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3030 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3031 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3032 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3033 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3034 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3035 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3036 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3037 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3038 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3039 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3040 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3041 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3042 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3043 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3044 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3045 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3046 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3050 static const bitmask_transtbl lflag_tbl
[] = {
3051 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3052 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3053 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3054 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3055 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3056 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3057 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3058 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3059 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3060 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3061 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3062 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3063 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3064 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3065 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
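/*
 * Illustration (not built): each bitmask_transtbl row pairs a target
 * mask/value with the corresponding host mask/value, and translation walks
 * the table OR-ing in the host bits whose target bits are set.  This is a
 * minimal sketch of that idea under the assumption of a four-field row
 * layout; the real table type and helpers are defined elsewhere in QEMU.
 */
#if 0
struct example_flag_map {
    unsigned int target_mask;
    unsigned int target_bits;
    unsigned int host_mask;
    unsigned int host_bits;
};

static unsigned int example_target_to_host_flags(unsigned int target_flags,
                                                 const struct example_flag_map *map,
                                                 int nentries)
{
    unsigned int host_flags = 0;
    int i;

    for (i = 0; i < nentries; i++) {
        if ((target_flags & map[i].target_mask) == map[i].target_bits)
            host_flags |= map[i].host_bits;
    }
    return host_flags;
}
#endif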
3069 static void target_to_host_termios (void *dst
, const void *src
)
3071 struct host_termios
*host
= dst
;
3072 const struct target_termios
*target
= src
;
3075 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3077 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3079 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3081 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3082 host
->c_line
= target
->c_line
;
3084 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3085 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3086 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3087 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3088 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3089 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3090 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3091 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3092 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3093 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3094 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3095 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3096 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3097 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3098 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3099 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3100 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3101 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3104 static void host_to_target_termios (void *dst
, const void *src
)
3106 struct target_termios
*target
= dst
;
3107 const struct host_termios
*host
= src
;
3110 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3112 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3114 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3116 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3117 target
->c_line
= host
->c_line
;
3119 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3120 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3121 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3122 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3123 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3124 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3125 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3126 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3127 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3128 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3129 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3130 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3131 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3132 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3133 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3134 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3135 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3136 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3139 static const StructEntry struct_termios_def
= {
3140 .convert
= { host_to_target_termios
, target_to_host_termios
},
3141 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3142 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3145 static bitmask_transtbl mmap_flags_tbl
[] = {
3146 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3147 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3148 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3149 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3150 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3151 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3152 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3153 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3157 #if defined(TARGET_I386)
3159 /* NOTE: there is really one LDT for all the threads */
3160 static uint8_t *ldt_table
;
3162 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3169 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3170 if (size
> bytecount
)
3172 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3174 return -TARGET_EFAULT
;
3175 /* ??? Should this by byteswapped? */
3176 memcpy(p
, ldt_table
, size
);
3177 unlock_user(p
, ptr
, size
);
3181 /* XXX: add locking support */
3182 static abi_long
write_ldt(CPUX86State
*env
,
3183 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3185 struct target_modify_ldt_ldt_s ldt_info
;
3186 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3187 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3188 int seg_not_present
, useable
, lm
;
3189 uint32_t *lp
, entry_1
, entry_2
;
3191 if (bytecount
!= sizeof(ldt_info
))
3192 return -TARGET_EINVAL
;
3193 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3194 return -TARGET_EFAULT
;
3195 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3196 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3197 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3198 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3199 unlock_user_struct(target_ldt_info
, ptr
, 0);
3201 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3202 return -TARGET_EINVAL
;
3203 seg_32bit
= ldt_info
.flags
& 1;
3204 contents
= (ldt_info
.flags
>> 1) & 3;
3205 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3206 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3207 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3208 useable
= (ldt_info
.flags
>> 6) & 1;
3212 lm
= (ldt_info
.flags
>> 7) & 1;
3214 if (contents
== 3) {
3216 return -TARGET_EINVAL
;
3217 if (seg_not_present
== 0)
3218 return -TARGET_EINVAL
;
3220 /* allocate the LDT */
3222 env
->ldt
.base
= target_mmap(0,
3223 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3224 PROT_READ
|PROT_WRITE
,
3225 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3226 if (env
->ldt
.base
== -1)
3227 return -TARGET_ENOMEM
;
3228 memset(g2h(env
->ldt
.base
), 0,
3229 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3230 env
->ldt
.limit
= 0xffff;
3231 ldt_table
= g2h(env
->ldt
.base
);
3234 /* NOTE: same code as Linux kernel */
3235 /* Allow LDTs to be cleared by the user. */
3236 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3239 read_exec_only
== 1 &&
3241 limit_in_pages
== 0 &&
3242 seg_not_present
== 1 &&
3250 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3251 (ldt_info
.limit
& 0x0ffff);
3252 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3253 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3254 (ldt_info
.limit
& 0xf0000) |
3255 ((read_exec_only
^ 1) << 9) |
3257 ((seg_not_present
^ 1) << 15) |
3259 (limit_in_pages
<< 23) |
3263 entry_2
|= (useable
<< 20);
3265 /* Install the new entry ... */
3267 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3268 lp
[0] = tswap32(entry_1
);
3269 lp
[1] = tswap32(entry_2
);
3273 /* specific and weird i386 syscalls */
3274 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3275 unsigned long bytecount
)
3281 ret
= read_ldt(ptr
, bytecount
);
3284 ret
= write_ldt(env
, ptr
, bytecount
, 1);
3287 ret
= write_ldt(env
, ptr
, bytecount
, 0);
3290 ret
= -TARGET_ENOSYS
;
3296 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3297 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3299 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3300 struct target_modify_ldt_ldt_s ldt_info
;
3301 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3302 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3303 int seg_not_present
, useable
, lm
;
3304 uint32_t *lp
, entry_1
, entry_2
;
3307 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3308 if (!target_ldt_info
)
3309 return -TARGET_EFAULT
;
3310 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3311 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3312 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3313 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3314 if (ldt_info
.entry_number
== -1) {
3315 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
3316 if (gdt_table
[i
] == 0) {
3317 ldt_info
.entry_number
= i
;
3318 target_ldt_info
->entry_number
= tswap32(i
);
3323 unlock_user_struct(target_ldt_info
, ptr
, 1);
3325 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
3326 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
3327 return -TARGET_EINVAL
;
3328 seg_32bit
= ldt_info
.flags
& 1;
3329 contents
= (ldt_info
.flags
>> 1) & 3;
3330 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3331 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3332 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3333 useable
= (ldt_info
.flags
>> 6) & 1;
3337 lm
= (ldt_info
.flags
>> 7) & 1;
3340 if (contents
== 3) {
3341 if (seg_not_present
== 0)
3342 return -TARGET_EINVAL
;
3345 /* NOTE: same code as Linux kernel */
3346 /* Allow LDTs to be cleared by the user. */
3347 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3348 if ((contents
== 0 &&
3349 read_exec_only
== 1 &&
3351 limit_in_pages
== 0 &&
3352 seg_not_present
== 1 &&
3360 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3361 (ldt_info
.limit
& 0x0ffff);
3362 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3363 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3364 (ldt_info
.limit
& 0xf0000) |
3365 ((read_exec_only
^ 1) << 9) |
3367 ((seg_not_present
^ 1) << 15) |
3369 (limit_in_pages
<< 23) |
3374 /* Install the new entry ... */
3376 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
3377 lp
[0] = tswap32(entry_1
);
3378 lp
[1] = tswap32(entry_2
);
3382 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3384 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3385 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3386 uint32_t base_addr
, limit
, flags
;
3387 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
3388 int seg_not_present
, useable
, lm
;
3389 uint32_t *lp
, entry_1
, entry_2
;
3391 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3392 if (!target_ldt_info
)
3393 return -TARGET_EFAULT
;
3394 idx
= tswap32(target_ldt_info
->entry_number
);
3395 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
3396 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
3397 unlock_user_struct(target_ldt_info
, ptr
, 1);
3398 return -TARGET_EINVAL
;
3400 lp
= (uint32_t *)(gdt_table
+ idx
);
3401 entry_1
= tswap32(lp
[0]);
3402 entry_2
= tswap32(lp
[1]);
3404 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
3405 contents
= (entry_2
>> 10) & 3;
3406 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
3407 seg_32bit
= (entry_2
>> 22) & 1;
3408 limit_in_pages
= (entry_2
>> 23) & 1;
3409 useable
= (entry_2
>> 20) & 1;
3413 lm
= (entry_2
>> 21) & 1;
3415 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
3416 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
3417 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
3418 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
3419 base_addr
= (entry_1
>> 16) |
3420 (entry_2
& 0xff000000) |
3421 ((entry_2
& 0xff) << 16);
3422 target_ldt_info
->base_addr
= tswapl(base_addr
);
3423 target_ldt_info
->limit
= tswap32(limit
);
3424 target_ldt_info
->flags
= tswap32(flags
);
3425 unlock_user_struct(target_ldt_info
, ptr
, 1);
3428 #endif /* TARGET_I386 && TARGET_ABI32 */
3430 #ifndef TARGET_ABI32
3431 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
3438 case TARGET_ARCH_SET_GS
:
3439 case TARGET_ARCH_SET_FS
:
3440 if (code
== TARGET_ARCH_SET_GS
)
3444 cpu_x86_load_seg(env
, idx
, 0);
3445 env
->segs
[idx
].base
= addr
;
3447 case TARGET_ARCH_GET_GS
:
3448 case TARGET_ARCH_GET_FS
:
3449 if (code
== TARGET_ARCH_GET_GS
)
3453 val
= env
->segs
[idx
].base
;
3454 if (put_user(val
, addr
, abi_ulong
))
3455 return -TARGET_EFAULT
;
3458 ret
= -TARGET_EINVAL
;
3465 #endif /* defined(TARGET_I386) */
3467 #if defined(CONFIG_USE_NPTL)
3469 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
3471 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
3474 pthread_mutex_t mutex
;
3475 pthread_cond_t cond
;
3478 abi_ulong child_tidptr
;
3479 abi_ulong parent_tidptr
;
3483 static void *clone_func(void *arg
)
3485 new_thread_info
*info
= arg
;
3491 ts
= (TaskState
*)thread_env
->opaque
;
3492 info
->tid
= gettid();
3493 env
->host_tid
= info
->tid
;
3495 if (info
->child_tidptr
)
3496 put_user_u32(info
->tid
, info
->child_tidptr
);
3497 if (info
->parent_tidptr
)
3498 put_user_u32(info
->tid
, info
->parent_tidptr
);
3499 /* Enable signals. */
3500 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
3501 /* Signal to the parent that we're ready. */
3502 pthread_mutex_lock(&info
->mutex
);
3503 pthread_cond_broadcast(&info
->cond
);
3504 pthread_mutex_unlock(&info
->mutex
);
    /* Wait until the parent has finished initializing the TLS state.  */
3506 pthread_mutex_lock(&clone_lock
);
3507 pthread_mutex_unlock(&clone_lock
);
/* this stack is the equivalent of the kernel stack associated with a
   thread/process */
3515 #define NEW_STACK_SIZE 8192
3517 static int clone_func(void *arg
)
3519 CPUState
*env
= arg
;
3526 /* do_fork() Must return host values and target errnos (unlike most
3527 do_*() functions). */
3528 static int do_fork(CPUState
*env
, unsigned int flags
, abi_ulong newsp
,
3529 abi_ulong parent_tidptr
, target_ulong newtls
,
3530 abi_ulong child_tidptr
)
3536 #if defined(CONFIG_USE_NPTL)
3537 unsigned int nptl_flags
;
3541 /* Emulate vfork() with fork() */
3542 if (flags
& CLONE_VFORK
)
3543 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
3545 if (flags
& CLONE_VM
) {
3546 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
3547 #if defined(CONFIG_USE_NPTL)
3548 new_thread_info info
;
3549 pthread_attr_t attr
;
3551 ts
= qemu_mallocz(sizeof(TaskState
) + NEW_STACK_SIZE
);
3552 init_task_state(ts
);
3553 new_stack
= ts
->stack
;
3554 /* we create a new CPU instance. */
3555 new_env
= cpu_copy(env
);
3556 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3559 /* Init regs that differ from the parent. */
3560 cpu_clone_regs(new_env
, newsp
);
3561 new_env
->opaque
= ts
;
3562 ts
->bprm
= parent_ts
->bprm
;
3563 ts
->info
= parent_ts
->info
;
3564 #if defined(CONFIG_USE_NPTL)
3566 flags
&= ~CLONE_NPTL_FLAGS2
;
3568 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
3569 ts
->child_tidptr
= child_tidptr
;
3572 if (nptl_flags
& CLONE_SETTLS
)
3573 cpu_set_tls (new_env
, newtls
);
3575 /* Grab a mutex so that thread setup appears atomic. */
3576 pthread_mutex_lock(&clone_lock
);
3578 memset(&info
, 0, sizeof(info
));
3579 pthread_mutex_init(&info
.mutex
, NULL
);
3580 pthread_mutex_lock(&info
.mutex
);
3581 pthread_cond_init(&info
.cond
, NULL
);
3583 if (nptl_flags
& CLONE_CHILD_SETTID
)
3584 info
.child_tidptr
= child_tidptr
;
3585 if (nptl_flags
& CLONE_PARENT_SETTID
)
3586 info
.parent_tidptr
= parent_tidptr
;
3588 ret
= pthread_attr_init(&attr
);
3589 ret
= pthread_attr_setstack(&attr
, new_stack
, NEW_STACK_SIZE
);
3590 /* It is not safe to deliver signals until the child has finished
3591 initializing, so temporarily block all signals. */
3592 sigfillset(&sigmask
);
3593 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
3595 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
3596 /* TODO: Free new CPU state if thread creation failed. */
3598 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
3599 pthread_attr_destroy(&attr
);
3601 /* Wait for the child to initialize. */
3602 pthread_cond_wait(&info
.cond
, &info
.mutex
);
3604 if (flags
& CLONE_PARENT_SETTID
)
3605 put_user_u32(ret
, parent_tidptr
);
3609 pthread_mutex_unlock(&info
.mutex
);
3610 pthread_cond_destroy(&info
.cond
);
3611 pthread_mutex_destroy(&info
.mutex
);
3612 pthread_mutex_unlock(&clone_lock
);
3614 if (flags
& CLONE_NPTL_FLAGS2
)
3616 /* This is probably going to die very quickly, but do it anyway. */
3618 ret
= __clone2(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
3620 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
3624 /* if no CLONE_VM, we consider it is a fork */
3625 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
3630 /* Child Process. */
3631 cpu_clone_regs(env
, newsp
);
3633 #if defined(CONFIG_USE_NPTL)
3634 /* There is a race condition here. The parent process could
3635 theoretically read the TID in the child process before the child
3636 tid is set. This would require using either ptrace
3637 (not implemented) or having *_tidptr to point at a shared memory
3638 mapping. We can't repeat the spinlock hack used above because
3639 the child process gets its own copy of the lock. */
3640 if (flags
& CLONE_CHILD_SETTID
)
3641 put_user_u32(gettid(), child_tidptr
);
3642 if (flags
& CLONE_PARENT_SETTID
)
3643 put_user_u32(gettid(), parent_tidptr
);
3644 ts
= (TaskState
*)env
->opaque
;
3645 if (flags
& CLONE_SETTLS
)
3646 cpu_set_tls (env
, newtls
);
3647 if (flags
& CLONE_CHILD_CLEARTID
)
3648 ts
->child_tidptr
= child_tidptr
;
/* warning: doesn't handle linux specific flags... */
3658 static int target_to_host_fcntl_cmd(int cmd
)
3661 case TARGET_F_DUPFD
:
3662 case TARGET_F_GETFD
:
3663 case TARGET_F_SETFD
:
3664 case TARGET_F_GETFL
:
3665 case TARGET_F_SETFL
:
3667 case TARGET_F_GETLK
:
3669 case TARGET_F_SETLK
:
3671 case TARGET_F_SETLKW
:
3673 case TARGET_F_GETOWN
:
3675 case TARGET_F_SETOWN
:
3677 case TARGET_F_GETSIG
:
3679 case TARGET_F_SETSIG
:
3681 #if TARGET_ABI_BITS == 32
3682 case TARGET_F_GETLK64
:
3684 case TARGET_F_SETLK64
:
3686 case TARGET_F_SETLKW64
:
3689 case TARGET_F_SETLEASE
:
3691 case TARGET_F_GETLEASE
:
3693 #ifdef F_DUPFD_CLOEXEC
3694 case TARGET_F_DUPFD_CLOEXEC
:
3695 return F_DUPFD_CLOEXEC
;
3697 case TARGET_F_NOTIFY
:
3700 return -TARGET_EINVAL
;
3702 return -TARGET_EINVAL
;
3705 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
3708 struct target_flock
*target_fl
;
3709 struct flock64 fl64
;
3710 struct target_flock64
*target_fl64
;
3712 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
3714 if (host_cmd
== -TARGET_EINVAL
)
3718 case TARGET_F_GETLK
:
3719 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3720 return -TARGET_EFAULT
;
3721 fl
.l_type
= tswap16(target_fl
->l_type
);
3722 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3723 fl
.l_start
= tswapl(target_fl
->l_start
);
3724 fl
.l_len
= tswapl(target_fl
->l_len
);
3725 fl
.l_pid
= tswap32(target_fl
->l_pid
);
3726 unlock_user_struct(target_fl
, arg
, 0);
3727 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
3729 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
3730 return -TARGET_EFAULT
;
3731 target_fl
->l_type
= tswap16(fl
.l_type
);
3732 target_fl
->l_whence
= tswap16(fl
.l_whence
);
3733 target_fl
->l_start
= tswapl(fl
.l_start
);
3734 target_fl
->l_len
= tswapl(fl
.l_len
);
3735 target_fl
->l_pid
= tswap32(fl
.l_pid
);
3736 unlock_user_struct(target_fl
, arg
, 1);
3740 case TARGET_F_SETLK
:
3741 case TARGET_F_SETLKW
:
3742 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3743 return -TARGET_EFAULT
;
3744 fl
.l_type
= tswap16(target_fl
->l_type
);
3745 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3746 fl
.l_start
= tswapl(target_fl
->l_start
);
3747 fl
.l_len
= tswapl(target_fl
->l_len
);
3748 fl
.l_pid
= tswap32(target_fl
->l_pid
);
3749 unlock_user_struct(target_fl
, arg
, 0);
3750 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
3753 case TARGET_F_GETLK64
:
3754 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
3755 return -TARGET_EFAULT
;
3756 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
3757 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
3758 fl64
.l_start
= tswapl(target_fl64
->l_start
);
3759 fl64
.l_len
= tswapl(target_fl64
->l_len
);
3760 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
3761 unlock_user_struct(target_fl64
, arg
, 0);
3762 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
3764 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
3765 return -TARGET_EFAULT
;
3766 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
3767 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
3768 target_fl64
->l_start
= tswapl(fl64
.l_start
);
3769 target_fl64
->l_len
= tswapl(fl64
.l_len
);
3770 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
3771 unlock_user_struct(target_fl64
, arg
, 1);
3774 case TARGET_F_SETLK64
:
3775 case TARGET_F_SETLKW64
:
3776 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
3777 return -TARGET_EFAULT
;
3778 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
3779 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
3780 fl64
.l_start
= tswapl(target_fl64
->l_start
);
3781 fl64
.l_len
= tswapl(target_fl64
->l_len
);
3782 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
3783 unlock_user_struct(target_fl64
, arg
, 0);
3784 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
3787 case TARGET_F_GETFL
:
3788 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
3790 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
3794 case TARGET_F_SETFL
:
3795 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
3798 case TARGET_F_SETOWN
:
3799 case TARGET_F_GETOWN
:
3800 case TARGET_F_SETSIG
:
3801 case TARGET_F_GETSIG
:
3802 case TARGET_F_SETLEASE
:
3803 case TARGET_F_GETLEASE
:
3804 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
3808 ret
= get_errno(fcntl(fd
, cmd
, arg
));
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

#endif /* USE_UID16 */
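/*
 * Illustration (not built): the 16-bit uid syscalls use (uid16_t)-1, i.e.
 * 65535, as the "leave unchanged" sentinel, so low2highuid() above must map
 * it back to a full-width -1 before calling the host.  A standalone sketch
 * (the helper name is invented for the example):
 */
#if 0
#include <stdint.h>

static int example_widen_uid16(uint16_t uid16)
{
    /* 0xffff means "no change"; everything else is an ordinary uid. */
    if (uid16 == (uint16_t)-1)
        return -1;
    return uid16;
}

/* example_widen_uid16(0xffff) == -1, example_widen_uid16(1000) == 1000 */
#endif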
3850 void syscall_init(void)
3853 const argtype
*arg_type
;
3857 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3858 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3859 #include "syscall_types.h"
3861 #undef STRUCT_SPECIAL
3863 /* we patch the ioctl size if necessary. We rely on the fact that
3864 no ioctl has all the bits at '1' in the size field */
3866 while (ie
->target_cmd
!= 0) {
3867 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
3868 TARGET_IOC_SIZEMASK
) {
3869 arg_type
= ie
->arg_type
;
3870 if (arg_type
[0] != TYPE_PTR
) {
3871 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
3876 size
= thunk_type_size(arg_type
, 0);
3877 ie
->target_cmd
= (ie
->target_cmd
&
3878 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
3879 (size
<< TARGET_IOC_SIZESHIFT
);
3882 /* Build target_to_host_errno_table[] table from
3883 * host_to_target_errno_table[]. */
3884 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
3885 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
3887 /* automatic consistency check if same arch */
3888 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3889 (defined(__x86_64__) && defined(TARGET_X86_64))
3890 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
3891 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3892 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
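/*
 * Illustration (not built): 32-bit ABIs pass a 64-bit file offset as two
 * 32-bit syscall arguments, and target_offset64() above reassembles them in
 * the order the target's word order dictates.  A worked example (the helper
 * name is invented):
 */
#if 0
#include <stdint.h>

/* Split a 64-bit offset the way a 32-bit little-endian guest would pass it:
   low word first, high word second. */
static void example_split_offset64(uint64_t off, uint32_t *word0, uint32_t *word1)
{
    *word0 = (uint32_t)(off & 0xffffffffu);   /* low half  */
    *word1 = (uint32_t)(off >> 32);           /* high half */
}

/* Reassembling 0x0000000123456789 from word0=0x23456789, word1=0x00000001
   gives back the original offset:
   ((uint64_t)word1 << 32) | word0 == 0x0000000123456789 */
#endif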
3915 #ifdef TARGET_NR_truncate64
3916 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
3922 if (((CPUARMState
*)cpu_env
)->eabi
)
3928 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
3932 #ifdef TARGET_NR_ftruncate64
3933 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
3939 if (((CPUARMState
*)cpu_env
)->eabi
)
3945 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapl(target_ts->tv_sec);
    host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapl(host_ts->tv_sec);
    target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
3975 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
3976 static inline abi_long
host_to_target_stat64(void *cpu_env
,
3977 abi_ulong target_addr
,
3978 struct stat
*host_st
)
3981 if (((CPUARMState
*)cpu_env
)->eabi
) {
3982 struct target_eabi_stat64
*target_st
;
3984 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
3985 return -TARGET_EFAULT
;
3986 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
3987 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
3988 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
3989 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3990 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
3992 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
3993 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
3994 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
3995 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
3996 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
3997 __put_user(host_st
->st_size
, &target_st
->st_size
);
3998 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
3999 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4000 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4001 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4002 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4003 unlock_user_struct(target_st
, target_addr
, 1);
4007 #if TARGET_LONG_BITS == 64
4008 struct target_stat
*target_st
;
4010 struct target_stat64
*target_st
;
4013 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4014 return -TARGET_EFAULT
;
4015 memset(target_st
, 0, sizeof(*target_st
));
4016 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4017 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4018 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4019 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4021 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4022 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4023 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4024 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4025 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4026 /* XXX: better use of kernel struct */
4027 __put_user(host_st
->st_size
, &target_st
->st_size
);
4028 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4029 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4030 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4031 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4032 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4033 unlock_user_struct(target_st
, target_addr
, 1);
4040 #if defined(CONFIG_USE_NPTL)
4041 /* ??? Using host futex calls even when target atomic operations
4042 are not really atomic probably breaks things. However implementing
4043 futexes locally would make futexes shared between multiple processes
4044 tricky. However they're probably useless because guest atomic
4045 operations won't work either. */
4046 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
4047 target_ulong uaddr2
, int val3
)
4049 struct timespec ts
, *pts
;
4052 /* ??? We assume FUTEX_* constants are the same on both host
4054 #ifdef FUTEX_CMD_MASK
4055 base_op
= op
& FUTEX_CMD_MASK
;
4063 target_to_host_timespec(pts
, timeout
);
4067 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
4070 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4072 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4074 case FUTEX_CMP_REQUEUE
:
4076 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4077 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4078 But the prototype takes a `struct timespec *'; insert casts
4079 to satisfy the compiler. We do not need to tswap TIMEOUT
4080 since it's not compared to guest memory. */
4081 pts
= (struct timespec
*)(uintptr_t) timeout
;
4082 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
4084 (base_op
== FUTEX_CMP_REQUEUE
4088 return -TARGET_ENOSYS
;
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
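/*
 * Illustration (not built): the kernel wait status packs the exit code into
 * bits 8-15, a terminating signal into bits 0-6 (bit 7 is the core-dump
 * flag), and a stop signal into bits 8-15 with 0x7f in the low byte, which is
 * why only the signal fields need renumbering above.  A sketch of the layout
 * (the helper names are invented):
 */
#if 0
/* A process that exited with code 3:      status == (3 << 8)            */
/* A process killed by signal 9 (SIGKILL): status == 9                   */
/* A process stopped by signal 19:         status == (19 << 8) | 0x7f    */
static int example_exit_code(int status)   { return (status >> 8) & 0xff; }
static int example_term_signal(int status) { return status & 0x7f; }
#endif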
int get_osversion(void)
{
    static int osversion;
    struct new_utsname buf;
    const char *s;
    int i, n, tmp;

    if (osversion)
        return osversion;
    if (qemu_uname_release && *qemu_uname_release) {
        s = qemu_uname_release;
    } else {
        if (sys_uname(&buf))
            return 0;
        s = buf.release;
    }
    tmp = 0;
    for (i = 0; i < 3; i++) {
        n = 0;
        while (*s >= '0' && *s <= '9') {
            n *= 10;
            n += *s - '0';
            s++;
        }
        tmp = (tmp << 8) + n;
        if (*s == '.')
            s++;
    }
    osversion = tmp;
    return osversion;
}
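/*
 * Illustration (not built): get_osversion() packs "major.minor.patch" into
 * one integer with 8 bits per component, the same encoding as the kernel's
 * KERNEL_VERSION() macro, so "2.6.32" becomes 0x020620.  A standalone sketch
 * (the helper name is invented):
 */
#if 0
static int example_pack_version(int major, int minor, int patch)
{
    return (major << 16) | (minor << 8) | patch;
}

/* example_pack_version(2, 6, 32) == 0x020620 */
#endif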
4138 /* do_syscall() should always have a single exit point at the end so
4139 that actions, such as logging of syscall results, can be performed.
4140 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4141 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
4142 abi_long arg2
, abi_long arg3
, abi_long arg4
,
4143 abi_long arg5
, abi_long arg6
)
4151 gemu_log("syscall %d", num
);
4154 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
4157 case TARGET_NR_exit
:
4158 #ifdef CONFIG_USE_NPTL
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */
        /* FIXME: This probably breaks if a signal arrives.  We should probably
           be disabling signals. */
4165 if (first_cpu
->next_cpu
) {
4173 while (p
&& p
!= (CPUState
*)cpu_env
) {
4174 lastp
= &p
->next_cpu
;
4177 /* If we didn't find the CPU for this thread then something is
4181 /* Remove the CPU from the list. */
4182 *lastp
= p
->next_cpu
;
4184 ts
= ((CPUState
*)cpu_env
)->opaque
;
4185 if (ts
->child_tidptr
) {
4186 put_user_u32(0, ts
->child_tidptr
);
4187 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
4190 /* TODO: Free CPU state. */
4197 gdb_exit(cpu_env
, arg1
);
4199 ret
= 0; /* avoid warning */
4201 case TARGET_NR_read
:
4205 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
4207 ret
= get_errno(read(arg1
, p
, arg3
));
4208 unlock_user(p
, arg2
, ret
);
4211 case TARGET_NR_write
:
4212 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
4214 ret
= get_errno(write(arg1
, p
, arg3
));
4215 unlock_user(p
, arg2
, 0);
4217 case TARGET_NR_open
:
4218 if (!(p
= lock_user_string(arg1
)))
4220 ret
= get_errno(open(path(p
),
4221 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
4223 unlock_user(p
, arg1
, 0);
4225 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4226 case TARGET_NR_openat
:
4227 if (!(p
= lock_user_string(arg2
)))
4229 ret
= get_errno(sys_openat(arg1
,
4231 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
4233 unlock_user(p
, arg2
, 0);
4236 case TARGET_NR_close
:
4237 ret
= get_errno(close(arg1
));
4242 case TARGET_NR_fork
:
4243 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
4245 #ifdef TARGET_NR_waitpid
4246 case TARGET_NR_waitpid
:
4249 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
4250 if (!is_error(ret
) && arg2
4251 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
4256 #ifdef TARGET_NR_waitid
4257 case TARGET_NR_waitid
:
4261 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
4262 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
4263 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
4265 host_to_target_siginfo(p
, &info
);
4266 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
4271 #ifdef TARGET_NR_creat /* not on alpha */
4272 case TARGET_NR_creat
:
4273 if (!(p
= lock_user_string(arg1
)))
4275 ret
= get_errno(creat(p
, arg2
));
4276 unlock_user(p
, arg1
, 0);
4279 case TARGET_NR_link
:
4282 p
= lock_user_string(arg1
);
4283 p2
= lock_user_string(arg2
);
4285 ret
= -TARGET_EFAULT
;
4287 ret
= get_errno(link(p
, p2
));
4288 unlock_user(p2
, arg2
, 0);
4289 unlock_user(p
, arg1
, 0);
4292 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4293 case TARGET_NR_linkat
:
4298 p
= lock_user_string(arg2
);
4299 p2
= lock_user_string(arg4
);
4301 ret
= -TARGET_EFAULT
;
4303 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
4304 unlock_user(p
, arg2
, 0);
4305 unlock_user(p2
, arg4
, 0);
4309 case TARGET_NR_unlink
:
4310 if (!(p
= lock_user_string(arg1
)))
4312 ret
= get_errno(unlink(p
));
4313 unlock_user(p
, arg1
, 0);
4315 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4316 case TARGET_NR_unlinkat
:
4317 if (!(p
= lock_user_string(arg2
)))
4319 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
4320 unlock_user(p
, arg2
, 0);
4323 case TARGET_NR_execve
:
4325 char **argp
, **envp
;
4328 abi_ulong guest_argp
;
4329 abi_ulong guest_envp
;
4335 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
4336 if (get_user_ual(addr
, gp
))
4344 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
4345 if (get_user_ual(addr
, gp
))
4352 argp
= alloca((argc
+ 1) * sizeof(void *));
4353 envp
= alloca((envc
+ 1) * sizeof(void *));
4355 for (gp
= guest_argp
, q
= argp
; gp
;
4356 gp
+= sizeof(abi_ulong
), q
++) {
4357 if (get_user_ual(addr
, gp
))
4361 if (!(*q
= lock_user_string(addr
)))
4366 for (gp
= guest_envp
, q
= envp
; gp
;
4367 gp
+= sizeof(abi_ulong
), q
++) {
4368 if (get_user_ual(addr
, gp
))
4372 if (!(*q
= lock_user_string(addr
)))
4377 if (!(p
= lock_user_string(arg1
)))
4379 ret
= get_errno(execve(p
, argp
, envp
));
4380 unlock_user(p
, arg1
, 0);
4385 ret
= -TARGET_EFAULT
;
4388 for (gp
= guest_argp
, q
= argp
; *q
;
4389 gp
+= sizeof(abi_ulong
), q
++) {
4390 if (get_user_ual(addr
, gp
)
4393 unlock_user(*q
, addr
, 0);
4395 for (gp
= guest_envp
, q
= envp
; *q
;
4396 gp
+= sizeof(abi_ulong
), q
++) {
4397 if (get_user_ual(addr
, gp
)
4400 unlock_user(*q
, addr
, 0);
4404 case TARGET_NR_chdir
:
4405 if (!(p
= lock_user_string(arg1
)))
4407 ret
= get_errno(chdir(p
));
4408 unlock_user(p
, arg1
, 0);
4410 #ifdef TARGET_NR_time
4411 case TARGET_NR_time
:
4414 ret
= get_errno(time(&host_time
));
4417 && put_user_sal(host_time
, arg1
))
4422 case TARGET_NR_mknod
:
4423 if (!(p
= lock_user_string(arg1
)))
4425 ret
= get_errno(mknod(p
, arg2
, arg3
));
4426 unlock_user(p
, arg1
, 0);
4428 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4429 case TARGET_NR_mknodat
:
4430 if (!(p
= lock_user_string(arg2
)))
4432 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
4433 unlock_user(p
, arg2
, 0);
4436 case TARGET_NR_chmod
:
4437 if (!(p
= lock_user_string(arg1
)))
4439 ret
= get_errno(chmod(p
, arg2
));
4440 unlock_user(p
, arg1
, 0);
4442 #ifdef TARGET_NR_break
4443 case TARGET_NR_break
:
4446 #ifdef TARGET_NR_oldstat
4447 case TARGET_NR_oldstat
:
4450 case TARGET_NR_lseek
:
4451 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
4453 #ifdef TARGET_NR_getxpid
4454 case TARGET_NR_getxpid
:
4456 case TARGET_NR_getpid
:
4458 ret
= get_errno(getpid());
4460 case TARGET_NR_mount
:
4462 /* need to look at the data field */
4464 p
= lock_user_string(arg1
);
4465 p2
= lock_user_string(arg2
);
4466 p3
= lock_user_string(arg3
);
4467 if (!p
|| !p2
|| !p3
)
4468 ret
= -TARGET_EFAULT
;
4470 /* FIXME - arg5 should be locked, but it isn't clear how to
4471 * do that since it's not guaranteed to be a NULL-terminated
4475 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
4477 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
4479 unlock_user(p
, arg1
, 0);
4480 unlock_user(p2
, arg2
, 0);
4481 unlock_user(p3
, arg3
, 0);
4484 #ifdef TARGET_NR_umount
4485 case TARGET_NR_umount
:
4486 if (!(p
= lock_user_string(arg1
)))
4488 ret
= get_errno(umount(p
));
4489 unlock_user(p
, arg1
, 0);
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            time_t host_time;
            if (get_user_sal(host_time, arg1))
                goto efault;
            ret = get_errno(stime(&host_time));
        }
        break;
#endif
    case TARGET_NR_ptrace:
        goto unimplemented;
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        ret = alarm(arg1);
        break;
#endif
#ifdef TARGET_NR_oldfstat
    case TARGET_NR_oldfstat:
        goto unimplemented;
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        ret = get_errno(pause());
        break;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    goto efault;
                tbuf.actime = tswapl(target_tbuf->actime);
                tbuf.modtime = tswapl(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                goto efault;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        break;
#endif
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    goto efault;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                goto efault;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        break;
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    goto efault;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2)))
                goto efault;
            ret = get_errno(sys_futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        break;
#endif
#ifdef TARGET_NR_stty
    case TARGET_NR_stty:
        goto unimplemented;
#endif
#ifdef TARGET_NR_gtty
    case TARGET_NR_gtty:
        goto unimplemented;
#endif
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        break;
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_faccessat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        ret = get_errno(nice(arg1));
        break;
#endif
#ifdef TARGET_NR_ftime
    case TARGET_NR_ftime:
        goto unimplemented;
#endif
    case TARGET_NR_sync:
        sync();
        ret = 0;
        break;
    case TARGET_NR_kill:
        ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
        break;
    case TARGET_NR_rename:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        break;
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(sys_renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        break;
#endif
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        break;
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        break;
    case TARGET_NR_pipe:
        ret = do_pipe(cpu_env, arg1, 0);
        break;
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        ret = do_pipe(cpu_env, arg1, arg2);
        break;
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    goto efault;
                tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        break;
4693 #ifdef TARGET_NR_prof
4694 case TARGET_NR_prof
:
4697 #ifdef TARGET_NR_signal
4698 case TARGET_NR_signal
:
4701 case TARGET_NR_acct
:
4703 ret
= get_errno(acct(NULL
));
4705 if (!(p
= lock_user_string(arg1
)))
4707 ret
= get_errno(acct(path(p
)));
4708 unlock_user(p
, arg1
, 0);
4711 #ifdef TARGET_NR_umount2 /* not on alpha */
4712 case TARGET_NR_umount2
:
4713 if (!(p
= lock_user_string(arg1
)))
4715 ret
= get_errno(umount2(p
, arg2
));
4716 unlock_user(p
, arg1
, 0);
4719 #ifdef TARGET_NR_lock
4720 case TARGET_NR_lock
:
4723 case TARGET_NR_ioctl
:
4724 ret
= do_ioctl(arg1
, arg2
, arg3
);
4726 case TARGET_NR_fcntl
:
4727 ret
= do_fcntl(arg1
, arg2
, arg3
);
4729 #ifdef TARGET_NR_mpx
4733 case TARGET_NR_setpgid
:
4734 ret
= get_errno(setpgid(arg1
, arg2
));
4736 #ifdef TARGET_NR_ulimit
4737 case TARGET_NR_ulimit
:
4740 #ifdef TARGET_NR_oldolduname
4741 case TARGET_NR_oldolduname
:
4744 case TARGET_NR_umask
:
4745 ret
= get_errno(umask(arg1
));
4747 case TARGET_NR_chroot
:
4748 if (!(p
= lock_user_string(arg1
)))
4750 ret
= get_errno(chroot(p
));
4751 unlock_user(p
, arg1
, 0);
4753 case TARGET_NR_ustat
:
4755 case TARGET_NR_dup2
:
4756 ret
= get_errno(dup2(arg1
, arg2
));
4758 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
4759 case TARGET_NR_dup3
:
4760 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
4763 #ifdef TARGET_NR_getppid /* not on alpha */
4764 case TARGET_NR_getppid
:
4765 ret
= get_errno(getppid());
4768 case TARGET_NR_getpgrp
:
4769 ret
= get_errno(getpgrp());
4771 case TARGET_NR_setsid
:
4772 ret
= get_errno(setsid());
4774 #ifdef TARGET_NR_sigaction
4775 case TARGET_NR_sigaction
:
4777 #if !defined(TARGET_MIPS)
4778 struct target_old_sigaction
*old_act
;
4779 struct target_sigaction act
, oact
, *pact
;
4781 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4783 act
._sa_handler
= old_act
->_sa_handler
;
4784 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
4785 act
.sa_flags
= old_act
->sa_flags
;
4786 act
.sa_restorer
= old_act
->sa_restorer
;
4787 unlock_user_struct(old_act
, arg2
, 0);
4792 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4793 if (!is_error(ret
) && arg3
) {
4794 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4796 old_act
->_sa_handler
= oact
._sa_handler
;
4797 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
4798 old_act
->sa_flags
= oact
.sa_flags
;
4799 old_act
->sa_restorer
= oact
.sa_restorer
;
4800 unlock_user_struct(old_act
, arg3
, 1);
4803 struct target_sigaction act
, oact
, *pact
, *old_act
;
4806 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4808 act
._sa_handler
= old_act
->_sa_handler
;
4809 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
4810 act
.sa_flags
= old_act
->sa_flags
;
4811 unlock_user_struct(old_act
, arg2
, 0);
4817 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4819 if (!is_error(ret
) && arg3
) {
4820 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4822 old_act
->_sa_handler
= oact
._sa_handler
;
4823 old_act
->sa_flags
= oact
.sa_flags
;
4824 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
4825 old_act
->sa_mask
.sig
[1] = 0;
4826 old_act
->sa_mask
.sig
[2] = 0;
4827 old_act
->sa_mask
.sig
[3] = 0;
4828 unlock_user_struct(old_act
, arg3
, 1);
4834 case TARGET_NR_rt_sigaction
:
4836 struct target_sigaction
*act
;
4837 struct target_sigaction
*oact
;
4840 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
4845 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
4846 ret
= -TARGET_EFAULT
;
4847 goto rt_sigaction_fail
;
4851 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
4854 unlock_user_struct(act
, arg2
, 0);
4856 unlock_user_struct(oact
, arg3
, 1);
4859 #ifdef TARGET_NR_sgetmask /* not on alpha */
4860 case TARGET_NR_sgetmask
:
4863 abi_ulong target_set
;
4864 sigprocmask(0, NULL
, &cur_set
);
4865 host_to_target_old_sigset(&target_set
, &cur_set
);
4870 #ifdef TARGET_NR_ssetmask /* not on alpha */
4871 case TARGET_NR_ssetmask
:
4873 sigset_t set
, oset
, cur_set
;
4874 abi_ulong target_set
= arg1
;
4875 sigprocmask(0, NULL
, &cur_set
);
4876 target_to_host_old_sigset(&set
, &target_set
);
4877 sigorset(&set
, &set
, &cur_set
);
4878 sigprocmask(SIG_SETMASK
, &set
, &oset
);
4879 host_to_target_old_sigset(&target_set
, &oset
);
4884 #ifdef TARGET_NR_sigprocmask
4885 case TARGET_NR_sigprocmask
:
4888 sigset_t set
, oldset
, *set_ptr
;
4892 case TARGET_SIG_BLOCK
:
4895 case TARGET_SIG_UNBLOCK
:
4898 case TARGET_SIG_SETMASK
:
4902 ret
= -TARGET_EINVAL
;
4905 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
4907 target_to_host_old_sigset(&set
, p
);
4908 unlock_user(p
, arg2
, 0);
4914 ret
= get_errno(sigprocmask(arg1
, set_ptr
, &oldset
));
4915 if (!is_error(ret
) && arg3
) {
4916 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
4918 host_to_target_old_sigset(p
, &oldset
);
4919 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
4924 case TARGET_NR_rt_sigprocmask
:
4927 sigset_t set
, oldset
, *set_ptr
;
4931 case TARGET_SIG_BLOCK
:
4934 case TARGET_SIG_UNBLOCK
:
4937 case TARGET_SIG_SETMASK
:
4941 ret
= -TARGET_EINVAL
;
4944 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
4946 target_to_host_sigset(&set
, p
);
4947 unlock_user(p
, arg2
, 0);
4953 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
4954 if (!is_error(ret
) && arg3
) {
4955 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
4957 host_to_target_sigset(p
, &oldset
);
4958 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
4962 #ifdef TARGET_NR_sigpending
4963 case TARGET_NR_sigpending
:
4966 ret
= get_errno(sigpending(&set
));
4967 if (!is_error(ret
)) {
4968 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
4970 host_to_target_old_sigset(p
, &set
);
4971 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
4976 case TARGET_NR_rt_sigpending
:
4979 ret
= get_errno(sigpending(&set
));
4980 if (!is_error(ret
)) {
4981 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
4983 host_to_target_sigset(p
, &set
);
4984 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
4988 #ifdef TARGET_NR_sigsuspend
4989 case TARGET_NR_sigsuspend
:
4992 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
4994 target_to_host_old_sigset(&set
, p
);
4995 unlock_user(p
, arg1
, 0);
4996 ret
= get_errno(sigsuspend(&set
));
5000 case TARGET_NR_rt_sigsuspend
:
5003 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5005 target_to_host_sigset(&set
, p
);
5006 unlock_user(p
, arg1
, 0);
5007 ret
= get_errno(sigsuspend(&set
));
5010 case TARGET_NR_rt_sigtimedwait
:
5013 struct timespec uts
, *puts
;
5016 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5018 target_to_host_sigset(&set
, p
);
5019 unlock_user(p
, arg1
, 0);
5022 target_to_host_timespec(puts
, arg3
);
5026 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
5027 if (!is_error(ret
) && arg2
) {
5028 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
5030 host_to_target_siginfo(p
, &uinfo
);
5031 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
5035 case TARGET_NR_rt_sigqueueinfo
:
5038 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
5040 target_to_host_siginfo(&uinfo
, p
);
5041 unlock_user(p
, arg1
, 0);
5042 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding must be done */
        ret = do_sigreturn(cpu_env);
        break;
#endif
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding must be done */
        ret = do_rt_sigreturn(cpu_env);
        break;
5055 case TARGET_NR_sethostname
:
5056 if (!(p
= lock_user_string(arg1
)))
5058 ret
= get_errno(sethostname(p
, arg2
));
5059 unlock_user(p
, arg1
, 0);
5061 case TARGET_NR_setrlimit
:
5063 /* XXX: convert resource ? */
5064 int resource
= arg1
;
5065 struct target_rlimit
*target_rlim
;
5067 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
5069 rlim
.rlim_cur
= tswapl(target_rlim
->rlim_cur
);
5070 rlim
.rlim_max
= tswapl(target_rlim
->rlim_max
);
5071 unlock_user_struct(target_rlim
, arg2
, 0);
5072 ret
= get_errno(setrlimit(resource
, &rlim
));
5075 case TARGET_NR_getrlimit
:
5077 /* XXX: convert resource ? */
5078 int resource
= arg1
;
5079 struct target_rlimit
*target_rlim
;
5082 ret
= get_errno(getrlimit(resource
, &rlim
));
5083 if (!is_error(ret
)) {
5084 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5086 target_rlim
->rlim_cur
= tswapl(rlim
.rlim_cur
);
5087 target_rlim
->rlim_max
= tswapl(rlim
.rlim_max
);
5088 unlock_user_struct(target_rlim
, arg2
, 1);
5092 case TARGET_NR_getrusage
:
5094 struct rusage rusage
;
5095 ret
= get_errno(getrusage(arg1
, &rusage
));
5096 if (!is_error(ret
)) {
5097 host_to_target_rusage(arg2
, &rusage
);
5101 case TARGET_NR_gettimeofday
:
5104 ret
= get_errno(gettimeofday(&tv
, NULL
));
5105 if (!is_error(ret
)) {
5106 if (copy_to_user_timeval(arg1
, &tv
))
5111 case TARGET_NR_settimeofday
:
5114 if (copy_from_user_timeval(&tv
, arg1
))
5116 ret
= get_errno(settimeofday(&tv
, NULL
));
5119 #ifdef TARGET_NR_select
5120 case TARGET_NR_select
:
5122 struct target_sel_arg_struct
*sel
;
5123 abi_ulong inp
, outp
, exp
, tvp
;
5126 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
5128 nsel
= tswapl(sel
->n
);
5129 inp
= tswapl(sel
->inp
);
5130 outp
= tswapl(sel
->outp
);
5131 exp
= tswapl(sel
->exp
);
5132 tvp
= tswapl(sel
->tvp
);
5133 unlock_user_struct(sel
, arg1
, 0);
5134 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
5138 case TARGET_NR_symlink
:
5141 p
= lock_user_string(arg1
);
5142 p2
= lock_user_string(arg2
);
5144 ret
= -TARGET_EFAULT
;
5146 ret
= get_errno(symlink(p
, p2
));
5147 unlock_user(p2
, arg2
, 0);
5148 unlock_user(p
, arg1
, 0);
5151 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5152 case TARGET_NR_symlinkat
:
5155 p
= lock_user_string(arg1
);
5156 p2
= lock_user_string(arg3
);
5158 ret
= -TARGET_EFAULT
;
5160 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
5161 unlock_user(p2
, arg3
, 0);
5162 unlock_user(p
, arg1
, 0);
#ifdef TARGET_NR_oldlstat
    case TARGET_NR_oldlstat:
        goto unimplemented;
#endif
    case TARGET_NR_readlink:
        {
            void *p2, *temp;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else {
                if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
                    char real[PATH_MAX];
                    temp = realpath(exec_path, real);
                    ret = (temp == NULL) ? get_errno(-1) : strlen(real);
                    snprintf((char *)p2, arg3, "%s", real);
                } else
                    ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        break;
5191 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5192 case TARGET_NR_readlinkat
:
5195 p
= lock_user_string(arg2
);
5196 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
5198 ret
= -TARGET_EFAULT
;
5200 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
5201 unlock_user(p2
, arg3
, ret
);
5202 unlock_user(p
, arg2
, 0);
5206 #ifdef TARGET_NR_uselib
5207 case TARGET_NR_uselib
:
5210 #ifdef TARGET_NR_swapon
5211 case TARGET_NR_swapon
:
5212 if (!(p
= lock_user_string(arg1
)))
5214 ret
= get_errno(swapon(p
, arg2
));
5215 unlock_user(p
, arg1
, 0);
5218 case TARGET_NR_reboot
:
5220 #ifdef TARGET_NR_readdir
5221 case TARGET_NR_readdir
:
5224 #ifdef TARGET_NR_mmap
5225 case TARGET_NR_mmap
:
5226 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE)
5229 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
5230 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
5238 unlock_user(v
, arg1
, 0);
5239 ret
= get_errno(target_mmap(v1
, v2
, v3
,
5240 target_to_host_bitmask(v4
, mmap_flags_tbl
),
5244 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5245 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5251 #ifdef TARGET_NR_mmap2
5252 case TARGET_NR_mmap2
:
5254 #define MMAP_SHIFT 12
5256 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5257 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5259 arg6
<< MMAP_SHIFT
));
    case TARGET_NR_munmap:
        ret = get_errno(target_munmap(arg1, arg2));
        break;
    case TARGET_NR_mprotect:
        ret = get_errno(target_mprotect(arg1, arg2, arg3));
        break;
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
        break;
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        ret = get_errno(msync(g2h(arg1), arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        ret = get_errno(mlock(g2h(arg1), arg2));
        break;
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        ret = get_errno(munlock(g2h(arg1), arg2));
        break;
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        ret = get_errno(mlockall(arg1));
        break;
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        ret = get_errno(munlockall());
        break;
#endif
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_ftruncate:
        ret = get_errno(ftruncate(arg1, arg2));
        break;
    case TARGET_NR_fchmod:
        ret = get_errno(fchmod(arg1, arg2));
        break;
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_fchmodat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_getpriority:
        /* libc does special remapping of the return value of
         * sys_getpriority() so it's just easiest to call
         * sys_getpriority() directly rather than through libc. */
        ret = get_errno(sys_getpriority(arg1, arg2));
        break;
    case TARGET_NR_setpriority:
        ret = get_errno(setpriority(arg1, arg2, arg3));
        break;
5328 #ifdef TARGET_NR_profil
5329 case TARGET_NR_profil
:
5332 case TARGET_NR_statfs
:
5333 if (!(p
= lock_user_string(arg1
)))
5335 ret
= get_errno(statfs(path(p
), &stfs
));
5336 unlock_user(p
, arg1
, 0);
5338 if (!is_error(ret
)) {
5339 struct target_statfs
*target_stfs
;
5341 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
5343 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5344 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5345 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5346 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5347 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5348 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5349 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5350 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5351 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5352 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5353 unlock_user_struct(target_stfs
, arg2
, 1);
5356 case TARGET_NR_fstatfs
:
5357 ret
= get_errno(fstatfs(arg1
, &stfs
));
5358 goto convert_statfs
;
5359 #ifdef TARGET_NR_statfs64
5360 case TARGET_NR_statfs64
:
5361 if (!(p
= lock_user_string(arg1
)))
5363 ret
= get_errno(statfs(path(p
), &stfs
));
5364 unlock_user(p
, arg1
, 0);
5366 if (!is_error(ret
)) {
5367 struct target_statfs64
*target_stfs
;
5369 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
5371 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5372 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5373 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5374 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5375 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5376 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5377 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5378 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5379 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5380 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5381 unlock_user_struct(target_stfs
, arg3
, 1);
5384 case TARGET_NR_fstatfs64
:
5385 ret
= get_errno(fstatfs(arg1
, &stfs
));
5386 goto convert_statfs64
;
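    /* Network syscalls: targets with individual socket syscalls dispatch
     * straight to the do_bind()/do_connect()/... helpers below; targets that
     * multiplex them through socketcall reach the same helpers via
     * do_socketcall(). */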
#ifdef TARGET_NR_ioperm
    case TARGET_NR_ioperm:
        goto unimplemented;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        ret = do_socketcall(arg1, arg2);
        break;
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        ret = do_accept(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        ret = do_bind(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        ret = do_connect(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        ret = do_getpeername(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        ret = do_getsockname(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        ret = get_errno(listen(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
        break;
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
        break;
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        ret = get_errno(shutdown(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        ret = do_socket(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        ret = do_socketpair(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
        break;
#endif
5483 case TARGET_NR_syslog
:
5484 if (!(p
= lock_user_string(arg2
)))
5486 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
5487 unlock_user(p
, arg2
, 0);
5490 case TARGET_NR_setitimer
:
5492 struct itimerval value
, ovalue
, *pvalue
;
5496 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
5497 || copy_from_user_timeval(&pvalue
->it_value
,
5498 arg2
+ sizeof(struct target_timeval
)))
5503 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
5504 if (!is_error(ret
) && arg3
) {
5505 if (copy_to_user_timeval(arg3
,
5506 &ovalue
.it_interval
)
5507 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
5513 case TARGET_NR_getitimer
:
5515 struct itimerval value
;
5517 ret
= get_errno(getitimer(arg1
, &value
));
5518 if (!is_error(ret
) && arg2
) {
5519 if (copy_to_user_timeval(arg2
,
5521 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
5527 case TARGET_NR_stat
:
5528 if (!(p
= lock_user_string(arg1
)))
5530 ret
= get_errno(stat(path(p
), &st
));
5531 unlock_user(p
, arg1
, 0);
5533 case TARGET_NR_lstat
:
5534 if (!(p
= lock_user_string(arg1
)))
5536 ret
= get_errno(lstat(path(p
), &st
));
5537 unlock_user(p
, arg1
, 0);
5539 case TARGET_NR_fstat
:
5541 ret
= get_errno(fstat(arg1
, &st
));
5543 if (!is_error(ret
)) {
5544 struct target_stat
*target_st
;
5546 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
5548 memset(target_st
, 0, sizeof(*target_st
));
5549 __put_user(st
.st_dev
, &target_st
->st_dev
);
5550 __put_user(st
.st_ino
, &target_st
->st_ino
);
5551 __put_user(st
.st_mode
, &target_st
->st_mode
);
5552 __put_user(st
.st_uid
, &target_st
->st_uid
);
5553 __put_user(st
.st_gid
, &target_st
->st_gid
);
5554 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
5555 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
5556 __put_user(st
.st_size
, &target_st
->st_size
);
5557 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
5558 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
5559 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
5560 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
5561 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
5562 unlock_user_struct(target_st
, arg2
, 1);
5566 #ifdef TARGET_NR_olduname
5567 case TARGET_NR_olduname
:
5570 #ifdef TARGET_NR_iopl
5571 case TARGET_NR_iopl
:
5574 case TARGET_NR_vhangup
:
5575 ret
= get_errno(vhangup());
5577 #ifdef TARGET_NR_idle
5578 case TARGET_NR_idle
:
5581 #ifdef TARGET_NR_syscall
5582 case TARGET_NR_syscall
:
5583 ret
= do_syscall(cpu_env
,arg1
& 0xffff,arg2
,arg3
,arg4
,arg5
,arg6
,0);
5586 case TARGET_NR_wait4
:
5589 abi_long status_ptr
= arg2
;
5590 struct rusage rusage
, *rusage_ptr
;
5591 abi_ulong target_rusage
= arg4
;
5593 rusage_ptr
= &rusage
;
5596 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
5597 if (!is_error(ret
)) {
5599 status
= host_to_target_waitstatus(status
);
5600 if (put_user_s32(status
, status_ptr
))
5604 host_to_target_rusage(target_rusage
, &rusage
);
5608 #ifdef TARGET_NR_swapoff
5609 case TARGET_NR_swapoff
:
5610 if (!(p
= lock_user_string(arg1
)))
5612 ret
= get_errno(swapoff(p
));
5613 unlock_user(p
, arg1
, 0);
5616 case TARGET_NR_sysinfo
:
5618 struct target_sysinfo
*target_value
;
5619 struct sysinfo value
;
5620 ret
= get_errno(sysinfo(&value
));
5621 if (!is_error(ret
) && arg1
)
5623 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
5625 __put_user(value
.uptime
, &target_value
->uptime
);
5626 __put_user(value
.loads
[0], &target_value
->loads
[0]);
5627 __put_user(value
.loads
[1], &target_value
->loads
[1]);
5628 __put_user(value
.loads
[2], &target_value
->loads
[2]);
5629 __put_user(value
.totalram
, &target_value
->totalram
);
5630 __put_user(value
.freeram
, &target_value
->freeram
);
5631 __put_user(value
.sharedram
, &target_value
->sharedram
);
5632 __put_user(value
.bufferram
, &target_value
->bufferram
);
5633 __put_user(value
.totalswap
, &target_value
->totalswap
);
5634 __put_user(value
.freeswap
, &target_value
->freeswap
);
5635 __put_user(value
.procs
, &target_value
->procs
);
5636 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
5637 __put_user(value
.freehigh
, &target_value
->freehigh
);
5638 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
5639 unlock_user_struct(target_value
, arg1
, 1);
5643 #ifdef TARGET_NR_ipc
5645 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5648 #ifdef TARGET_NR_semget
5649 case TARGET_NR_semget
:
5650 ret
= get_errno(semget(arg1
, arg2
, arg3
));
5653 #ifdef TARGET_NR_semop
5654 case TARGET_NR_semop
:
5655 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
5658 #ifdef TARGET_NR_semctl
5659 case TARGET_NR_semctl
:
5660 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
5663 #ifdef TARGET_NR_msgctl
5664 case TARGET_NR_msgctl
:
5665 ret
= do_msgctl(arg1
, arg2
, arg3
);
5668 #ifdef TARGET_NR_msgget
5669 case TARGET_NR_msgget
:
5670 ret
= get_errno(msgget(arg1
, arg2
));
5673 #ifdef TARGET_NR_msgrcv
5674 case TARGET_NR_msgrcv
:
5675 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
5678 #ifdef TARGET_NR_msgsnd
5679 case TARGET_NR_msgsnd
:
5680 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
5683 #ifdef TARGET_NR_shmget
5684 case TARGET_NR_shmget
:
5685 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
5688 #ifdef TARGET_NR_shmctl
5689 case TARGET_NR_shmctl
:
5690 ret
= do_shmctl(arg1
, arg2
, arg3
);
5693 #ifdef TARGET_NR_shmat
5694 case TARGET_NR_shmat
:
5695 ret
= do_shmat(arg1
, arg2
, arg3
);
5698 #ifdef TARGET_NR_shmdt
5699 case TARGET_NR_shmdt
:
5700 ret
= do_shmdt(arg1
);
5703 case TARGET_NR_fsync
:
5704 ret
= get_errno(fsync(arg1
));
5706 case TARGET_NR_clone
:
5707 #if defined(TARGET_SH4)
5708 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
5709 #elif defined(TARGET_CRIS)
5710 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
5712 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
5715 #ifdef __NR_exit_group
5716 /* new thread calls */
5717 case TARGET_NR_exit_group
:
5721 gdb_exit(cpu_env
, arg1
);
5722 ret
= get_errno(exit_group(arg1
));
5725 case TARGET_NR_setdomainname
:
5726 if (!(p
= lock_user_string(arg1
)))
5728 ret
= get_errno(setdomainname(p
, arg2
));
5729 unlock_user(p
, arg1
, 0);
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                goto efault;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                strcpy (buf->machine, UNAME_MACHINE);
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release)
                    strcpy (buf->release, qemu_uname_release);
            }
            unlock_user_struct(buf, arg1, 1);
        }
        break;
5751 case TARGET_NR_modify_ldt
:
5752 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
5754 #if !defined(TARGET_X86_64)
5755 case TARGET_NR_vm86old
:
5757 case TARGET_NR_vm86
:
5758 ret
= do_vm86(cpu_env
, arg1
, arg2
);
5762 case TARGET_NR_adjtimex
:
5764 #ifdef TARGET_NR_create_module
5765 case TARGET_NR_create_module
:
5767 case TARGET_NR_init_module
:
5768 case TARGET_NR_delete_module
:
5769 #ifdef TARGET_NR_get_kernel_syms
5770 case TARGET_NR_get_kernel_syms
:
5773 case TARGET_NR_quotactl
:
5775 case TARGET_NR_getpgid
:
5776 ret
= get_errno(getpgid(arg1
));
5778 case TARGET_NR_fchdir
:
5779 ret
= get_errno(fchdir(arg1
));
5781 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5782 case TARGET_NR_bdflush
:
5785 #ifdef TARGET_NR_sysfs
5786 case TARGET_NR_sysfs
:
5789 case TARGET_NR_personality
:
5790 ret
= get_errno(personality(arg1
));
5792 #ifdef TARGET_NR_afs_syscall
5793 case TARGET_NR_afs_syscall
:
5796 #ifdef TARGET_NR__llseek /* Not on alpha */
5797 case TARGET_NR__llseek
:
5799 #if defined (__x86_64__)
5800 ret
= get_errno(lseek(arg1
, ((uint64_t )arg2
<< 32) | arg3
, arg5
));
5801 if (put_user_s64(ret
, arg4
))
5805 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
5806 if (put_user_s64(res
, arg4
))
5812 case TARGET_NR_getdents
:
5813 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5815 struct target_dirent
*target_dirp
;
5816 struct linux_dirent
*dirp
;
5817 abi_long count
= arg3
;
5819 dirp
= malloc(count
);
5821 ret
= -TARGET_ENOMEM
;
5825 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
5826 if (!is_error(ret
)) {
5827 struct linux_dirent
*de
;
5828 struct target_dirent
*tde
;
5830 int reclen
, treclen
;
5831 int count1
, tnamelen
;
5835 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5839 reclen
= de
->d_reclen
;
5840 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
5841 tde
->d_reclen
= tswap16(treclen
);
5842 tde
->d_ino
= tswapl(de
->d_ino
);
5843 tde
->d_off
= tswapl(de
->d_off
);
5844 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
5847 /* XXX: may not be correct */
5848 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
5849 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
5851 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
5855 unlock_user(target_dirp
, arg2
, ret
);
5861 struct linux_dirent
*dirp
;
5862 abi_long count
= arg3
;
5864 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5866 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
5867 if (!is_error(ret
)) {
5868 struct linux_dirent
*de
;
5873 reclen
= de
->d_reclen
;
5876 de
->d_reclen
= tswap16(reclen
);
5877 tswapls(&de
->d_ino
);
5878 tswapls(&de
->d_off
);
5879 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
5883 unlock_user(dirp
, arg2
, ret
);
5887 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5888 case TARGET_NR_getdents64
:
5890 struct linux_dirent64
*dirp
;
5891 abi_long count
= arg3
;
5892 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5894 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
5895 if (!is_error(ret
)) {
5896 struct linux_dirent64
*de
;
5901 reclen
= de
->d_reclen
;
5904 de
->d_reclen
= tswap16(reclen
);
5905 tswap64s((uint64_t *)&de
->d_ino
);
5906 tswap64s((uint64_t *)&de
->d_off
);
5907 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
5911 unlock_user(dirp
, arg2
, ret
);
5914 #endif /* TARGET_NR_getdents64 */
5915 #ifdef TARGET_NR__newselect
5916 case TARGET_NR__newselect
:
5917 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
5920 #ifdef TARGET_NR_poll
5921 case TARGET_NR_poll
:
5923 struct target_pollfd
*target_pfd
;
5924 unsigned int nfds
= arg2
;
5929 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
5932 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
5933 for(i
= 0; i
< nfds
; i
++) {
5934 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
5935 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
5937 ret
= get_errno(poll(pfd
, nfds
, timeout
));
5938 if (!is_error(ret
)) {
5939 for(i
= 0; i
< nfds
; i
++) {
5940 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
5942 ret
+= nfds
* (sizeof(struct target_pollfd
)
5943 - sizeof(struct pollfd
));
5945 unlock_user(target_pfd
, arg1
, ret
);
5949 case TARGET_NR_flock
:
5950 /* NOTE: the flock constant seems to be the same for every
5952 ret
= get_errno(flock(arg1
, arg2
));
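    /* readv/writev: lock_iovec() translates the guest iovec array into host
     * struct iovec entries (locking each buffer), and unlock_iovec() releases
     * them and writes back any data that was read. */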
    case TARGET_NR_readv:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
                goto efault;
            ret = get_errno(readv(arg1, vec, count));
            unlock_iovec(vec, arg2, count, 1);
        }
        break;
    case TARGET_NR_writev:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(writev(arg1, vec, count));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
    case TARGET_NR_getsid:
        ret = get_errno(getsid(arg1));
        break;
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        ret = get_errno(fdatasync(arg1));
        break;
#endif
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        ret = -TARGET_ENOTDIR;
        break;
5991 case TARGET_NR_sched_setparam
:
5993 struct sched_param
*target_schp
;
5994 struct sched_param schp
;
5996 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
5998 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
5999 unlock_user_struct(target_schp
, arg2
, 0);
6000 ret
= get_errno(sched_setparam(arg1
, &schp
));
6003 case TARGET_NR_sched_getparam
:
6005 struct sched_param
*target_schp
;
6006 struct sched_param schp
;
6007 ret
= get_errno(sched_getparam(arg1
, &schp
));
6008 if (!is_error(ret
)) {
6009 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
6011 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
6012 unlock_user_struct(target_schp
, arg2
, 1);
6016 case TARGET_NR_sched_setscheduler
:
6018 struct sched_param
*target_schp
;
6019 struct sched_param schp
;
6020 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
6022 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6023 unlock_user_struct(target_schp
, arg3
, 0);
6024 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
6027 case TARGET_NR_sched_getscheduler
:
6028 ret
= get_errno(sched_getscheduler(arg1
));
6030 case TARGET_NR_sched_yield
:
6031 ret
= get_errno(sched_yield());
6033 case TARGET_NR_sched_get_priority_max
:
6034 ret
= get_errno(sched_get_priority_max(arg1
));
6036 case TARGET_NR_sched_get_priority_min
:
6037 ret
= get_errno(sched_get_priority_min(arg1
));
6039 case TARGET_NR_sched_rr_get_interval
:
6042 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
6043 if (!is_error(ret
)) {
6044 host_to_target_timespec(arg2
, &ts
);
6048 case TARGET_NR_nanosleep
:
6050 struct timespec req
, rem
;
6051 target_to_host_timespec(&req
, arg1
);
6052 ret
= get_errno(nanosleep(&req
, &rem
));
6053 if (is_error(ret
) && arg2
) {
6054 host_to_target_timespec(arg2
, &rem
);
6058 #ifdef TARGET_NR_query_module
6059 case TARGET_NR_query_module
:
6062 #ifdef TARGET_NR_nfsservctl
6063 case TARGET_NR_nfsservctl
:
6066 case TARGET_NR_prctl
:
6069 case PR_GET_PDEATHSIG
:
6072 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
6073 if (!is_error(ret
) && arg2
6074 && put_user_ual(deathsig
, arg2
))
6079 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
6083 #ifdef TARGET_NR_arch_prctl
6084 case TARGET_NR_arch_prctl
:
6085 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6086 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
6092 #ifdef TARGET_NR_pread
6093 case TARGET_NR_pread
:
6095 if (((CPUARMState
*)cpu_env
)->eabi
)
6098 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6100 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
6101 unlock_user(p
, arg2
, ret
);
6103 case TARGET_NR_pwrite
:
6105 if (((CPUARMState
*)cpu_env
)->eabi
)
6108 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6110 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
6111 unlock_user(p
, arg2
, 0);
6114 #ifdef TARGET_NR_pread64
6115 case TARGET_NR_pread64
:
6116 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6118 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6119 unlock_user(p
, arg2
, ret
);
6121 case TARGET_NR_pwrite64
:
6122 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6124 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6125 unlock_user(p
, arg2
, 0);
6128 case TARGET_NR_getcwd
:
6129 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
6131 ret
= get_errno(sys_getcwd1(p
, arg2
));
6132 unlock_user(p
, arg1
, ret
);
6134 case TARGET_NR_capget
:
6136 case TARGET_NR_capset
:
6138 case TARGET_NR_sigaltstack
:
6139 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6140 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6141 defined(TARGET_M68K)
6142 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUState
*)cpu_env
));
6147 case TARGET_NR_sendfile
:
6149 #ifdef TARGET_NR_getpmsg
6150 case TARGET_NR_getpmsg
:
6153 #ifdef TARGET_NR_putpmsg
6154 case TARGET_NR_putpmsg
:
6157 #ifdef TARGET_NR_vfork
6158 case TARGET_NR_vfork
:
6159 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
6163 #ifdef TARGET_NR_ugetrlimit
6164 case TARGET_NR_ugetrlimit
:
6167 ret
= get_errno(getrlimit(arg1
, &rlim
));
6168 if (!is_error(ret
)) {
6169 struct target_rlimit
*target_rlim
;
6170 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6172 target_rlim
->rlim_cur
= tswapl(rlim
.rlim_cur
);
6173 target_rlim
->rlim_max
= tswapl(rlim
.rlim_max
);
6174 unlock_user_struct(target_rlim
, arg2
, 1);
6179 #ifdef TARGET_NR_truncate64
6180 case TARGET_NR_truncate64
:
6181 if (!(p
= lock_user_string(arg1
)))
6183 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
6184 unlock_user(p
, arg1
, 0);
6187 #ifdef TARGET_NR_ftruncate64
6188 case TARGET_NR_ftruncate64
:
6189 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
6192 #ifdef TARGET_NR_stat64
6193 case TARGET_NR_stat64
:
6194 if (!(p
= lock_user_string(arg1
)))
6196 ret
= get_errno(stat(path(p
), &st
));
6197 unlock_user(p
, arg1
, 0);
6199 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6202 #ifdef TARGET_NR_lstat64
6203 case TARGET_NR_lstat64
:
6204 if (!(p
= lock_user_string(arg1
)))
6206 ret
= get_errno(lstat(path(p
), &st
));
6207 unlock_user(p
, arg1
, 0);
6209 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6212 #ifdef TARGET_NR_fstat64
6213 case TARGET_NR_fstat64
:
6214 ret
= get_errno(fstat(arg1
, &st
));
6216 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6219 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6220 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6221 #ifdef TARGET_NR_fstatat64
6222 case TARGET_NR_fstatat64
:
6224 #ifdef TARGET_NR_newfstatat
6225 case TARGET_NR_newfstatat
:
6227 if (!(p
= lock_user_string(arg2
)))
6229 #ifdef __NR_fstatat64
6230 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
6232 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
6235 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
6239 case TARGET_NR_lchown
:
6240 if (!(p
= lock_user_string(arg1
)))
6242 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6243 unlock_user(p
, arg1
, 0);
6245 case TARGET_NR_getuid
:
6246 ret
= get_errno(high2lowuid(getuid()));
6248 case TARGET_NR_getgid
:
6249 ret
= get_errno(high2lowgid(getgid()));
6251 case TARGET_NR_geteuid
:
6252 ret
= get_errno(high2lowuid(geteuid()));
6254 case TARGET_NR_getegid
:
6255 ret
= get_errno(high2lowgid(getegid()));
6257 case TARGET_NR_setreuid
:
6258 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
6260 case TARGET_NR_setregid
:
6261 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
6263 case TARGET_NR_getgroups
:
6265 int gidsetsize
= arg1
;
6266 uint16_t *target_grouplist
;
6270 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6271 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6272 if (gidsetsize
== 0)
6274 if (!is_error(ret
)) {
6275 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
6276 if (!target_grouplist
)
6278 for(i
= 0;i
< ret
; i
++)
6279 target_grouplist
[i
] = tswap16(grouplist
[i
]);
6280 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
6284 case TARGET_NR_setgroups
:
6286 int gidsetsize
= arg1
;
6287 uint16_t *target_grouplist
;
6291 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6292 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
6293 if (!target_grouplist
) {
6294 ret
= -TARGET_EFAULT
;
6297 for(i
= 0;i
< gidsetsize
; i
++)
6298 grouplist
[i
] = tswap16(target_grouplist
[i
]);
6299 unlock_user(target_grouplist
, arg2
, 0);
6300 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6303 case TARGET_NR_fchown
:
6304 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
6306 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6307 case TARGET_NR_fchownat
:
6308 if (!(p
= lock_user_string(arg2
)))
6310 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
6311 unlock_user(p
, arg2
, 0);
6314 #ifdef TARGET_NR_setresuid
6315 case TARGET_NR_setresuid
:
6316 ret
= get_errno(setresuid(low2highuid(arg1
),
6318 low2highuid(arg3
)));
6321 #ifdef TARGET_NR_getresuid
6322 case TARGET_NR_getresuid
:
6324 uid_t ruid
, euid
, suid
;
6325 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6326 if (!is_error(ret
)) {
6327 if (put_user_u16(high2lowuid(ruid
), arg1
)
6328 || put_user_u16(high2lowuid(euid
), arg2
)
6329 || put_user_u16(high2lowuid(suid
), arg3
))
6335 #ifdef TARGET_NR_getresgid
6336 case TARGET_NR_setresgid
:
6337 ret
= get_errno(setresgid(low2highgid(arg1
),
6339 low2highgid(arg3
)));
6342 #ifdef TARGET_NR_getresgid
6343 case TARGET_NR_getresgid
:
6345 gid_t rgid
, egid
, sgid
;
6346 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6347 if (!is_error(ret
)) {
6348 if (put_user_u16(high2lowgid(rgid
), arg1
)
6349 || put_user_u16(high2lowgid(egid
), arg2
)
6350 || put_user_u16(high2lowgid(sgid
), arg3
))
6356 case TARGET_NR_chown
:
6357 if (!(p
= lock_user_string(arg1
)))
6359 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6360 unlock_user(p
, arg1
, 0);
6362 case TARGET_NR_setuid
:
6363 ret
= get_errno(setuid(low2highuid(arg1
)));
6365 case TARGET_NR_setgid
:
6366 ret
= get_errno(setgid(low2highgid(arg1
)));
6368 case TARGET_NR_setfsuid
:
6369 ret
= get_errno(setfsuid(arg1
));
6371 case TARGET_NR_setfsgid
:
6372 ret
= get_errno(setfsgid(arg1
));
6374 #endif /* USE_UID16 */
6376 #ifdef TARGET_NR_lchown32
6377 case TARGET_NR_lchown32
:
6378 if (!(p
= lock_user_string(arg1
)))
6380 ret
= get_errno(lchown(p
, arg2
, arg3
));
6381 unlock_user(p
, arg1
, 0);
6384 #ifdef TARGET_NR_getuid32
6385 case TARGET_NR_getuid32
:
6386 ret
= get_errno(getuid());
6390 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6391 /* Alpha specific */
6392 case TARGET_NR_getxuid
:
6396 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
6398 ret
= get_errno(getuid());
6401 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6402 /* Alpha specific */
6403 case TARGET_NR_getxgid
:
6407 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
6409 ret
= get_errno(getgid());
6412 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
6413 /* Alpha specific */
6414 case TARGET_NR_osf_getsysinfo
:
6415 ret
= -TARGET_EOPNOTSUPP
;
6417 case TARGET_GSI_IEEE_FP_CONTROL
:
6419 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
6421 /* Copied from linux ieee_fpcr_to_swcr. */
6422 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
6423 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
6424 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
6425 | SWCR_TRAP_ENABLE_DZE
6426 | SWCR_TRAP_ENABLE_OVF
);
6427 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
6428 | SWCR_TRAP_ENABLE_INE
);
6429 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
6430 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
6432 if (put_user_u64 (swcr
, arg2
))
6438 /* case GSI_IEEE_STATE_AT_SIGNAL:
6439 -- Not implemented in linux kernel.
6441 -- Retrieves current unaligned access state; not much used.
6443 -- Retrieves implver information; surely not used.
6445 -- Grabs a copy of the HWRPB; surely not used.
6450 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
6451 /* Alpha specific */
6452 case TARGET_NR_osf_setsysinfo
:
6453 ret
= -TARGET_EOPNOTSUPP
;
6455 case TARGET_SSI_IEEE_FP_CONTROL
:
6456 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
6458 uint64_t swcr
, fpcr
, orig_fpcr
;
6460 if (get_user_u64 (swcr
, arg2
))
6462 orig_fpcr
= cpu_alpha_load_fpcr (cpu_env
);
6463 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
6465 /* Copied from linux ieee_swcr_to_fpcr. */
6466 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
6467 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
6468 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
6469 | SWCR_TRAP_ENABLE_DZE
6470 | SWCR_TRAP_ENABLE_OVF
)) << 48;
6471 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
6472 | SWCR_TRAP_ENABLE_INE
)) << 57;
6473 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
6474 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
6476 cpu_alpha_store_fpcr (cpu_env
, fpcr
);
6479 if (arg1
== TARGET_SSI_IEEE_RAISE_EXCEPTION
) {
6480 /* Old exceptions are not signaled. */
6481 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
6483 /* If any exceptions set by this call, and are unmasked,
6490 /* case SSI_NVPAIRS:
6491 -- Used with SSIN_UACPROC to enable unaligned accesses.
6492 case SSI_IEEE_STATE_AT_SIGNAL:
6493 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
6494 -- Not implemented in linux kernel
6499 #ifdef TARGET_NR_osf_sigprocmask
6500 /* Alpha specific. */
6501 case TARGET_NR_osf_sigprocmask
:
6505 sigset_t set
, oldset
;
6508 case TARGET_SIG_BLOCK
:
6511 case TARGET_SIG_UNBLOCK
:
6514 case TARGET_SIG_SETMASK
:
6518 ret
= -TARGET_EINVAL
;
6522 target_to_host_old_sigset(&set
, &mask
);
6523 sigprocmask(arg1
, &set
, &oldset
);
6524 host_to_target_old_sigset(&mask
, &oldset
);
6530 #ifdef TARGET_NR_getgid32
6531 case TARGET_NR_getgid32
:
6532 ret
= get_errno(getgid());
6535 #ifdef TARGET_NR_geteuid32
6536 case TARGET_NR_geteuid32
:
6537 ret
= get_errno(geteuid());
6540 #ifdef TARGET_NR_getegid32
6541 case TARGET_NR_getegid32
:
6542 ret
= get_errno(getegid());
6545 #ifdef TARGET_NR_setreuid32
6546 case TARGET_NR_setreuid32
:
6547 ret
= get_errno(setreuid(arg1
, arg2
));
6550 #ifdef TARGET_NR_setregid32
6551 case TARGET_NR_setregid32
:
6552 ret
= get_errno(setregid(arg1
, arg2
));
6555 #ifdef TARGET_NR_getgroups32
6556 case TARGET_NR_getgroups32
:
6558 int gidsetsize
= arg1
;
6559 uint32_t *target_grouplist
;
6563 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6564 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6565 if (gidsetsize
== 0)
6567 if (!is_error(ret
)) {
6568 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
6569 if (!target_grouplist
) {
6570 ret
= -TARGET_EFAULT
;
6573 for(i
= 0;i
< ret
; i
++)
6574 target_grouplist
[i
] = tswap32(grouplist
[i
]);
6575 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
6580 #ifdef TARGET_NR_setgroups32
6581 case TARGET_NR_setgroups32
:
6583 int gidsetsize
= arg1
;
6584 uint32_t *target_grouplist
;
6588 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6589 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
6590 if (!target_grouplist
) {
6591 ret
= -TARGET_EFAULT
;
6594 for(i
= 0;i
< gidsetsize
; i
++)
6595 grouplist
[i
] = tswap32(target_grouplist
[i
]);
6596 unlock_user(target_grouplist
, arg2
, 0);
6597 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6601 #ifdef TARGET_NR_fchown32
6602 case TARGET_NR_fchown32
:
6603 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
6606 #ifdef TARGET_NR_setresuid32
6607 case TARGET_NR_setresuid32
:
6608 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
6611 #ifdef TARGET_NR_getresuid32
6612 case TARGET_NR_getresuid32
:
6614 uid_t ruid
, euid
, suid
;
6615 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6616 if (!is_error(ret
)) {
6617 if (put_user_u32(ruid
, arg1
)
6618 || put_user_u32(euid
, arg2
)
6619 || put_user_u32(suid
, arg3
))
6625 #ifdef TARGET_NR_setresgid32
6626 case TARGET_NR_setresgid32
:
6627 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
6630 #ifdef TARGET_NR_getresgid32
6631 case TARGET_NR_getresgid32
:
6633 gid_t rgid
, egid
, sgid
;
6634 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6635 if (!is_error(ret
)) {
6636 if (put_user_u32(rgid
, arg1
)
6637 || put_user_u32(egid
, arg2
)
6638 || put_user_u32(sgid
, arg3
))
6644 #ifdef TARGET_NR_chown32
6645 case TARGET_NR_chown32
:
6646 if (!(p
= lock_user_string(arg1
)))
6648 ret
= get_errno(chown(p
, arg2
, arg3
));
6649 unlock_user(p
, arg1
, 0);
6652 #ifdef TARGET_NR_setuid32
6653 case TARGET_NR_setuid32
:
6654 ret
= get_errno(setuid(arg1
));
6657 #ifdef TARGET_NR_setgid32
6658 case TARGET_NR_setgid32
:
6659 ret
= get_errno(setgid(arg1
));
6662 #ifdef TARGET_NR_setfsuid32
6663 case TARGET_NR_setfsuid32
:
6664 ret
= get_errno(setfsuid(arg1
));
6667 #ifdef TARGET_NR_setfsgid32
6668 case TARGET_NR_setfsgid32
:
6669 ret
= get_errno(setfsgid(arg1
));
6673 case TARGET_NR_pivot_root
:
6675 #ifdef TARGET_NR_mincore
6676 case TARGET_NR_mincore
:
6679 ret
= -TARGET_EFAULT
;
6680 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
6682 if (!(p
= lock_user_string(arg3
)))
6684 ret
= get_errno(mincore(a
, arg2
, p
));
6685 unlock_user(p
, arg3
, ret
);
6687 unlock_user(a
, arg1
, 0);
6691 #ifdef TARGET_NR_arm_fadvise64_64
6692 case TARGET_NR_arm_fadvise64_64
:
6695 * arm_fadvise64_64 looks like fadvise64_64 but
6696 * with different argument order
6704 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
6705 #ifdef TARGET_NR_fadvise64_64
6706 case TARGET_NR_fadvise64_64
:
6708 #ifdef TARGET_NR_fadvise64
6709 case TARGET_NR_fadvise64
:
6713 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
6714 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
6715 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
6716 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
6720 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        ret = 0;
        break;
#endif
6732 #if TARGET_ABI_BITS == 32
6733 case TARGET_NR_fcntl64
:
6737 struct target_flock64
*target_fl
;
6739 struct target_eabi_flock64
*target_efl
;
6742 cmd
= target_to_host_fcntl_cmd(arg2
);
6743 if (cmd
== -TARGET_EINVAL
)
6747 case TARGET_F_GETLK64
:
6749 if (((CPUARMState
*)cpu_env
)->eabi
) {
6750 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
6752 fl
.l_type
= tswap16(target_efl
->l_type
);
6753 fl
.l_whence
= tswap16(target_efl
->l_whence
);
6754 fl
.l_start
= tswap64(target_efl
->l_start
);
6755 fl
.l_len
= tswap64(target_efl
->l_len
);
6756 fl
.l_pid
= tswap32(target_efl
->l_pid
);
6757 unlock_user_struct(target_efl
, arg3
, 0);
6761 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
6763 fl
.l_type
= tswap16(target_fl
->l_type
);
6764 fl
.l_whence
= tswap16(target_fl
->l_whence
);
6765 fl
.l_start
= tswap64(target_fl
->l_start
);
6766 fl
.l_len
= tswap64(target_fl
->l_len
);
6767 fl
.l_pid
= tswap32(target_fl
->l_pid
);
6768 unlock_user_struct(target_fl
, arg3
, 0);
6770 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
6773 if (((CPUARMState
*)cpu_env
)->eabi
) {
6774 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
6776 target_efl
->l_type
= tswap16(fl
.l_type
);
6777 target_efl
->l_whence
= tswap16(fl
.l_whence
);
6778 target_efl
->l_start
= tswap64(fl
.l_start
);
6779 target_efl
->l_len
= tswap64(fl
.l_len
);
6780 target_efl
->l_pid
= tswap32(fl
.l_pid
);
6781 unlock_user_struct(target_efl
, arg3
, 1);
6785 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
6787 target_fl
->l_type
= tswap16(fl
.l_type
);
6788 target_fl
->l_whence
= tswap16(fl
.l_whence
);
6789 target_fl
->l_start
= tswap64(fl
.l_start
);
6790 target_fl
->l_len
= tswap64(fl
.l_len
);
6791 target_fl
->l_pid
= tswap32(fl
.l_pid
);
6792 unlock_user_struct(target_fl
, arg3
, 1);
6797 case TARGET_F_SETLK64
:
6798 case TARGET_F_SETLKW64
:
6800 if (((CPUARMState
*)cpu_env
)->eabi
) {
6801 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
6803 fl
.l_type
= tswap16(target_efl
->l_type
);
6804 fl
.l_whence
= tswap16(target_efl
->l_whence
);
6805 fl
.l_start
= tswap64(target_efl
->l_start
);
6806 fl
.l_len
= tswap64(target_efl
->l_len
);
6807 fl
.l_pid
= tswap32(target_efl
->l_pid
);
6808 unlock_user_struct(target_efl
, arg3
, 0);
6812 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
6814 fl
.l_type
= tswap16(target_fl
->l_type
);
6815 fl
.l_whence
= tswap16(target_fl
->l_whence
);
6816 fl
.l_start
= tswap64(target_fl
->l_start
);
6817 fl
.l_len
= tswap64(target_fl
->l_len
);
6818 fl
.l_pid
= tswap32(target_fl
->l_pid
);
6819 unlock_user_struct(target_fl
, arg3
, 0);
6821 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
6824 ret
= do_fcntl(arg1
, arg2
, arg3
);
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
6848 #ifdef TARGET_NR_readahead
6849 case TARGET_NR_readahead
:
6850 #if TARGET_ABI_BITS == 32
6852 if (((CPUARMState
*)cpu_env
)->eabi
)
6859 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
6861 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
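    /* Extended attribute syscalls are not forwarded to the host; they all
     * report -TARGET_EOPNOTSUPP so guest code can fall back gracefully. */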
#ifdef TARGET_NR_setxattr
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
    case TARGET_NR_fsetxattr:
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    case TARGET_NR_fgetxattr:
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    case TARGET_NR_flistxattr:
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    case TARGET_NR_fremovexattr:
        ret = -TARGET_EOPNOTSUPP;
        break;
#endif
6881 #ifdef TARGET_NR_set_thread_area
6882 case TARGET_NR_set_thread_area
:
6883 #if defined(TARGET_MIPS)
6884 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
6887 #elif defined(TARGET_CRIS)
6889 ret
= -TARGET_EINVAL
;
6891 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
6895 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6896 ret
= do_set_thread_area(cpu_env
, arg1
);
6899 goto unimplemented_nowarn
;
6902 #ifdef TARGET_NR_get_thread_area
6903 case TARGET_NR_get_thread_area
:
6904 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6905 ret
= do_get_thread_area(cpu_env
, arg1
);
6907 goto unimplemented_nowarn
;
6910 #ifdef TARGET_NR_getdomainname
6911 case TARGET_NR_getdomainname
:
6912 goto unimplemented_nowarn
;
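    /* POSIX clock syscalls: timespec values are converted between target and
     * host layouts with target_to_host_timespec()/host_to_target_timespec()
     * around the native clock_* calls. */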
6915 #ifdef TARGET_NR_clock_gettime
6916 case TARGET_NR_clock_gettime
:
6919 ret
= get_errno(clock_gettime(arg1
, &ts
));
6920 if (!is_error(ret
)) {
6921 host_to_target_timespec(arg2
, &ts
);
6926 #ifdef TARGET_NR_clock_getres
6927 case TARGET_NR_clock_getres
:
6930 ret
= get_errno(clock_getres(arg1
, &ts
));
6931 if (!is_error(ret
)) {
6932 host_to_target_timespec(arg2
, &ts
);
6937 #ifdef TARGET_NR_clock_nanosleep
6938 case TARGET_NR_clock_nanosleep
:
6941 target_to_host_timespec(&ts
, arg3
);
6942 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
6944 host_to_target_timespec(arg4
, &ts
);
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr(&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        break;
    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user(p, arg2, arg3);
        }
        break;
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;
        /* Not implemented for now... */
        /* case TARGET_NR_mq_notify: */
    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(vmsplice(arg1, vec, count, arg4));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
        break;
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}