/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <qemu-common.h>
#include <sys/eventfd.h>

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#include "linux_loop.h"
#include "cpu-uname.h"

#include "qemu-common.h"
#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values. */
#define CLONE_NPTL_FLAGS2 0
#endif
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)        \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)                  \
{                                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                        \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,        \
                  type5,arg5)                                                   \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)       \
{                                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);                  \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,        \
                  type5,arg5,type6,arg6)                                        \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,       \
                  type6 arg6)                                                   \
{                                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);            \
}
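/*
 * The _syscallN() macros above generate thin static wrappers around the
 * host's syscall(2) entry point: _syscall3(int, sys_getdents, ...) expands
 * into a sys_getdents() function that just forwards its three arguments to
 * syscall(__NR_sys_getdents, ...).  The __NR_sys_* aliases defined next map
 * those wrapper names onto the real host syscall numbers.
 */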
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
#define __NR__llseek __NR_lseek
#endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
#if defined(TARGET_NR__llseek) && !defined (__x86_64__)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
  { 0, 0, 0, 0 }
};
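/*
 * Translation table for open()/fcntl() flags: each row gives the target
 * mask and value alongside the corresponding host mask and value, so
 * target_to_host_bitmask() can remap flags whose numeric values differ
 * between guest and host ABIs (see the O_CREAT handling in sys_openat()
 * below).
 */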
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct the Linux kernel uses).
   */

  bzero(buf, sizeof (*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);

#undef COPY_UTSNAME_FIELD
}

static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
    return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
    return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat) && defined(USE_UID16)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
    return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
    return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
    return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
    return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
    return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, ...)
{
    /*
     * open(2) has extra parameter 'mode' when called with
     * flag O_CREAT.
     */
    if ((flags & O_CREAT) != 0) {
        va_list ap;
        mode_t mode;

        /*
         * Get the 'mode' parameter and translate it to
         * host bits.
         */
        va_start(ap, flags);
        mode = va_arg(ap, mode_t);
        mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
        va_end(ap);

        return (openat(dirfd, pathname, flags, mode));
    }
    return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
    return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
    return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
    return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
    return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]            = TARGET_EIDRM,
    [ECHRNG]           = TARGET_ECHRNG,
    [EL2NSYNC]         = TARGET_EL2NSYNC,
    [EL3HLT]           = TARGET_EL3HLT,
    [EL3RST]           = TARGET_EL3RST,
    [ELNRNG]           = TARGET_ELNRNG,
    [EUNATCH]          = TARGET_EUNATCH,
    [ENOCSI]           = TARGET_ENOCSI,
    [EL2HLT]           = TARGET_EL2HLT,
    [EDEADLK]          = TARGET_EDEADLK,
    [ENOLCK]           = TARGET_ENOLCK,
    [EBADE]            = TARGET_EBADE,
    [EBADR]            = TARGET_EBADR,
    [EXFULL]           = TARGET_EXFULL,
    [ENOANO]           = TARGET_ENOANO,
    [EBADRQC]          = TARGET_EBADRQC,
    [EBADSLT]          = TARGET_EBADSLT,
    [EBFONT]           = TARGET_EBFONT,
    [ENOSTR]           = TARGET_ENOSTR,
    [ENODATA]          = TARGET_ENODATA,
    [ETIME]            = TARGET_ETIME,
    [ENOSR]            = TARGET_ENOSR,
    [ENONET]           = TARGET_ENONET,
    [ENOPKG]           = TARGET_ENOPKG,
    [EREMOTE]          = TARGET_EREMOTE,
    [ENOLINK]          = TARGET_ENOLINK,
    [EADV]             = TARGET_EADV,
    [ESRMNT]           = TARGET_ESRMNT,
    [ECOMM]            = TARGET_ECOMM,
    [EPROTO]           = TARGET_EPROTO,
    [EDOTDOT]          = TARGET_EDOTDOT,
    [EMULTIHOP]        = TARGET_EMULTIHOP,
    [EBADMSG]          = TARGET_EBADMSG,
    [ENAMETOOLONG]     = TARGET_ENAMETOOLONG,
    [EOVERFLOW]        = TARGET_EOVERFLOW,
    [ENOTUNIQ]         = TARGET_ENOTUNIQ,
    [EBADFD]           = TARGET_EBADFD,
    [EREMCHG]          = TARGET_EREMCHG,
    [ELIBACC]          = TARGET_ELIBACC,
    [ELIBBAD]          = TARGET_ELIBBAD,
    [ELIBSCN]          = TARGET_ELIBSCN,
    [ELIBMAX]          = TARGET_ELIBMAX,
    [ELIBEXEC]         = TARGET_ELIBEXEC,
    [EILSEQ]           = TARGET_EILSEQ,
    [ENOSYS]           = TARGET_ENOSYS,
    [ELOOP]            = TARGET_ELOOP,
    [ERESTART]         = TARGET_ERESTART,
    [ESTRPIPE]         = TARGET_ESTRPIPE,
    [ENOTEMPTY]        = TARGET_ENOTEMPTY,
    [EUSERS]           = TARGET_EUSERS,
    [ENOTSOCK]         = TARGET_ENOTSOCK,
    [EDESTADDRREQ]     = TARGET_EDESTADDRREQ,
    [EMSGSIZE]         = TARGET_EMSGSIZE,
    [EPROTOTYPE]       = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]      = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]  = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]  = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]       = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]     = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]     = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]       = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]    = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]         = TARGET_ENETDOWN,
    [ENETUNREACH]      = TARGET_ENETUNREACH,
    [ENETRESET]        = TARGET_ENETRESET,
    [ECONNABORTED]     = TARGET_ECONNABORTED,
    [ECONNRESET]       = TARGET_ECONNRESET,
    [ENOBUFS]          = TARGET_ENOBUFS,
    [EISCONN]          = TARGET_EISCONN,
    [ENOTCONN]         = TARGET_ENOTCONN,
    [EUCLEAN]          = TARGET_EUCLEAN,
    [ENOTNAM]          = TARGET_ENOTNAM,
    [ENAVAIL]          = TARGET_ENAVAIL,
    [EISNAM]           = TARGET_EISNAM,
    [EREMOTEIO]        = TARGET_EREMOTEIO,
    [ESHUTDOWN]        = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]     = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]        = TARGET_ETIMEDOUT,
    [ECONNREFUSED]     = TARGET_ECONNREFUSED,
    [EHOSTDOWN]        = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]     = TARGET_EHOSTUNREACH,
    [EALREADY]         = TARGET_EALREADY,
    [EINPROGRESS]      = TARGET_EINPROGRESS,
    [ESTALE]           = TARGET_ESTALE,
    [ECANCELED]        = TARGET_ECANCELED,
    [ENOMEDIUM]        = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]      = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]           = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]      = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]      = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]     = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]       = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]  = TARGET_ENOTRECOVERABLE,
#endif
};
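/*
 * Errno translation: a host errno value indexes host_to_target_errno_table[]
 * to find the guest value, and syscall_init() fills target_to_host_errno_table[]
 * with the inverse mapping.  Entries left at zero fall through to an identity
 * mapping in host_to_target_errno()/target_to_host_errno() below.
 */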
static inline int host_to_target_errno(int err)
{
    if (host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
}

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_ulong brk_page;
    abi_long mapped_addr;
    int new_alloc_size;

    if (!new_brk)
        return target_brk;
    if (new_brk < target_original_brk)
        return target_brk;

    brk_page = HOST_PAGE_ALIGN(target_brk);

    /* If the new brk is less than this, set it and we're done... */
    if (new_brk < brk_page) {
        target_brk = new_brk;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));

    if (!is_error(mapped_addr))
        target_brk = new_brk;

    return target_brk;
}
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
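/*
 * fd_set layout on the guest side: an array of nw = (n + TARGET_ABI_BITS - 1)
 * / TARGET_ABI_BITS abi_ulong words, with descriptor k stored as bit
 * (k % TARGET_ABI_BITS) of word k / TARGET_ABI_BITS.  The two helpers above
 * expand/compress that representation through FD_SET()/FD_ISSET() so the
 * host's fd_set layout never leaks to the guest.
 */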
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
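/*
 * Example: with HOST_HZ 100 and a target built with TARGET_HZ 1024,
 * 250 host ticks are reported to the guest as (250 * 1024) / 100 = 2560.
 * The widening to int64_t keeps the multiplication from overflowing a
 * 32-bit long on 32-bit hosts.
 */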
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    if (rfd_addr) {
        if (copy_from_user_fdset(&rfds, rfd_addr, n))
            return -TARGET_EFAULT;
        rfds_ptr = &rfds;
    } else {
        rfds_ptr = NULL;
    }
    if (wfd_addr) {
        if (copy_from_user_fdset(&wfds, wfd_addr, n))
            return -TARGET_EFAULT;
        wfds_ptr = &wfds;
    } else {
        wfds_ptr = NULL;
    }
    if (efd_addr) {
        if (copy_from_user_fdset(&efds, efd_addr, n))
            return -TARGET_EFAULT;
        efds_ptr = &efds;
    } else {
        efds_ptr = NULL;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
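/*
 * select() modifies the fd sets in place and (on Linux) updates the timeout,
 * which is why the three sets and the timeval are copied back to the guest
 * after a successful call.
 */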
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, int flags)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);
#if defined(TARGET_MIPS)
    ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
    return host_pipe[0];
#elif defined(TARGET_SH4)
    ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
    return host_pipe[0];
#else
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
#endif
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
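/*
 * Only sa_family is byte-swapped by the two sockaddr converters above; the
 * rest of the structure is copied with memcpy() unchanged.  That works
 * because for the common AF_INET case the port and address are kept in
 * network byte order on both guest and host, and AF_UNIX paths are plain
 * bytes.
 */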
/* ??? Should this also swap msgh->name? */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapl(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* ??? Should this also swap msgh->name? */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapl(space);
    return 0;
}
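/*
 * The only control message the two converters above understand is
 * SCM_RIGHTS: its payload is an array of file descriptors, which are
 * byte-swapped individually.  Anything else is logged and copied through
 * unchanged, which is only correct when host and target layouts happen to
 * match.
 */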
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
            /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
            break;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERCRED:
        case TARGET_SO_PEERNAME:
            /* These don't just return a single integer */
            goto unimplemented;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/*
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapl(target_vec[i].iov_base);
        vec[i].iov_len = tswapl(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if an element has an invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}

static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapl(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
    }
    unlock_user (target_vec, target_addr, 0);

    return 0;
}
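/*
 * lock_iovec() translates a guest iovec array in place: each iov_base is run
 * through lock_user() so the host readv()/writev()/sendmsg() can use the
 * vector directly, and unlock_iovec() releases the same buffers, copying
 * them back to the guest only when 'copy' is set.
 */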
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
    return get_errno(socket(domain, type, protocol));
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg() Must return target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            unlock_user_struct(msgp, target_msg, send ? 0 : 1);
            return ret;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapl(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapl(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret))
                ret = len;
        }
    }
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        addr = alloca(addrlen);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    abi_long ret;
    const int n = sizeof(abi_ulong);

    switch(num) {
    case SOCKOP_socket:
        {
            abi_ulong domain, type, protocol;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_socket(domain, type, protocol);
        }
        break;
    case SOCKOP_bind:
        {
            abi_ulong sockfd, target_addr, addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_bind(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_connect:
        {
            abi_ulong sockfd, target_addr, addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_connect(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_listen:
        {
            abi_ulong sockfd, backlog;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(backlog, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(listen(sockfd, backlog));
        }
        break;
    case SOCKOP_accept:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_accept(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getsockname:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getsockname(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getpeername:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getpeername(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_socketpair:
        {
            abi_ulong domain, type, protocol;
            abi_ulong tab;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n)
                || get_user_ual(tab, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_socketpair(domain, type, protocol, tab);
        }
        break;
    case SOCKOP_send:
        {
            abi_ulong sockfd, msg, len, flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_recv:
        {
            abi_ulong sockfd, msg, len, flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_sendto:
        {
            abi_ulong sockfd, msg, len, flags, addr, addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_recvfrom:
        {
            abi_ulong sockfd, msg, len, flags, addr, addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_shutdown:
        {
            abi_ulong sockfd, how;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(how, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(shutdown(sockfd, how));
        }
        break;
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
        {
            abi_ulong fd;
            abi_ulong target_msg;
            abi_ulong flags;

            if (get_user_ual(fd, vptr)
                || get_user_ual(target_msg, vptr + n)
                || get_user_ual(flags, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_sendrecvmsg(fd, target_msg, flags,
                                 (num == SOCKOP_sendmsg));
        }
        break;
    case SOCKOP_setsockopt:
        {
            abi_ulong sockfd, level, optname, optval, optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_setsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    case SOCKOP_getsockopt:
        {
            abi_ulong sockfd, level, optname, optval, optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_getsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
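/*
 * socketcall(2) multiplexes all socket operations through one syscall:
 * 'num' selects the operation and 'vptr' points to an array of abi_ulong
 * arguments in guest memory, which is why every case above starts by
 * fetching its parameters with get_user_ual(..., vptr + i * n) before
 * dispatching to the corresponding do_*() helper.
 */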
#define N_SHM_REGIONS	32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
} shm_regions[N_SHM_REGIONS];

struct target_ipc_perm
{
    abi_long __key;
    abi_ulong uid;
    abi_ulong gid;
    abi_ulong cuid;
    abi_ulong cgid;
    unsigned short int mode;
    unsigned short int __pad1;
    unsigned short int __seq;
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
};

struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswapl(target_ip->__key);
    host_ip->uid = tswapl(target_ip->uid);
    host_ip->gid = tswapl(target_ip->gid);
    host_ip->cuid = tswapl(target_ip->cuid);
    host_ip->cgid = tswapl(target_ip->cgid);
    host_ip->mode = tswapl(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapl(host_ip->__key);
    target_ip->uid = tswapl(host_ip->uid);
    target_ip->gid = tswapl(host_ip->gid);
    target_ip->cuid = tswapl(host_ip->cuid);
    target_ip->cgid = tswapl(host_ip->cgid);
    target_ip->mode = tswapl(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
    host_sd->sem_otime = tswapl(target_sd->sem_otime);
    host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
    target_sd->sem_otime = tswapl(host_sd->sem_otime);
    target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2214 unsigned short **host_array
)
2217 unsigned short *array
;
2219 struct semid_ds semid_ds
;
2222 semun
.buf
= &semid_ds
;
2224 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2226 return get_errno(ret
);
2228 nsems
= semid_ds
.sem_nsems
;
2230 array
= lock_user(VERIFY_WRITE
, target_addr
,
2231 nsems
*sizeof(unsigned short), 0);
2233 return -TARGET_EFAULT
;
2235 for(i
=0; i
<nsems
; i
++) {
2236 __put_user((*host_array
)[i
], &array
[i
]);
2239 unlock_user(array
, target_addr
, 1);
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        arg.val = tswapl(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswapl(arg.val);
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
2304 struct target_sembuf
{
2305 unsigned short sem_num
;
2310 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2311 abi_ulong target_addr
,
2314 struct target_sembuf
*target_sembuf
;
2317 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2318 nsops
*sizeof(struct target_sembuf
), 1);
2320 return -TARGET_EFAULT
;
2322 for(i
=0; i
<nsops
; i
++) {
2323 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2324 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2325 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2328 unlock_user(target_sembuf
, target_addr
, 0);
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;
    return semop(semid, sops, nsops);
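
/* SysV message queues: convert msqid_ds and msginfo between the target and
   host layouts and wrap the msgctl/msgsnd/msgrcv calls. */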
struct target_msqid_ds
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
    abi_ulong __msg_cbytes;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapl(target_md->msg_stime);
    host_md->msg_rtime = tswapl(target_md->msg_rtime);
    host_md->msg_ctime = tswapl(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapl(target_md->msg_qnum);
    host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
    host_md->msg_lspid = tswapl(target_md->msg_lspid);
    host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapl(host_md->msg_stime);
    target_md->msg_rtime = tswapl(host_md->msg_rtime);
    target_md->msg_ctime = tswapl(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapl(host_md->msg_qnum);
    target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
    target_md->msg_lspid = tswapl(host_md->msg_lspid);
    target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
struct target_msginfo {
    unsigned short int msgseg;

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;

        ret = get_errno(msgctl(msgid, cmd, NULL));

        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
struct target_msgbuf {

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    unlock_user_struct(target_mb, msgp, 0);
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    target_mb->mtype = tswapl(host_mb->mtype);
    unlock_user_struct(target_mb, msgp, 1);
struct target_shmid_ds
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;
    abi_ulong shm_atime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
    abi_ulong shm_dtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
    abi_ulong shm_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
struct target_shminfo {

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
struct target_shm_info {
    abi_ulong swap_attempts;
    abi_ulong swap_successes;

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;

        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;

        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;

        ret = get_errno(shmctl(shmid, cmd, NULL));
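
/* shmat emulation: query the segment size with IPC_STAT, attach the segment
   at a host address (finding a free guest range when no address was given)
   and record the guest mapping in shm_regions[] so that do_shmdt can clear
   the page flags again later. */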
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
    struct shmid_ds shm_info;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */

        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
        if (mmap_start == -1) {
            host_raddr = (void *)-1;
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);

    if (host_raddr == (void *)-1) {
        return get_errno((long)host_raddr);
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;

static inline abi_long do_shmdt(abi_ulong shmaddr)
    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shm_regions[i].size, 0);

    return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
    version = call >> 16;

        ret = do_semop(first, ptr, second);

        ret = get_errno(semget(first, second, third));

        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);

        ret = get_errno(msgget(first, second));

        ret = do_msgsnd(first, ptr, second, third);

        ret = do_msgctl(first, second, ptr);

            struct target_ipc_kludge {
            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;
            ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
            unlock_user_struct(tmp, ptr, 0);

            ret = do_msgrcv(first, ptr, second, fifth, third);

            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            ret = -TARGET_EINVAL;

        ret = do_shmdt(ptr);

        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));

        /* IPC_* and SHM_* command values are the same on all linux platforms */
        ret = do_shmctl(first, second, third);

        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
#include "syscall_types.h"
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry {
    unsigned int target_cmd;
    unsigned int host_cmd;
    const argtype arg_type[5];

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096

static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, { __VA_ARGS__ } },
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
    const IOCTLEntry *ie;
    const argtype *arg_type;
    uint8_t buf_temp[MAX_STRUCT_SIZE];

        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        if (ie->target_cmd == cmd)

    arg_type = ie->arg_type;
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);

    switch(arg_type[0]) {
        ret = get_errno(ioctl(fd, ie->host_cmd));
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);

            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));

            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);

        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },

static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },

static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },

static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
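
/* Convert termios structures between target and host layouts: the flag
   words go through the bitmask tables above, while the c_cc control
   characters are copied one by one because the index values differ. */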
static void target_to_host_termios (void *dst, const void *src)
    struct host_termios *host = dst;
    const struct target_termios *target = src;

        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
static void host_to_target_termios (void *dst, const void *src)
    struct target_termios *target = dst;
    const struct host_termios *host = src;

        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },

static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);

/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;
    if (contents == 3) {
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;

    /* allocate the LDT */
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
            read_exec_only == 1 &&
            limit_in_pages == 0 &&
            seg_not_present == 1 &&

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);

/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
        ret = read_ldt(ptr, bytecount);
        ret = write_ldt(env, ptr, bytecount, 1);
        ret = write_ldt(env, ptr, bytecount, 0);
        ret = -TARGET_ENOSYS;
#if defined(TARGET_I386) && defined(TARGET_ABI32)
static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;
    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |

    /* Install the new entry ...  */
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);

static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
    lm = (entry_2 >> 21) & 1;

    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapl(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
#endif /* TARGET_I386 && TARGET_ABI32 */
#ifndef TARGET_ABI32
static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            return -TARGET_EFAULT;
        ret = -TARGET_EINVAL;

#endif /* defined(TARGET_I386) */
#if defined(CONFIG_USE_NPTL)

#define NEW_STACK_SIZE PTHREAD_STACK_MIN

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;

static void *clone_func(void *arg)
    new_thread_info *info = arg;
    ts = (TaskState *)thread_env->opaque;
    info->tid = gettid();
    env->host_tid = info->tid;
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);

/* this stack is the equivalent of the kernel stack associated with a
#define NEW_STACK_SIZE 8192

static int clone_func(void *arg)
    CPUState *env = arg;
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
#if defined(CONFIG_USE_NPTL)
    unsigned int nptl_flags;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)env->opaque;
#if defined(CONFIG_USE_NPTL)
        new_thread_info info;
        pthread_attr_t attr;

        ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
        init_task_state(ts);
        new_stack = ts->stack;
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_env->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
#if defined(CONFIG_USE_NPTL)
        flags &= ~CLONE_NPTL_FLAGS2;
        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);

        /* Wait for the child to initialize.  */
        pthread_cond_wait(&info.cond, &info.mutex);
        if (flags & CLONE_PARENT_SETTID)
            put_user_u32(ret, parent_tidptr);

        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
        if (flags & CLONE_NPTL_FLAGS2)
        /* This is probably going to die very quickly, but do it anyway.  */
        ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
        ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);

        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
#if defined(CONFIG_USE_NPTL)
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)env->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_GETLK:
    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
    case TARGET_F_GETOWN:
    case TARGET_F_SETOWN:
    case TARGET_F_GETSIG:
    case TARGET_F_SETSIG:
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
    case TARGET_F_NOTIFY:
        return -TARGET_EINVAL;
    return -TARGET_EINVAL;
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)

    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapl(target_fl->l_start);
        fl.l_len = tswapl(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type = tswap16(fl.l_type);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapl(fl.l_start);
            target_fl->l_len = tswapl(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapl(target_fl->l_start);
        fl.l_len = tswapl(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswapl(target_fl64->l_start);
        fl64.l_len = tswapl(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type = tswap16(fl64.l_type) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswapl(fl64.l_start);
            target_fl64->l_len = tswapl(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);

    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswapl(target_fl64->l_start);
        fl64.l_len = tswapl(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));

    case TARGET_F_GETFL:
        ret = get_errno(fcntl(fd, host_cmd, arg));
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        ret = get_errno(fcntl(fd, host_cmd, arg));

        ret = get_errno(fcntl(fd, cmd, arg));
static inline int high2lowuid(int uid)
static inline int high2lowgid(int gid)
static inline int low2highuid(int uid)
    if ((int16_t)uid == -1)
static inline int low2highgid(int gid)
    if ((int16_t)gid == -1)
#endif /* USE_UID16 */
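
/* One-time setup: register the struct layouts used by the ioctl thunks,
   patch ioctl numbers whose size field depends on the target layout, and
   build the target_to_host_errno_table[] reverse mapping. */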
void syscall_init(void)
    const argtype *arg_type;

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);

        /* Build target_to_host_errno_table[] table from
         * host_to_target_errno_table[]. */
        for (i=0; i < ERRNO_TABLE_SIZE; i++)
                target_to_host_errno_table[host_to_target_errno_table[i]] = i;

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
    return ((uint64_t)word1 << 32) | word0;
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
    if (((CPUARMState *)cpu_env)->eabi)
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
    if (((CPUARMState *)cpu_env)->eabi)
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapl(target_ts->tv_sec);
    host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapl(host_ts->tv_sec);
    target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
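
/* Copy a host struct stat into the target's stat64 layout; ARM EABI
   targets use their own target_eabi_stat64 variant of the structure. */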
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);

#if (TARGET_LONG_BITS == 64) && (!defined(TARGET_ALPHA))
        struct target_stat *target_st;
        struct target_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
    struct timespec ts, *pts;

    /* ??? We assume FUTEX_* constants are the same on both host
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
            target_to_host_timespec(pts, timeout);
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_CMP_REQUEUE:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   (base_op == FUTEX_CMP_REQUEUE
        return -TARGET_ENOSYS;
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)

int get_osversion(void)
    static int osversion;
    struct new_utsname buf;

    if (qemu_uname_release && *qemu_uname_release) {
        s = qemu_uname_release;
        if (sys_uname(&buf))
    for (i = 0; i < 3; i++) {
        while (*s >= '0' && *s <= '9') {
        tmp = (tmp << 8) + n;
/* do_syscall() should always have a single exit point at the end so
   that actions, such as logging of syscall results, can be performed.
   All errnos that do_syscall() returns must be -TARGET_<errcode>. */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6)
    gemu_log("syscall %d", num);
    print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
    case TARGET_NR_exit:
#ifdef CONFIG_USE_NPTL
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */
        /* FIXME: This probably breaks if a signal arrives.  We should probably
           be disabling signals.  */
        if (first_cpu->next_cpu) {
            while (p && p != (CPUState *)cpu_env) {
                lastp = &p->next_cpu;
            /* If we didn't find the CPU for this thread then something is
            /* Remove the CPU from the list.  */
            *lastp = p->next_cpu;
            ts = ((CPUState *)cpu_env)->opaque;
            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
            /* TODO: Free CPU state.  */
        gdb_exit(cpu_env, arg1);
        ret = 0; /* avoid warning */
    case TARGET_NR_read:
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(read(arg1, p, arg3));
        unlock_user(p, arg2, ret);
    case TARGET_NR_write:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(write(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(open(path(p),
                             target_to_host_bitmask(arg2, fcntl_flags_tbl),
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_openat) && defined(__NR_openat)
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_openat(arg1,
                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
        unlock_user(p, arg2, 0);
    case TARGET_NR_close:
        ret = get_errno(close(arg1));
    case TARGET_NR_fork:
        ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
            ret = get_errno(waitpid(arg1, &status, arg3));
            if (!is_error(ret) && arg2
                && put_user_s32(host_to_target_waitstatus(status), arg2))
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
            ret = get_errno(waitid(arg1, arg2, &info, arg4));
            if (!is_error(ret) && arg3 && info.si_pid != 0) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
                host_to_target_siginfo(p, &info);
                unlock_user(p, arg3, sizeof(target_siginfo_t));
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(creat(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_link:
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
                ret = -TARGET_EFAULT;
                ret = get_errno(link(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
    case TARGET_NR_linkat:
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
                ret = -TARGET_EFAULT;
                ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
            unlock_user(p, arg2, 0);
            unlock_user(p2, arg4, 0);
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_execve:
            char **argp, **envp;
            abi_ulong guest_argp;
            abi_ulong guest_envp;

            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))

            argp = alloca((argc + 1) * sizeof(void *));
            envp = alloca((envc + 1) * sizeof(void *));

            for (gp = guest_argp, q = argp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                if (!(*q = lock_user_string(addr)))
            for (gp = guest_envp, q = envp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                if (!(*q = lock_user_string(addr)))

            if (!(p = lock_user_string(arg1)))
            ret = get_errno(execve(p, argp, envp));
            unlock_user(p, arg1, 0);

            ret = -TARGET_EFAULT;

            for (gp = guest_argp, q = argp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                unlock_user(*q, addr, 0);
            for (gp = guest_envp, q = envp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                unlock_user(*q, addr, 0);
:
4406 if (!(p
= lock_user_string(arg1
)))
4408 ret
= get_errno(chdir(p
));
4409 unlock_user(p
, arg1
, 0);
4411 #ifdef TARGET_NR_time
4412 case TARGET_NR_time
:
4415 ret
= get_errno(time(&host_time
));
4418 && put_user_sal(host_time
, arg1
))
4423 case TARGET_NR_mknod
:
4424 if (!(p
= lock_user_string(arg1
)))
4426 ret
= get_errno(mknod(p
, arg2
, arg3
));
4427 unlock_user(p
, arg1
, 0);
4429 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4430 case TARGET_NR_mknodat
:
4431 if (!(p
= lock_user_string(arg2
)))
4433 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
4434 unlock_user(p
, arg2
, 0);
4437 case TARGET_NR_chmod
:
4438 if (!(p
= lock_user_string(arg1
)))
4440 ret
= get_errno(chmod(p
, arg2
));
4441 unlock_user(p
, arg1
, 0);
4443 #ifdef TARGET_NR_break
4444 case TARGET_NR_break
:
4447 #ifdef TARGET_NR_oldstat
4448 case TARGET_NR_oldstat
:
4451 case TARGET_NR_lseek
:
4452 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
4454 #ifdef TARGET_NR_getxpid
4455 case TARGET_NR_getxpid
:
4457 case TARGET_NR_getpid
:
4459 ret
= get_errno(getpid());
4461 case TARGET_NR_mount
:
4463 /* need to look at the data field */
4465 p
= lock_user_string(arg1
);
4466 p2
= lock_user_string(arg2
);
4467 p3
= lock_user_string(arg3
);
4468 if (!p
|| !p2
|| !p3
)
4469 ret
= -TARGET_EFAULT
;
4471 /* FIXME - arg5 should be locked, but it isn't clear how to
4472 * do that since it's not guaranteed to be a NULL-terminated
4476 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
4478 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
4480 unlock_user(p
, arg1
, 0);
4481 unlock_user(p2
, arg2
, 0);
4482 unlock_user(p3
, arg3
, 0);
4485 #ifdef TARGET_NR_umount
4486 case TARGET_NR_umount
:
4487 if (!(p
= lock_user_string(arg1
)))
4489 ret
= get_errno(umount(p
));
4490 unlock_user(p
, arg1
, 0);
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
            if (get_user_sal(host_time, arg1))
            ret = get_errno(stime(&host_time));
    case TARGET_NR_ptrace:
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
#ifdef TARGET_NR_oldfstat
    case TARGET_NR_oldfstat:
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        ret = get_errno(pause());
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                tbuf.actime = tswapl(target_tbuf->actime);
                tbuf.modtime = tswapl(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
            if (!(p = lock_user_string(arg1)))
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
    case TARGET_NR_utimes:
            struct timeval *tvp, tv[2];
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
            if (!(p = lock_user_string(arg1)))
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
    case TARGET_NR_futimesat:
            struct timeval *tvp, tv[2];
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
            if (!(p = lock_user_string(arg2)))
            ret = get_errno(sys_futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
#ifdef TARGET_NR_stty
    case TARGET_NR_stty:
#ifdef TARGET_NR_gtty
    case TARGET_NR_gtty:
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_faccessat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
4601 #ifdef TARGET_NR_nice /* not on alpha */
4602 case TARGET_NR_nice
:
4603 ret
= get_errno(nice(arg1
));
4606 #ifdef TARGET_NR_ftime
4607 case TARGET_NR_ftime
:
4610 case TARGET_NR_sync
:
4614 case TARGET_NR_kill
:
4615 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
4617 case TARGET_NR_rename
:
4620 p
= lock_user_string(arg1
);
4621 p2
= lock_user_string(arg2
);
4623 ret
= -TARGET_EFAULT
;
4625 ret
= get_errno(rename(p
, p2
));
4626 unlock_user(p2
, arg2
, 0);
4627 unlock_user(p
, arg1
, 0);
4630 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4631 case TARGET_NR_renameat
:
4634 p
= lock_user_string(arg2
);
4635 p2
= lock_user_string(arg4
);
4637 ret
= -TARGET_EFAULT
;
4639 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
4640 unlock_user(p2
, arg4
, 0);
4641 unlock_user(p
, arg2
, 0);
4645 case TARGET_NR_mkdir
:
4646 if (!(p
= lock_user_string(arg1
)))
4648 ret
= get_errno(mkdir(p
, arg2
));
4649 unlock_user(p
, arg1
, 0);
4651 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4652 case TARGET_NR_mkdirat
:
4653 if (!(p
= lock_user_string(arg2
)))
4655 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
4656 unlock_user(p
, arg2
, 0);
4659 case TARGET_NR_rmdir
:
4660 if (!(p
= lock_user_string(arg1
)))
4662 ret
= get_errno(rmdir(p
));
4663 unlock_user(p
, arg1
, 0);
4666 ret
= get_errno(dup(arg1
));
4668 case TARGET_NR_pipe
:
4669 ret
= do_pipe(cpu_env
, arg1
, 0);
4671 #ifdef TARGET_NR_pipe2
4672 case TARGET_NR_pipe2
:
4673 ret
= do_pipe(cpu_env
, arg1
, arg2
);
4676 case TARGET_NR_times
:
4678 struct target_tms
*tmsp
;
4680 ret
= get_errno(times(&tms
));
4682 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
4685 tmsp
->tms_utime
= tswapl(host_to_target_clock_t(tms
.tms_utime
));
4686 tmsp
->tms_stime
= tswapl(host_to_target_clock_t(tms
.tms_stime
));
4687 tmsp
->tms_cutime
= tswapl(host_to_target_clock_t(tms
.tms_cutime
));
4688 tmsp
->tms_cstime
= tswapl(host_to_target_clock_t(tms
.tms_cstime
));
4691 ret
= host_to_target_clock_t(ret
);
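        /* The times() result and each tms field are host clock_t values;
         * host_to_target_clock_t() rescales them for the guest before the
         * tswapl() byte swap, and the syscall's own return value (clock
         * ticks since boot) is converted the same way. */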
#ifdef TARGET_NR_prof
    case TARGET_NR_prof:
#ifdef TARGET_NR_signal
    case TARGET_NR_signal:
    case TARGET_NR_acct:
        ret = get_errno(acct(NULL));
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(acct(path(p)));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_umount2 /* not on alpha */
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_lock
    case TARGET_NR_lock:
    case TARGET_NR_ioctl:
        ret = do_ioctl(arg1, arg2, arg3);
    case TARGET_NR_fcntl:
        ret = do_fcntl(arg1, arg2, arg3);
#ifdef TARGET_NR_mpx
    case TARGET_NR_setpgid:
        ret = get_errno(setpgid(arg1, arg2));
#ifdef TARGET_NR_ulimit
    case TARGET_NR_ulimit:
#ifdef TARGET_NR_oldolduname
    case TARGET_NR_oldolduname:
    case TARGET_NR_umask:
        ret = get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
    case TARGET_NR_ustat:
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
        ret = get_errno(dup3(arg1, arg2, arg3));
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        ret = get_errno(getppid());
    case TARGET_NR_getpgrp:
        ret = get_errno(getpgrp());
    case TARGET_NR_setsid:
        ret = get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
#if !defined(TARGET_MIPS)
        struct target_old_sigaction *old_act;
        struct target_sigaction act, oact, *pact;
        if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
        act._sa_handler = old_act->_sa_handler;
        target_siginitset(&act.sa_mask, old_act->sa_mask);
        act.sa_flags = old_act->sa_flags;
        act.sa_restorer = old_act->sa_restorer;
        unlock_user_struct(old_act, arg2, 0);
        ret = get_errno(do_sigaction(arg1, pact, &oact));
        if (!is_error(ret) && arg3) {
            if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
            old_act->_sa_handler = oact._sa_handler;
            old_act->sa_mask = oact.sa_mask.sig[0];
            old_act->sa_flags = oact.sa_flags;
            old_act->sa_restorer = oact.sa_restorer;
            unlock_user_struct(old_act, arg3, 1);
        struct target_sigaction act, oact, *pact, *old_act;
        if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
        act._sa_handler = old_act->_sa_handler;
        target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
        act.sa_flags = old_act->sa_flags;
        unlock_user_struct(old_act, arg2, 0);
        ret = get_errno(do_sigaction(arg1, pact, &oact));
        if (!is_error(ret) && arg3) {
            if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
            old_act->_sa_handler = oact._sa_handler;
            old_act->sa_flags = oact.sa_flags;
            old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
            old_act->sa_mask.sig[1] = 0;
            old_act->sa_mask.sig[2] = 0;
            old_act->sa_mask.sig[3] = 0;
            unlock_user_struct(old_act, arg3, 1);
    case TARGET_NR_rt_sigaction:
        struct target_sigaction *act;
        struct target_sigaction *oact;
        if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
        if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
            ret = -TARGET_EFAULT;
            goto rt_sigaction_fail;
        ret = get_errno(do_sigaction(arg1, act, oact));
        unlock_user_struct(act, arg2, 0);
        unlock_user_struct(oact, arg3, 1);
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        abi_ulong target_set;
        sigprocmask(0, NULL, &cur_set);
        host_to_target_old_sigset(&target_set, &cur_set);
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        sigset_t set, oset, cur_set;
        abi_ulong target_set = arg1;
        sigprocmask(0, NULL, &cur_set);
        target_to_host_old_sigset(&set, &target_set);
        sigorset(&set, &set, &cur_set);
        sigprocmask(SIG_SETMASK, &set, &oset);
        host_to_target_old_sigset(&target_set, &oset);
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        sigset_t set, oldset, *set_ptr;
        case TARGET_SIG_BLOCK:
        case TARGET_SIG_UNBLOCK:
        case TARGET_SIG_SETMASK:
            ret = -TARGET_EINVAL;
        if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
        target_to_host_old_sigset(&set, p);
        unlock_user(p, arg2, 0);
        ret = get_errno(sigprocmask(arg1, set_ptr, &oldset));
        if (!is_error(ret) && arg3) {
            if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
            host_to_target_old_sigset(p, &oldset);
            unlock_user(p, arg3, sizeof(target_sigset_t));
    case TARGET_NR_rt_sigprocmask:
        sigset_t set, oldset, *set_ptr;
        case TARGET_SIG_BLOCK:
        case TARGET_SIG_UNBLOCK:
        case TARGET_SIG_SETMASK:
            ret = -TARGET_EINVAL;
        if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
        target_to_host_sigset(&set, p);
        unlock_user(p, arg2, 0);
        ret = get_errno(sigprocmask(how, set_ptr, &oldset));
        if (!is_error(ret) && arg3) {
            if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
            host_to_target_sigset(p, &oldset);
            unlock_user(p, arg3, sizeof(target_sigset_t));
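        /* Both sigprocmask variants follow the same pattern: the guest's
         * TARGET_SIG_BLOCK/UNBLOCK/SETMASK value is mapped to the host
         * SIG_* constant (the mapping statements are elided above), the
         * guest sigset is pinned with lock_user() and converted with
         * target_to_host_[old_]sigset(), and on success the previous mask
         * is converted back and written out to arg3 if the guest supplied
         * an output pointer. */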
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        ret = get_errno(sigpending(&set));
        if (!is_error(ret)) {
            if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
            host_to_target_old_sigset(p, &set);
            unlock_user(p, arg1, sizeof(target_sigset_t));
    case TARGET_NR_rt_sigpending:
        ret = get_errno(sigpending(&set));
        if (!is_error(ret)) {
            if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
            host_to_target_sigset(p, &set);
            unlock_user(p, arg1, sizeof(target_sigset_t));
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
        target_to_host_old_sigset(&set, p);
        unlock_user(p, arg1, 0);
        ret = get_errno(sigsuspend(&set));
    case TARGET_NR_rt_sigsuspend:
        if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
        target_to_host_sigset(&set, p);
        unlock_user(p, arg1, 0);
        ret = get_errno(sigsuspend(&set));
    case TARGET_NR_rt_sigtimedwait:
        struct timespec uts, *puts;
        if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
        target_to_host_sigset(&set, p);
        unlock_user(p, arg1, 0);
        target_to_host_timespec(puts, arg3);
        ret = get_errno(sigtimedwait(&set, &uinfo, puts));
        if (!is_error(ret) && arg2) {
            if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
            host_to_target_siginfo(p, &uinfo);
            unlock_user(p, arg2, sizeof(target_siginfo_t));
    case TARGET_NR_rt_sigqueueinfo:
        if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
        target_to_host_siginfo(&uinfo, p);
        unlock_user(p, arg1, 0);
        ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_sigreturn(cpu_env);
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_setrlimit:
        /* XXX: convert resource ? */
        int resource = arg1;
        struct target_rlimit *target_rlim;
        if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
        rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
        rlim.rlim_max = tswapl(target_rlim->rlim_max);
        unlock_user_struct(target_rlim, arg2, 0);
        ret = get_errno(setrlimit(resource, &rlim));
    case TARGET_NR_getrlimit:
        /* XXX: convert resource ? */
        int resource = arg1;
        struct target_rlimit *target_rlim;
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
            target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
            target_rlim->rlim_max = tswapl(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
    case TARGET_NR_getrusage:
        struct rusage rusage;
        ret = get_errno(getrusage(arg1, &rusage));
        if (!is_error(ret)) {
            host_to_target_rusage(arg2, &rusage);
    case TARGET_NR_gettimeofday:
        ret = get_errno(gettimeofday(&tv, NULL));
        if (!is_error(ret)) {
            if (copy_to_user_timeval(arg1, &tv))
    case TARGET_NR_settimeofday:
        if (copy_from_user_timeval(&tv, arg1))
        ret = get_errno(settimeofday(&tv, NULL));
#ifdef TARGET_NR_select
    case TARGET_NR_select:
        struct target_sel_arg_struct *sel;
        abi_ulong inp, outp, exp, tvp;
        if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
        nsel = tswapl(sel->n);
        inp = tswapl(sel->inp);
        outp = tswapl(sel->outp);
        exp = tswapl(sel->exp);
        tvp = tswapl(sel->tvp);
        unlock_user_struct(sel, arg1, 0);
        ret = do_select(nsel, inp, outp, exp, tvp);
    case TARGET_NR_symlink:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg2);
        ret = -TARGET_EFAULT;
        ret = get_errno(symlink(p, p2));
        unlock_user(p2, arg2, 0);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
    case TARGET_NR_symlinkat:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg3);
        ret = -TARGET_EFAULT;
        ret = get_errno(sys_symlinkat(p, arg2, p2));
        unlock_user(p2, arg3, 0);
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_oldlstat
    case TARGET_NR_oldlstat:
    case TARGET_NR_readlink:
        p = lock_user_string(arg1);
        p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
        ret = -TARGET_EFAULT;
        if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
            char real[PATH_MAX];
            temp = realpath(exec_path, real);
            ret = (temp == NULL) ? get_errno(-1) : strlen(real);
            snprintf((char *)p2, arg3, "%s", real);
        ret = get_errno(readlink(path(p), p2, arg3));
        unlock_user(p2, arg2, ret);
        unlock_user(p, arg1, 0);
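        /* readlink() on "/proc/self/exe" is special-cased: inside the
         * emulator that link would name the qemu binary itself, so the
         * path of the guest executable (exec_path) is resolved with
         * realpath() and copied into the guest buffer instead, with ret
         * set to the string length as readlink() would report it. */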
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
    case TARGET_NR_readlinkat:
        p = lock_user_string(arg2);
        p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
        ret = -TARGET_EFAULT;
        ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
        unlock_user(p2, arg3, ret);
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_uselib
    case TARGET_NR_uselib:
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_reboot:
#ifdef TARGET_NR_readdir
    case TARGET_NR_readdir:
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE)
        abi_ulong v1, v2, v3, v4, v5, v6;
        if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
        unlock_user(v, arg1, 0);
        ret = get_errno(target_mmap(v1, v2, v3,
                                    target_to_host_bitmask(v4, mmap_flags_tbl),
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#define MMAP_SHIFT 12
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg6 << MMAP_SHIFT));
    case TARGET_NR_munmap:
        ret = get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        ret = get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
    /* ??? msync/mlock/munlock are broken for softmmu. */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        ret = get_errno(msync(g2h(arg1), arg2, arg3));
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        ret = get_errno(mlock(g2h(arg1), arg2));
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        ret = get_errno(munlock(g2h(arg1), arg2));
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        ret = get_errno(mlockall(arg1));
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        ret = get_errno(munlockall());
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_ftruncate:
        ret = get_errno(ftruncate(arg1, arg2));
    case TARGET_NR_fchmod:
        ret = get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_fchmodat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_getpriority:
        /* libc does special remapping of the return value of
         * sys_getpriority() so it's just easiest to call
         * sys_getpriority() directly rather than through libc. */
        ret = get_errno(sys_getpriority(arg1, arg2));
    case TARGET_NR_setpriority:
        ret = get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_profil
    case TARGET_NR_profil:
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;
            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            unlock_user_struct(target_stfs, arg2, 1);
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;
            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            unlock_user_struct(target_stfs, arg3, 1);
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
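        /* fstatfs/fstatfs64 reuse the statfs/statfs64 copy-out code via
         * the convert_statfs/convert_statfs64 labels (the label
         * definitions sit on lines not shown in this excerpt): the host
         * struct statfs is copied field by field into the guest's
         * target_statfs/target_statfs64 layout with __put_user(), which
         * handles both the field-size change and the byte order. */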
5389 #ifdef TARGET_NR_ioperm
5390 case TARGET_NR_ioperm
:
5393 #ifdef TARGET_NR_socketcall
5394 case TARGET_NR_socketcall
:
5395 ret
= do_socketcall(arg1
, arg2
);
5398 #ifdef TARGET_NR_accept
5399 case TARGET_NR_accept
:
5400 ret
= do_accept(arg1
, arg2
, arg3
);
5403 #ifdef TARGET_NR_bind
5404 case TARGET_NR_bind
:
5405 ret
= do_bind(arg1
, arg2
, arg3
);
5408 #ifdef TARGET_NR_connect
5409 case TARGET_NR_connect
:
5410 ret
= do_connect(arg1
, arg2
, arg3
);
5413 #ifdef TARGET_NR_getpeername
5414 case TARGET_NR_getpeername
:
5415 ret
= do_getpeername(arg1
, arg2
, arg3
);
5418 #ifdef TARGET_NR_getsockname
5419 case TARGET_NR_getsockname
:
5420 ret
= do_getsockname(arg1
, arg2
, arg3
);
5423 #ifdef TARGET_NR_getsockopt
5424 case TARGET_NR_getsockopt
:
5425 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
5428 #ifdef TARGET_NR_listen
5429 case TARGET_NR_listen
:
5430 ret
= get_errno(listen(arg1
, arg2
));
5433 #ifdef TARGET_NR_recv
5434 case TARGET_NR_recv
:
5435 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
5438 #ifdef TARGET_NR_recvfrom
5439 case TARGET_NR_recvfrom
:
5440 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5443 #ifdef TARGET_NR_recvmsg
5444 case TARGET_NR_recvmsg
:
5445 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
5448 #ifdef TARGET_NR_send
5449 case TARGET_NR_send
:
5450 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
5453 #ifdef TARGET_NR_sendmsg
5454 case TARGET_NR_sendmsg
:
5455 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
5458 #ifdef TARGET_NR_sendto
5459 case TARGET_NR_sendto
:
5460 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5463 #ifdef TARGET_NR_shutdown
5464 case TARGET_NR_shutdown
:
5465 ret
= get_errno(shutdown(arg1
, arg2
));
5468 #ifdef TARGET_NR_socket
5469 case TARGET_NR_socket
:
5470 ret
= do_socket(arg1
, arg2
, arg3
);
5473 #ifdef TARGET_NR_socketpair
5474 case TARGET_NR_socketpair
:
5475 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
5478 #ifdef TARGET_NR_setsockopt
5479 case TARGET_NR_setsockopt
:
5480 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
5484 case TARGET_NR_syslog
:
5485 if (!(p
= lock_user_string(arg2
)))
5487 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
5488 unlock_user(p
, arg2
, 0);
5491 case TARGET_NR_setitimer
:
5493 struct itimerval value
, ovalue
, *pvalue
;
5497 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
5498 || copy_from_user_timeval(&pvalue
->it_value
,
5499 arg2
+ sizeof(struct target_timeval
)))
5504 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
5505 if (!is_error(ret
) && arg3
) {
5506 if (copy_to_user_timeval(arg3
,
5507 &ovalue
.it_interval
)
5508 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
5514 case TARGET_NR_getitimer
:
5516 struct itimerval value
;
5518 ret
= get_errno(getitimer(arg1
, &value
));
5519 if (!is_error(ret
) && arg2
) {
5520 if (copy_to_user_timeval(arg2
,
5522 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
5528 case TARGET_NR_stat
:
5529 if (!(p
= lock_user_string(arg1
)))
5531 ret
= get_errno(stat(path(p
), &st
));
5532 unlock_user(p
, arg1
, 0);
5534 case TARGET_NR_lstat
:
5535 if (!(p
= lock_user_string(arg1
)))
5537 ret
= get_errno(lstat(path(p
), &st
));
5538 unlock_user(p
, arg1
, 0);
5540 case TARGET_NR_fstat
:
5542 ret
= get_errno(fstat(arg1
, &st
));
5544 if (!is_error(ret
)) {
5545 struct target_stat
*target_st
;
5547 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
5549 memset(target_st
, 0, sizeof(*target_st
));
5550 __put_user(st
.st_dev
, &target_st
->st_dev
);
5551 __put_user(st
.st_ino
, &target_st
->st_ino
);
5552 __put_user(st
.st_mode
, &target_st
->st_mode
);
5553 __put_user(st
.st_uid
, &target_st
->st_uid
);
5554 __put_user(st
.st_gid
, &target_st
->st_gid
);
5555 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
5556 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
5557 __put_user(st
.st_size
, &target_st
->st_size
);
5558 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
5559 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
5560 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
5561 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
5562 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
5563 unlock_user_struct(target_st
, arg2
, 1);
5567 #ifdef TARGET_NR_olduname
5568 case TARGET_NR_olduname
:
5571 #ifdef TARGET_NR_iopl
5572 case TARGET_NR_iopl
:
5575 case TARGET_NR_vhangup
:
5576 ret
= get_errno(vhangup());
5578 #ifdef TARGET_NR_idle
5579 case TARGET_NR_idle
:
5582 #ifdef TARGET_NR_syscall
5583 case TARGET_NR_syscall
:
5584 ret
= do_syscall(cpu_env
,arg1
& 0xffff,arg2
,arg3
,arg4
,arg5
,arg6
,0);
5587 case TARGET_NR_wait4
:
5590 abi_long status_ptr
= arg2
;
5591 struct rusage rusage
, *rusage_ptr
;
5592 abi_ulong target_rusage
= arg4
;
5594 rusage_ptr
= &rusage
;
5597 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
5598 if (!is_error(ret
)) {
5600 status
= host_to_target_waitstatus(status
);
5601 if (put_user_s32(status
, status_ptr
))
5605 host_to_target_rusage(target_rusage
, &rusage
);
5609 #ifdef TARGET_NR_swapoff
5610 case TARGET_NR_swapoff
:
5611 if (!(p
= lock_user_string(arg1
)))
5613 ret
= get_errno(swapoff(p
));
5614 unlock_user(p
, arg1
, 0);
5617 case TARGET_NR_sysinfo
:
5619 struct target_sysinfo
*target_value
;
5620 struct sysinfo value
;
5621 ret
= get_errno(sysinfo(&value
));
5622 if (!is_error(ret
) && arg1
)
5624 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
5626 __put_user(value
.uptime
, &target_value
->uptime
);
5627 __put_user(value
.loads
[0], &target_value
->loads
[0]);
5628 __put_user(value
.loads
[1], &target_value
->loads
[1]);
5629 __put_user(value
.loads
[2], &target_value
->loads
[2]);
5630 __put_user(value
.totalram
, &target_value
->totalram
);
5631 __put_user(value
.freeram
, &target_value
->freeram
);
5632 __put_user(value
.sharedram
, &target_value
->sharedram
);
5633 __put_user(value
.bufferram
, &target_value
->bufferram
);
5634 __put_user(value
.totalswap
, &target_value
->totalswap
);
5635 __put_user(value
.freeswap
, &target_value
->freeswap
);
5636 __put_user(value
.procs
, &target_value
->procs
);
5637 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
5638 __put_user(value
.freehigh
, &target_value
->freehigh
);
5639 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
5640 unlock_user_struct(target_value
, arg1
, 1);
5644 #ifdef TARGET_NR_ipc
5646 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5649 #ifdef TARGET_NR_semget
5650 case TARGET_NR_semget
:
5651 ret
= get_errno(semget(arg1
, arg2
, arg3
));
5654 #ifdef TARGET_NR_semop
5655 case TARGET_NR_semop
:
5656 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
5659 #ifdef TARGET_NR_semctl
5660 case TARGET_NR_semctl
:
5661 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
5664 #ifdef TARGET_NR_msgctl
5665 case TARGET_NR_msgctl
:
5666 ret
= do_msgctl(arg1
, arg2
, arg3
);
5669 #ifdef TARGET_NR_msgget
5670 case TARGET_NR_msgget
:
5671 ret
= get_errno(msgget(arg1
, arg2
));
5674 #ifdef TARGET_NR_msgrcv
5675 case TARGET_NR_msgrcv
:
5676 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
5679 #ifdef TARGET_NR_msgsnd
5680 case TARGET_NR_msgsnd
:
5681 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
5684 #ifdef TARGET_NR_shmget
5685 case TARGET_NR_shmget
:
5686 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
5689 #ifdef TARGET_NR_shmctl
5690 case TARGET_NR_shmctl
:
5691 ret
= do_shmctl(arg1
, arg2
, arg3
);
5694 #ifdef TARGET_NR_shmat
5695 case TARGET_NR_shmat
:
5696 ret
= do_shmat(arg1
, arg2
, arg3
);
5699 #ifdef TARGET_NR_shmdt
5700 case TARGET_NR_shmdt
:
5701 ret
= do_shmdt(arg1
);
5704 case TARGET_NR_fsync
:
5705 ret
= get_errno(fsync(arg1
));
5707 case TARGET_NR_clone
:
5708 #if defined(TARGET_SH4)
5709 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
5710 #elif defined(TARGET_CRIS)
5711 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
5713 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
5716 #ifdef __NR_exit_group
5717 /* new thread calls */
5718 case TARGET_NR_exit_group
:
5722 gdb_exit(cpu_env
, arg1
);
5723 ret
= get_errno(exit_group(arg1
));
5726 case TARGET_NR_setdomainname
:
5727 if (!(p
= lock_user_string(arg1
)))
5729 ret
= get_errno(setdomainname(p
, arg2
));
5730 unlock_user(p
, arg1
, 0);
5732 case TARGET_NR_uname
:
5733 /* no need to transcode because we use the linux syscall */
5735 struct new_utsname
* buf
;
5737 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
5739 ret
= get_errno(sys_uname(buf
));
5740 if (!is_error(ret
)) {
/* Overwrite the native machine name with whatever is being
5743 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
5744 /* Allow the user to override the reported release. */
5745 if (qemu_uname_release
&& *qemu_uname_release
)
5746 strcpy (buf
->release
, qemu_uname_release
);
5748 unlock_user_struct(buf
, arg1
, 1);
5752 case TARGET_NR_modify_ldt
:
5753 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
5755 #if !defined(TARGET_X86_64)
5756 case TARGET_NR_vm86old
:
5758 case TARGET_NR_vm86
:
5759 ret
= do_vm86(cpu_env
, arg1
, arg2
);
5763 case TARGET_NR_adjtimex
:
5765 #ifdef TARGET_NR_create_module
5766 case TARGET_NR_create_module
:
5768 case TARGET_NR_init_module
:
5769 case TARGET_NR_delete_module
:
5770 #ifdef TARGET_NR_get_kernel_syms
5771 case TARGET_NR_get_kernel_syms
:
5774 case TARGET_NR_quotactl
:
5776 case TARGET_NR_getpgid
:
5777 ret
= get_errno(getpgid(arg1
));
5779 case TARGET_NR_fchdir
:
5780 ret
= get_errno(fchdir(arg1
));
5782 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5783 case TARGET_NR_bdflush
:
5786 #ifdef TARGET_NR_sysfs
5787 case TARGET_NR_sysfs
:
5790 case TARGET_NR_personality
:
5791 ret
= get_errno(personality(arg1
));
5793 #ifdef TARGET_NR_afs_syscall
5794 case TARGET_NR_afs_syscall
:
5797 #ifdef TARGET_NR__llseek /* Not on alpha */
5798 case TARGET_NR__llseek
:
5800 #if defined (__x86_64__)
5801 ret
= get_errno(lseek(arg1
, ((uint64_t )arg2
<< 32) | arg3
, arg5
));
5802 if (put_user_s64(ret
, arg4
))
5806 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
5807 if (put_user_s64(res
, arg4
))
    case TARGET_NR_getdents:
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        struct target_dirent *target_dirp;
        struct linux_dirent *dirp;
        abi_long count = arg3;
        dirp = malloc(count);
        ret = -TARGET_ENOMEM;
        ret = get_errno(sys_getdents(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent *de;
            struct target_dirent *tde;
            int reclen, treclen;
            int count1, tnamelen;
            if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
            reclen = de->d_reclen;
            treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
            tde->d_reclen = tswap16(treclen);
            tde->d_ino = tswapl(de->d_ino);
            tde->d_off = tswapl(de->d_off);
            tnamelen = treclen - (2 * sizeof(abi_long) + 2);
            /* XXX: may not be correct */
            pstrcpy(tde->d_name, tnamelen, de->d_name);
            de = (struct linux_dirent *)((char *)de + reclen);
            tde = (struct target_dirent *)((char *)tde + treclen);
            unlock_user(target_dirp, arg2, ret);
        struct linux_dirent *dirp;
        abi_long count = arg3;
        if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
        ret = get_errno(sys_getdents(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent *de;
            reclen = de->d_reclen;
            de->d_reclen = tswap16(reclen);
            tswapls(&de->d_ino);
            tswapls(&de->d_off);
            de = (struct linux_dirent *)((char *)de + reclen);
            unlock_user(dirp, arg2, ret);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        struct linux_dirent64 *dirp;
        abi_long count = arg3;
        if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
        ret = get_errno(sys_getdents64(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent64 *de;
            reclen = de->d_reclen;
            de->d_reclen = tswap16(reclen);
            tswap64s((uint64_t *)&de->d_ino);
            tswap64s((uint64_t *)&de->d_off);
            de = (struct linux_dirent64 *)((char *)de + reclen);
            unlock_user(dirp, arg2, ret);
#endif /* TARGET_NR_getdents64 */
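        /* When a 32-bit guest runs on a 64-bit host, struct linux_dirent
         * carries 64-bit d_ino/d_off fields while the guest expects
         * 32-bit ones, so the records cannot be handed back in place:
         * the host buffer is walked record by record and each entry is
         * repacked into a smaller target_dirent (treclen accounts for
         * the two shrunken long fields) and byte-swapped.  When the
         * layouts already match, only in-place byte swapping of
         * d_reclen/d_ino/d_off is needed. */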
#ifdef TARGET_NR__newselect
    case TARGET_NR__newselect:
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        struct target_pollfd *target_pfd;
        unsigned int nfds = arg2;
        target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        ret = get_errno(poll(pfd, nfds, timeout));
        if (!is_error(ret)) {
            for (i = 0; i < nfds; i++) {
                target_pfd[i].revents = tswap16(pfd[i].revents);
            ret += nfds * (sizeof(struct target_pollfd)
                           - sizeof(struct pollfd));
        unlock_user(target_pfd, arg1, ret);
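        /* poll() is emulated by copying the guest's target_pollfd array
         * into a host struct pollfd array (fd/events byte-swapped),
         * running the host poll(), and writing the revents fields back
         * into the guest array before unlocking it. */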
5950 case TARGET_NR_flock
:
5951 /* NOTE: the flock constant seems to be the same for every
5953 ret
= get_errno(flock(arg1
, arg2
));
5955 case TARGET_NR_readv
:
5960 vec
= alloca(count
* sizeof(struct iovec
));
5961 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
5963 ret
= get_errno(readv(arg1
, vec
, count
));
5964 unlock_iovec(vec
, arg2
, count
, 1);
5967 case TARGET_NR_writev
:
5972 vec
= alloca(count
* sizeof(struct iovec
));
5973 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
5975 ret
= get_errno(writev(arg1
, vec
, count
));
5976 unlock_iovec(vec
, arg2
, count
, 0);
5979 case TARGET_NR_getsid
:
5980 ret
= get_errno(getsid(arg1
));
5982 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5983 case TARGET_NR_fdatasync
:
5984 ret
= get_errno(fdatasync(arg1
));
5987 case TARGET_NR__sysctl
:
5988 /* We don't implement this, but ENOTDIR is always a safe
5990 ret
= -TARGET_ENOTDIR
;
5992 case TARGET_NR_sched_setparam
:
5994 struct sched_param
*target_schp
;
5995 struct sched_param schp
;
5997 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
5999 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6000 unlock_user_struct(target_schp
, arg2
, 0);
6001 ret
= get_errno(sched_setparam(arg1
, &schp
));
6004 case TARGET_NR_sched_getparam
:
6006 struct sched_param
*target_schp
;
6007 struct sched_param schp
;
6008 ret
= get_errno(sched_getparam(arg1
, &schp
));
6009 if (!is_error(ret
)) {
6010 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
6012 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
6013 unlock_user_struct(target_schp
, arg2
, 1);
6017 case TARGET_NR_sched_setscheduler
:
6019 struct sched_param
*target_schp
;
6020 struct sched_param schp
;
6021 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
6023 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6024 unlock_user_struct(target_schp
, arg3
, 0);
6025 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
6028 case TARGET_NR_sched_getscheduler
:
6029 ret
= get_errno(sched_getscheduler(arg1
));
6031 case TARGET_NR_sched_yield
:
6032 ret
= get_errno(sched_yield());
6034 case TARGET_NR_sched_get_priority_max
:
6035 ret
= get_errno(sched_get_priority_max(arg1
));
6037 case TARGET_NR_sched_get_priority_min
:
6038 ret
= get_errno(sched_get_priority_min(arg1
));
6040 case TARGET_NR_sched_rr_get_interval
:
6043 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
6044 if (!is_error(ret
)) {
6045 host_to_target_timespec(arg2
, &ts
);
6049 case TARGET_NR_nanosleep
:
6051 struct timespec req
, rem
;
6052 target_to_host_timespec(&req
, arg1
);
6053 ret
= get_errno(nanosleep(&req
, &rem
));
6054 if (is_error(ret
) && arg2
) {
6055 host_to_target_timespec(arg2
, &rem
);
6059 #ifdef TARGET_NR_query_module
6060 case TARGET_NR_query_module
:
6063 #ifdef TARGET_NR_nfsservctl
6064 case TARGET_NR_nfsservctl
:
6067 case TARGET_NR_prctl
:
6070 case PR_GET_PDEATHSIG
:
6073 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
6074 if (!is_error(ret
) && arg2
6075 && put_user_ual(deathsig
, arg2
))
6080 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
6084 #ifdef TARGET_NR_arch_prctl
6085 case TARGET_NR_arch_prctl
:
6086 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6087 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
6093 #ifdef TARGET_NR_pread
6094 case TARGET_NR_pread
:
6096 if (((CPUARMState
*)cpu_env
)->eabi
)
6099 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6101 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
6102 unlock_user(p
, arg2
, ret
);
6104 case TARGET_NR_pwrite
:
6106 if (((CPUARMState
*)cpu_env
)->eabi
)
6109 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6111 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
6112 unlock_user(p
, arg2
, 0);
6115 #ifdef TARGET_NR_pread64
6116 case TARGET_NR_pread64
:
6117 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6119 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6120 unlock_user(p
, arg2
, ret
);
6122 case TARGET_NR_pwrite64
:
6123 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6125 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6126 unlock_user(p
, arg2
, 0);
6129 case TARGET_NR_getcwd
:
6130 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
6132 ret
= get_errno(sys_getcwd1(p
, arg2
));
6133 unlock_user(p
, arg1
, ret
);
6135 case TARGET_NR_capget
:
6137 case TARGET_NR_capset
:
6139 case TARGET_NR_sigaltstack
:
6140 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6141 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6142 defined(TARGET_M68K)
6143 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUState
*)cpu_env
));
6148 case TARGET_NR_sendfile
:
6150 #ifdef TARGET_NR_getpmsg
6151 case TARGET_NR_getpmsg
:
6154 #ifdef TARGET_NR_putpmsg
6155 case TARGET_NR_putpmsg
:
6158 #ifdef TARGET_NR_vfork
6159 case TARGET_NR_vfork
:
6160 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
6164 #ifdef TARGET_NR_ugetrlimit
6165 case TARGET_NR_ugetrlimit
:
6168 ret
= get_errno(getrlimit(arg1
, &rlim
));
6169 if (!is_error(ret
)) {
6170 struct target_rlimit
*target_rlim
;
6171 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6173 target_rlim
->rlim_cur
= tswapl(rlim
.rlim_cur
);
6174 target_rlim
->rlim_max
= tswapl(rlim
.rlim_max
);
6175 unlock_user_struct(target_rlim
, arg2
, 1);
6180 #ifdef TARGET_NR_truncate64
6181 case TARGET_NR_truncate64
:
6182 if (!(p
= lock_user_string(arg1
)))
6184 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
6185 unlock_user(p
, arg1
, 0);
6188 #ifdef TARGET_NR_ftruncate64
6189 case TARGET_NR_ftruncate64
:
6190 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
6193 #ifdef TARGET_NR_stat64
6194 case TARGET_NR_stat64
:
6195 if (!(p
= lock_user_string(arg1
)))
6197 ret
= get_errno(stat(path(p
), &st
));
6198 unlock_user(p
, arg1
, 0);
6200 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6203 #ifdef TARGET_NR_lstat64
6204 case TARGET_NR_lstat64
:
6205 if (!(p
= lock_user_string(arg1
)))
6207 ret
= get_errno(lstat(path(p
), &st
));
6208 unlock_user(p
, arg1
, 0);
6210 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6213 #ifdef TARGET_NR_fstat64
6214 case TARGET_NR_fstat64
:
6215 ret
= get_errno(fstat(arg1
, &st
));
6217 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6220 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6221 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6222 #ifdef TARGET_NR_fstatat64
6223 case TARGET_NR_fstatat64
:
6225 #ifdef TARGET_NR_newfstatat
6226 case TARGET_NR_newfstatat
:
6228 if (!(p
= lock_user_string(arg2
)))
6230 #ifdef __NR_fstatat64
6231 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
6233 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
6236 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
6240 case TARGET_NR_lchown
:
6241 if (!(p
= lock_user_string(arg1
)))
6243 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6244 unlock_user(p
, arg1
, 0);
6246 case TARGET_NR_getuid
:
6247 ret
= get_errno(high2lowuid(getuid()));
6249 case TARGET_NR_getgid
:
6250 ret
= get_errno(high2lowgid(getgid()));
6252 case TARGET_NR_geteuid
:
6253 ret
= get_errno(high2lowuid(geteuid()));
6255 case TARGET_NR_getegid
:
6256 ret
= get_errno(high2lowgid(getegid()));
6258 case TARGET_NR_setreuid
:
6259 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
6261 case TARGET_NR_setregid
:
6262 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
6264 case TARGET_NR_getgroups
:
6266 int gidsetsize
= arg1
;
6267 uint16_t *target_grouplist
;
6271 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6272 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6273 if (gidsetsize
== 0)
6275 if (!is_error(ret
)) {
6276 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
6277 if (!target_grouplist
)
6279 for(i
= 0;i
< ret
; i
++)
6280 target_grouplist
[i
] = tswap16(grouplist
[i
]);
6281 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
6285 case TARGET_NR_setgroups
:
6287 int gidsetsize
= arg1
;
6288 uint16_t *target_grouplist
;
6292 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6293 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
6294 if (!target_grouplist
) {
6295 ret
= -TARGET_EFAULT
;
6298 for(i
= 0;i
< gidsetsize
; i
++)
6299 grouplist
[i
] = tswap16(target_grouplist
[i
]);
6300 unlock_user(target_grouplist
, arg2
, 0);
6301 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6304 case TARGET_NR_fchown
:
6305 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
6307 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6308 case TARGET_NR_fchownat
:
6309 if (!(p
= lock_user_string(arg2
)))
6311 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
6312 unlock_user(p
, arg2
, 0);
6315 #ifdef TARGET_NR_setresuid
6316 case TARGET_NR_setresuid
:
6317 ret
= get_errno(setresuid(low2highuid(arg1
),
6319 low2highuid(arg3
)));
6322 #ifdef TARGET_NR_getresuid
6323 case TARGET_NR_getresuid
:
6325 uid_t ruid
, euid
, suid
;
6326 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6327 if (!is_error(ret
)) {
6328 if (put_user_u16(high2lowuid(ruid
), arg1
)
6329 || put_user_u16(high2lowuid(euid
), arg2
)
6330 || put_user_u16(high2lowuid(suid
), arg3
))
6336 #ifdef TARGET_NR_getresgid
6337 case TARGET_NR_setresgid
:
6338 ret
= get_errno(setresgid(low2highgid(arg1
),
6340 low2highgid(arg3
)));
6343 #ifdef TARGET_NR_getresgid
6344 case TARGET_NR_getresgid
:
6346 gid_t rgid
, egid
, sgid
;
6347 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6348 if (!is_error(ret
)) {
6349 if (put_user_u16(high2lowgid(rgid
), arg1
)
6350 || put_user_u16(high2lowgid(egid
), arg2
)
6351 || put_user_u16(high2lowgid(sgid
), arg3
))
6357 case TARGET_NR_chown
:
6358 if (!(p
= lock_user_string(arg1
)))
6360 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6361 unlock_user(p
, arg1
, 0);
6363 case TARGET_NR_setuid
:
6364 ret
= get_errno(setuid(low2highuid(arg1
)));
6366 case TARGET_NR_setgid
:
6367 ret
= get_errno(setgid(low2highgid(arg1
)));
6369 case TARGET_NR_setfsuid
:
6370 ret
= get_errno(setfsuid(arg1
));
6372 case TARGET_NR_setfsgid
:
6373 ret
= get_errno(setfsgid(arg1
));
6375 #endif /* USE_UID16 */
6377 #ifdef TARGET_NR_lchown32
6378 case TARGET_NR_lchown32
:
6379 if (!(p
= lock_user_string(arg1
)))
6381 ret
= get_errno(lchown(p
, arg2
, arg3
));
6382 unlock_user(p
, arg1
, 0);
6385 #ifdef TARGET_NR_getuid32
6386 case TARGET_NR_getuid32
:
6387 ret
= get_errno(getuid());
6391 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6392 /* Alpha specific */
6393 case TARGET_NR_getxuid
:
6397 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
6399 ret
= get_errno(getuid());
6402 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6403 /* Alpha specific */
6404 case TARGET_NR_getxgid
:
6408 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
6410 ret
= get_errno(getgid());
6413 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
6414 /* Alpha specific */
6415 case TARGET_NR_osf_getsysinfo
:
6416 ret
= -TARGET_EOPNOTSUPP
;
6418 case TARGET_GSI_IEEE_FP_CONTROL
:
6420 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
6422 /* Copied from linux ieee_fpcr_to_swcr. */
6423 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
6424 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
6425 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
6426 | SWCR_TRAP_ENABLE_DZE
6427 | SWCR_TRAP_ENABLE_OVF
);
6428 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
6429 | SWCR_TRAP_ENABLE_INE
);
6430 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
6431 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
6433 if (put_user_u64 (swcr
, arg2
))
6439 /* case GSI_IEEE_STATE_AT_SIGNAL:
6440 -- Not implemented in linux kernel.
6442 -- Retrieves current unaligned access state; not much used.
6444 -- Retrieves implver information; surely not used.
6446 -- Grabs a copy of the HWRPB; surely not used.
6451 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
6452 /* Alpha specific */
6453 case TARGET_NR_osf_setsysinfo
:
6454 ret
= -TARGET_EOPNOTSUPP
;
6456 case TARGET_SSI_IEEE_FP_CONTROL
:
6457 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
6459 uint64_t swcr
, fpcr
, orig_fpcr
;
6461 if (get_user_u64 (swcr
, arg2
))
6463 orig_fpcr
= cpu_alpha_load_fpcr (cpu_env
);
6464 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
6466 /* Copied from linux ieee_swcr_to_fpcr. */
6467 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
6468 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
6469 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
6470 | SWCR_TRAP_ENABLE_DZE
6471 | SWCR_TRAP_ENABLE_OVF
)) << 48;
6472 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
6473 | SWCR_TRAP_ENABLE_INE
)) << 57;
6474 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
6475 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
6477 cpu_alpha_store_fpcr (cpu_env
, fpcr
);
6480 if (arg1
== TARGET_SSI_IEEE_RAISE_EXCEPTION
) {
6481 /* Old exceptions are not signaled. */
6482 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
6484 /* If any exceptions set by this call, and are unmasked,
6491 /* case SSI_NVPAIRS:
6492 -- Used with SSIN_UACPROC to enable unaligned accesses.
6493 case SSI_IEEE_STATE_AT_SIGNAL:
6494 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
6495 -- Not implemented in linux kernel
6500 #ifdef TARGET_NR_osf_sigprocmask
6501 /* Alpha specific. */
6502 case TARGET_NR_osf_sigprocmask
:
6506 sigset_t set
, oldset
;
6509 case TARGET_SIG_BLOCK
:
6512 case TARGET_SIG_UNBLOCK
:
6515 case TARGET_SIG_SETMASK
:
6519 ret
= -TARGET_EINVAL
;
6523 target_to_host_old_sigset(&set
, &mask
);
6524 sigprocmask(arg1
, &set
, &oldset
);
6525 host_to_target_old_sigset(&mask
, &oldset
);
6531 #ifdef TARGET_NR_getgid32
6532 case TARGET_NR_getgid32
:
6533 ret
= get_errno(getgid());
6536 #ifdef TARGET_NR_geteuid32
6537 case TARGET_NR_geteuid32
:
6538 ret
= get_errno(geteuid());
6541 #ifdef TARGET_NR_getegid32
6542 case TARGET_NR_getegid32
:
6543 ret
= get_errno(getegid());
6546 #ifdef TARGET_NR_setreuid32
6547 case TARGET_NR_setreuid32
:
6548 ret
= get_errno(setreuid(arg1
, arg2
));
6551 #ifdef TARGET_NR_setregid32
6552 case TARGET_NR_setregid32
:
6553 ret
= get_errno(setregid(arg1
, arg2
));
6556 #ifdef TARGET_NR_getgroups32
6557 case TARGET_NR_getgroups32
:
6559 int gidsetsize
= arg1
;
6560 uint32_t *target_grouplist
;
6564 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6565 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6566 if (gidsetsize
== 0)
6568 if (!is_error(ret
)) {
6569 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
6570 if (!target_grouplist
) {
6571 ret
= -TARGET_EFAULT
;
6574 for(i
= 0;i
< ret
; i
++)
6575 target_grouplist
[i
] = tswap32(grouplist
[i
]);
6576 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
6581 #ifdef TARGET_NR_setgroups32
6582 case TARGET_NR_setgroups32
:
6584 int gidsetsize
= arg1
;
6585 uint32_t *target_grouplist
;
6589 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6590 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
6591 if (!target_grouplist
) {
6592 ret
= -TARGET_EFAULT
;
6595 for(i
= 0;i
< gidsetsize
; i
++)
6596 grouplist
[i
] = tswap32(target_grouplist
[i
]);
6597 unlock_user(target_grouplist
, arg2
, 0);
6598 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6602 #ifdef TARGET_NR_fchown32
6603 case TARGET_NR_fchown32
:
6604 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
6607 #ifdef TARGET_NR_setresuid32
6608 case TARGET_NR_setresuid32
:
6609 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
6612 #ifdef TARGET_NR_getresuid32
6613 case TARGET_NR_getresuid32
:
6615 uid_t ruid
, euid
, suid
;
6616 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6617 if (!is_error(ret
)) {
6618 if (put_user_u32(ruid
, arg1
)
6619 || put_user_u32(euid
, arg2
)
6620 || put_user_u32(suid
, arg3
))
6626 #ifdef TARGET_NR_setresgid32
6627 case TARGET_NR_setresgid32
:
6628 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
6631 #ifdef TARGET_NR_getresgid32
6632 case TARGET_NR_getresgid32
:
6634 gid_t rgid
, egid
, sgid
;
6635 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6636 if (!is_error(ret
)) {
6637 if (put_user_u32(rgid
, arg1
)
6638 || put_user_u32(egid
, arg2
)
6639 || put_user_u32(sgid
, arg3
))
6645 #ifdef TARGET_NR_chown32
6646 case TARGET_NR_chown32
:
6647 if (!(p
= lock_user_string(arg1
)))
6649 ret
= get_errno(chown(p
, arg2
, arg3
));
6650 unlock_user(p
, arg1
, 0);
6653 #ifdef TARGET_NR_setuid32
6654 case TARGET_NR_setuid32
:
6655 ret
= get_errno(setuid(arg1
));
6658 #ifdef TARGET_NR_setgid32
6659 case TARGET_NR_setgid32
:
6660 ret
= get_errno(setgid(arg1
));
6663 #ifdef TARGET_NR_setfsuid32
6664 case TARGET_NR_setfsuid32
:
6665 ret
= get_errno(setfsuid(arg1
));
6668 #ifdef TARGET_NR_setfsgid32
6669 case TARGET_NR_setfsgid32
:
6670 ret
= get_errno(setfsgid(arg1
));
6674 case TARGET_NR_pivot_root
:
6676 #ifdef TARGET_NR_mincore
6677 case TARGET_NR_mincore
:
6680 ret
= -TARGET_EFAULT
;
6681 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
6683 if (!(p
= lock_user_string(arg3
)))
6685 ret
= get_errno(mincore(a
, arg2
, p
));
6686 unlock_user(p
, arg3
, ret
);
6688 unlock_user(a
, arg1
, 0);
6692 #ifdef TARGET_NR_arm_fadvise64_64
6693 case TARGET_NR_arm_fadvise64_64
:
6696 * arm_fadvise64_64 looks like fadvise64_64 but
6697 * with different argument order
6705 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
6706 #ifdef TARGET_NR_fadvise64_64
6707 case TARGET_NR_fadvise64_64
:
6709 #ifdef TARGET_NR_fadvise64
6710 case TARGET_NR_fadvise64
:
6714 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
6715 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
6716 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
6717 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
6721 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
6724 #ifdef TARGET_NR_madvise
6725 case TARGET_NR_madvise
:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok. */
6733 #if TARGET_ABI_BITS == 32
6734 case TARGET_NR_fcntl64
:
6738 struct target_flock64
*target_fl
;
6740 struct target_eabi_flock64
*target_efl
;
6743 cmd
= target_to_host_fcntl_cmd(arg2
);
6744 if (cmd
== -TARGET_EINVAL
)
6748 case TARGET_F_GETLK64
:
6750 if (((CPUARMState
*)cpu_env
)->eabi
) {
6751 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
6753 fl
.l_type
= tswap16(target_efl
->l_type
);
6754 fl
.l_whence
= tswap16(target_efl
->l_whence
);
6755 fl
.l_start
= tswap64(target_efl
->l_start
);
6756 fl
.l_len
= tswap64(target_efl
->l_len
);
6757 fl
.l_pid
= tswap32(target_efl
->l_pid
);
6758 unlock_user_struct(target_efl
, arg3
, 0);
6762 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
6764 fl
.l_type
= tswap16(target_fl
->l_type
);
6765 fl
.l_whence
= tswap16(target_fl
->l_whence
);
6766 fl
.l_start
= tswap64(target_fl
->l_start
);
6767 fl
.l_len
= tswap64(target_fl
->l_len
);
6768 fl
.l_pid
= tswap32(target_fl
->l_pid
);
6769 unlock_user_struct(target_fl
, arg3
, 0);
6771 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
6774 if (((CPUARMState
*)cpu_env
)->eabi
) {
6775 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
6777 target_efl
->l_type
= tswap16(fl
.l_type
);
6778 target_efl
->l_whence
= tswap16(fl
.l_whence
);
6779 target_efl
->l_start
= tswap64(fl
.l_start
);
6780 target_efl
->l_len
= tswap64(fl
.l_len
);
6781 target_efl
->l_pid
= tswap32(fl
.l_pid
);
6782 unlock_user_struct(target_efl
, arg3
, 1);
6786 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
6788 target_fl
->l_type
= tswap16(fl
.l_type
);
6789 target_fl
->l_whence
= tswap16(fl
.l_whence
);
6790 target_fl
->l_start
= tswap64(fl
.l_start
);
6791 target_fl
->l_len
= tswap64(fl
.l_len
);
6792 target_fl
->l_pid
= tswap32(fl
.l_pid
);
6793 unlock_user_struct(target_fl
, arg3
, 1);
6798 case TARGET_F_SETLK64
:
6799 case TARGET_F_SETLKW64
:
6801 if (((CPUARMState
*)cpu_env
)->eabi
) {
6802 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
6804 fl
.l_type
= tswap16(target_efl
->l_type
);
6805 fl
.l_whence
= tswap16(target_efl
->l_whence
);
6806 fl
.l_start
= tswap64(target_efl
->l_start
);
6807 fl
.l_len
= tswap64(target_efl
->l_len
);
6808 fl
.l_pid
= tswap32(target_efl
->l_pid
);
6809 unlock_user_struct(target_efl
, arg3
, 0);
6813 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
6815 fl
.l_type
= tswap16(target_fl
->l_type
);
6816 fl
.l_whence
= tswap16(target_fl
->l_whence
);
6817 fl
.l_start
= tswap64(target_fl
->l_start
);
6818 fl
.l_len
= tswap64(target_fl
->l_len
);
6819 fl
.l_pid
= tswap32(target_fl
->l_pid
);
6820 unlock_user_struct(target_fl
, arg3
, 0);
6822 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
6825 ret
= do_fcntl(arg1
, arg2
, arg3
);
6831 #ifdef TARGET_NR_cacheflush
6832 case TARGET_NR_cacheflush
:
6833 /* self-modifying code is handled automatically, so nothing needed */
6837 #ifdef TARGET_NR_security
6838 case TARGET_NR_security
:
6841 #ifdef TARGET_NR_getpagesize
6842 case TARGET_NR_getpagesize
:
6843 ret
= TARGET_PAGE_SIZE
;
6846 case TARGET_NR_gettid
:
6847 ret
= get_errno(gettid());
6849 #ifdef TARGET_NR_readahead
6850 case TARGET_NR_readahead
:
6851 #if TARGET_ABI_BITS == 32
6853 if (((CPUARMState
*)cpu_env
)->eabi
)
6860 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
6862 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
6866 #ifdef TARGET_NR_setxattr
6867 case TARGET_NR_setxattr
:
6868 case TARGET_NR_lsetxattr
:
6869 case TARGET_NR_fsetxattr
:
6870 case TARGET_NR_getxattr
:
6871 case TARGET_NR_lgetxattr
:
6872 case TARGET_NR_fgetxattr
:
6873 case TARGET_NR_listxattr
:
6874 case TARGET_NR_llistxattr
:
6875 case TARGET_NR_flistxattr
:
6876 case TARGET_NR_removexattr
:
6877 case TARGET_NR_lremovexattr
:
6878 case TARGET_NR_fremovexattr
:
6879 ret
= -TARGET_EOPNOTSUPP
;
6882 #ifdef TARGET_NR_set_thread_area
6883 case TARGET_NR_set_thread_area
:
6884 #if defined(TARGET_MIPS)
6885 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
6888 #elif defined(TARGET_CRIS)
6890 ret
= -TARGET_EINVAL
;
6892 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
6896 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6897 ret
= do_set_thread_area(cpu_env
, arg1
);
6900 goto unimplemented_nowarn
;
6903 #ifdef TARGET_NR_get_thread_area
6904 case TARGET_NR_get_thread_area
:
6905 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6906 ret
= do_get_thread_area(cpu_env
, arg1
);
6908 goto unimplemented_nowarn
;
6911 #ifdef TARGET_NR_getdomainname
6912 case TARGET_NR_getdomainname
:
6913 goto unimplemented_nowarn
;
6916 #ifdef TARGET_NR_clock_gettime
6917 case TARGET_NR_clock_gettime
:
6920 ret
= get_errno(clock_gettime(arg1
, &ts
));
6921 if (!is_error(ret
)) {
6922 host_to_target_timespec(arg2
, &ts
);
6927 #ifdef TARGET_NR_clock_getres
6928 case TARGET_NR_clock_getres
:
6931 ret
= get_errno(clock_getres(arg1
, &ts
));
6932 if (!is_error(ret
)) {
6933 host_to_target_timespec(arg2
, &ts
);
6938 #ifdef TARGET_NR_clock_nanosleep
6939 case TARGET_NR_clock_nanosleep
:
6942 target_to_host_timespec(&ts
, arg3
);
6943 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
6945 host_to_target_timespec(arg4
, &ts
);
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif

#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif
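    /* utimensat: a zero guest pointer for the timespec array is passed
     * through as NULL (host semantics: set both timestamps to the
     * current time), and a zero pathname pointer selects the fd-only
     * form of the call. */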
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts + 1, arg3 + sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
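    /* futex is dispatched through do_futex(), which performs the
     * per-operation argument and timeout conversion; the inotify
     * calls are thin wrappers around the host syscalls. */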
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
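    /* POSIX message queues: the timed send/receive variants are used
     * only when the guest supplied a timeout (arg5); otherwise the
     * plain mq_send/mq_receive calls are made. */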
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr(&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3,
                                    arg4 != 0 ? &posix_mq_attr : NULL));
            unlock_user(p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user(p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif
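    /* splice: the in/out offsets are optional.  A non-zero guest pointer
     * is dereferenced into a local loff_t whose address is handed to the
     * host call; a zero pointer is passed as NULL. */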
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(vmsplice(arg1, vec, count, arg4));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
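    /* eventfd: the initial counter value comes from arg1; eventfd2
     * additionally passes the guest flags straight through to the host
     * (no flag translation is performed here). */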
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
        break;
#endif
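    /* Syscalls that are known but intentionally unsupported jump to
     * unimplemented_nowarn below so they return -TARGET_ENOSYS without
     * flooding the log with "Unsupported syscall" messages. */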
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;