 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <qemu-common.h>
#include <sys/eventfd.h>

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#include "linux_loop.h"
#include "cpu-uname.h"

#include "qemu-common.h"
#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
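
/*
 * Illustrative note (not part of the original source): each _syscallN macro
 * expands to a small static wrapper that forwards its arguments to the host
 * syscall() function.  For example,
 *
 *     _syscall2(int, sys_getpriority, int, which, int, who);
 *
 * expands (modulo whitespace) to:
 *
 *     static int sys_getpriority (int which, int who)
 *     {
 *         return syscall(__NR_sys_getpriority, which, who);
 *     }
 *
 * where __NR_sys_getpriority is mapped to the real __NR_getpriority by the
 * #define block that follows.
 */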
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
#define __NR__llseek __NR_lseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
#if defined(TARGET_NR__llseek) && !defined (__x86_64__)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
  { 0, 0, 0, 0 }
};
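
/*
 * Illustrative note (not part of the original source): each row of the table
 * above reads as { target_mask, target_bits, host_mask, host_bits }.
 * target_to_host_bitmask() (defined elsewhere in the tree) walks the rows
 * and, for every row whose masked target value matches, ORs in the
 * corresponding host bits.  So even on a target whose TARGET_O_NONBLOCK
 * value differs numerically from the host O_NONBLOCK, a guest open() flag
 * word ends up with the correct host bit set before the host call is made.
 */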
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct the Linux kernel uses).
   */

  bzero(buf, sizeof (*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);

#undef COPY_UTSNAME_FIELD
}
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
  return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
  return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat) && defined(USE_UID16)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
    gid_t group, int flags)
{
  return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
    const struct timeval times[2])
{
  return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath, int flags)
{
  return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
  return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
    dev_t dev)
{
  return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, ...)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      va_list ap;
      mode_t mode;

      /*
       * Get the 'mode' parameter and translate it to
       * host bits.
       */
      va_start(ap, flags);
      mode = va_arg(ap, mode_t);
      mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
      va_end(ap);

      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
  return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath)
{
  return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
  return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
  return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */
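
/*
 * Illustrative note (not part of the original source): the CONFIG_ATFILE
 * branch above wraps the host libc *at() functions, while the !CONFIG_ATFILE
 * branch declares thin wrappers that invoke the raw host syscalls via the
 * _syscallN macros.  Either way the emulator ends up with a uniform set of
 * sys_*at() helpers, so later code can call e.g.
 * sys_openat(dirfd, path, host_flags, mode) regardless of which branch was
 * compiled in.
 */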
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]            = TARGET_EIDRM,
    [ECHRNG]           = TARGET_ECHRNG,
    [EL2NSYNC]         = TARGET_EL2NSYNC,
    [EL3HLT]           = TARGET_EL3HLT,
    [EL3RST]           = TARGET_EL3RST,
    [ELNRNG]           = TARGET_ELNRNG,
    [EUNATCH]          = TARGET_EUNATCH,
    [ENOCSI]           = TARGET_ENOCSI,
    [EL2HLT]           = TARGET_EL2HLT,
    [EDEADLK]          = TARGET_EDEADLK,
    [ENOLCK]           = TARGET_ENOLCK,
    [EBADE]            = TARGET_EBADE,
    [EBADR]            = TARGET_EBADR,
    [EXFULL]           = TARGET_EXFULL,
    [ENOANO]           = TARGET_ENOANO,
    [EBADRQC]          = TARGET_EBADRQC,
    [EBADSLT]          = TARGET_EBADSLT,
    [EBFONT]           = TARGET_EBFONT,
    [ENOSTR]           = TARGET_ENOSTR,
    [ENODATA]          = TARGET_ENODATA,
    [ETIME]            = TARGET_ETIME,
    [ENOSR]            = TARGET_ENOSR,
    [ENONET]           = TARGET_ENONET,
    [ENOPKG]           = TARGET_ENOPKG,
    [EREMOTE]          = TARGET_EREMOTE,
    [ENOLINK]          = TARGET_ENOLINK,
    [EADV]             = TARGET_EADV,
    [ESRMNT]           = TARGET_ESRMNT,
    [ECOMM]            = TARGET_ECOMM,
    [EPROTO]           = TARGET_EPROTO,
    [EDOTDOT]          = TARGET_EDOTDOT,
    [EMULTIHOP]        = TARGET_EMULTIHOP,
    [EBADMSG]          = TARGET_EBADMSG,
    [ENAMETOOLONG]     = TARGET_ENAMETOOLONG,
    [EOVERFLOW]        = TARGET_EOVERFLOW,
    [ENOTUNIQ]         = TARGET_ENOTUNIQ,
    [EBADFD]           = TARGET_EBADFD,
    [EREMCHG]          = TARGET_EREMCHG,
    [ELIBACC]          = TARGET_ELIBACC,
    [ELIBBAD]          = TARGET_ELIBBAD,
    [ELIBSCN]          = TARGET_ELIBSCN,
    [ELIBMAX]          = TARGET_ELIBMAX,
    [ELIBEXEC]         = TARGET_ELIBEXEC,
    [EILSEQ]           = TARGET_EILSEQ,
    [ENOSYS]           = TARGET_ENOSYS,
    [ELOOP]            = TARGET_ELOOP,
    [ERESTART]         = TARGET_ERESTART,
    [ESTRPIPE]         = TARGET_ESTRPIPE,
    [ENOTEMPTY]        = TARGET_ENOTEMPTY,
    [EUSERS]           = TARGET_EUSERS,
    [ENOTSOCK]         = TARGET_ENOTSOCK,
    [EDESTADDRREQ]     = TARGET_EDESTADDRREQ,
    [EMSGSIZE]         = TARGET_EMSGSIZE,
    [EPROTOTYPE]       = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]      = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]  = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]  = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]       = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]     = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]     = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]       = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]    = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]         = TARGET_ENETDOWN,
    [ENETUNREACH]      = TARGET_ENETUNREACH,
    [ENETRESET]        = TARGET_ENETRESET,
    [ECONNABORTED]     = TARGET_ECONNABORTED,
    [ECONNRESET]       = TARGET_ECONNRESET,
    [ENOBUFS]          = TARGET_ENOBUFS,
    [EISCONN]          = TARGET_EISCONN,
    [ENOTCONN]         = TARGET_ENOTCONN,
    [EUCLEAN]          = TARGET_EUCLEAN,
    [ENOTNAM]          = TARGET_ENOTNAM,
    [ENAVAIL]          = TARGET_ENAVAIL,
    [EISNAM]           = TARGET_EISNAM,
    [EREMOTEIO]        = TARGET_EREMOTEIO,
    [ESHUTDOWN]        = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]     = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]        = TARGET_ETIMEDOUT,
    [ECONNREFUSED]     = TARGET_ECONNREFUSED,
    [EHOSTDOWN]        = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]     = TARGET_EHOSTUNREACH,
    [EALREADY]         = TARGET_EALREADY,
    [EINPROGRESS]      = TARGET_EINPROGRESS,
    [ESTALE]           = TARGET_ESTALE,
    [ECANCELED]        = TARGET_ECANCELED,
    [ENOMEDIUM]        = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]      = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]           = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]      = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]      = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]     = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]       = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]  = TARGET_ENOTRECOVERABLE,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
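
/*
 * Illustrative note (not part of the original source): get_errno() folds a
 * failing host call into a negative target errno, and is_error() recognises
 * such values by the same convention the kernel uses: any return value in
 * the topmost 4096 of the unsigned range (i.e. a small negative number) is
 * treated as an error code rather than a valid result.  For example, a host
 * read() that fails with EINTR comes back from get_errno() as -TARGET_EINTR,
 * and is_error(-TARGET_EINTR) evaluates to true.
 */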
static abi_ulong target_brk;
static abi_ulong target_original_brk;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
}

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_ulong brk_page;
    abi_long mapped_addr;
    int new_alloc_size;

    if (new_brk < target_original_brk)
        return target_brk;

    brk_page = HOST_PAGE_ALIGN(target_brk);

    /* If the new brk is less than this, set it and we're done... */
    if (new_brk < brk_page) {
        target_brk = new_brk;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));

    if (!is_error(mapped_addr))
        target_brk = new_brk;

    return target_brk;
}
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
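
/*
 * Illustrative note (not part of the original source): the two helpers above
 * repack fd_set bitmaps between guest and host layouts one abi_ulong word at
 * a time.  For instance, with TARGET_ABI_BITS == 32, descriptor 33 lives in
 * bit 1 of guest word 1, while the host FD_SET()/FD_ISSET() macros hide the
 * host's own word size and layout.
 */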
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    if (rfd_addr) {
        if (copy_from_user_fdset(&rfds, rfd_addr, n))
            return -TARGET_EFAULT;
        rfds_ptr = &rfds;
    } else {
        rfds_ptr = NULL;
    }
    if (wfd_addr) {
        if (copy_from_user_fdset(&wfds, wfd_addr, n))
            return -TARGET_EFAULT;
        wfds_ptr = &wfds;
    } else {
        wfds_ptr = NULL;
    }
    if (efd_addr) {
        if (copy_from_user_fdset(&efds, efd_addr, n))
            return -TARGET_EFAULT;
        efds_ptr = &efds;
    } else {
        efds_ptr = NULL;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
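
/*
 * Illustrative note (not part of the original source): the guest passes the
 * three fd_set pointers and the timeout as guest addresses, so every input
 * is converted with the copy_from_user_* helpers before the host select()
 * runs and converted back afterwards, since select() updates both the sets
 * and (on Linux) the remaining timeout in place.
 */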
static abi_long do_pipe2(int host_pipe[], int flags)
{
#if defined(CONFIG_PIPE2)
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, int flags)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);
#if defined(TARGET_MIPS)
    ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
    return host_pipe[0];
#endif
#if defined(TARGET_SH4)
    ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
    return host_pipe[0];
#endif

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * The Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
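
/*
 * Illustrative note (not part of the original source): sa_family is the only
 * field byte-swapped here; the remaining sockaddr payload (e.g. an IPv4 port
 * and address) is already in network byte order on both sides, so a plain
 * memcpy() is sufficient for it.
 */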
/* ??? Should this also swap msgh->name?  */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapl(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* ??? Should this also swap msgh->name?  */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapl(space);
    return 0;
}
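
/*
 * Illustrative note (not part of the original source): SCM_RIGHTS payloads
 * are arrays of file descriptors, so they get element-wise 32-bit swaps in
 * the loops above, whereas any other ancillary data is logged as unsupported
 * and copied through verbatim.
 */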
/* do_setsockopt() must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
            break;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERCRED:
        case TARGET_SO_PEERNAME:
            /* These don't just return a single integer */
            goto unimplemented;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/*
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapl(target_vec[i].iov_base);
        vec[i].iov_len = tswapl(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if an element has an invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}

static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapl(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
    }
    unlock_user (target_vec, target_addr, 0);

    return 0;
}
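
/*
 * Illustrative note (not part of the original source): lock_iovec() converts
 * a guest iovec array into host struct iovec entries, locking each non-empty
 * buffer into host-accessible memory; unlock_iovec() releases those buffers,
 * copying them back to the guest only when 'copy' is set (i.e. after a
 * read-style operation such as readv() or recvmsg()).
 */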
/* do_socket() must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
    return get_errno(socket(domain, type, protocol));
}
/* do_bind() must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0)
        return -TARGET_EINVAL;

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0)
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg() must return target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            unlock_user_struct(msgp, target_msg, send ? 0 : 1);
            return ret;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapl(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapl(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret))
                ret = len;
        }
    }
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* do_accept() must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0)
        return -TARGET_EINVAL;

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getpeername() must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0)
        return -TARGET_EINVAL;

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getsockname() must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0)
        return -TARGET_EINVAL;

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_socketpair() must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0)
        return -TARGET_EINVAL;

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        addr = alloca(addrlen);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
1778 #ifdef TARGET_NR_socketcall
1779 /* do_socketcall() Must return target values and target errnos. */
1780 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
1783 const int n
= sizeof(abi_ulong
);
1788 abi_ulong domain
, type
, protocol
;
1790 if (get_user_ual(domain
, vptr
)
1791 || get_user_ual(type
, vptr
+ n
)
1792 || get_user_ual(protocol
, vptr
+ 2 * n
))
1793 return -TARGET_EFAULT
;
1795 ret
= do_socket(domain
, type
, protocol
);
1801 abi_ulong target_addr
;
1804 if (get_user_ual(sockfd
, vptr
)
1805 || get_user_ual(target_addr
, vptr
+ n
)
1806 || get_user_ual(addrlen
, vptr
+ 2 * n
))
1807 return -TARGET_EFAULT
;
1809 ret
= do_bind(sockfd
, target_addr
, addrlen
);
1812 case SOCKOP_connect
:
1815 abi_ulong target_addr
;
1818 if (get_user_ual(sockfd
, vptr
)
1819 || get_user_ual(target_addr
, vptr
+ n
)
1820 || get_user_ual(addrlen
, vptr
+ 2 * n
))
1821 return -TARGET_EFAULT
;
1823 ret
= do_connect(sockfd
, target_addr
, addrlen
);
1828 abi_ulong sockfd
, backlog
;
1830 if (get_user_ual(sockfd
, vptr
)
1831 || get_user_ual(backlog
, vptr
+ n
))
1832 return -TARGET_EFAULT
;
1834 ret
= get_errno(listen(sockfd
, backlog
));
1840 abi_ulong target_addr
, target_addrlen
;
1842 if (get_user_ual(sockfd
, vptr
)
1843 || get_user_ual(target_addr
, vptr
+ n
)
1844 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1845 return -TARGET_EFAULT
;
1847 ret
= do_accept(sockfd
, target_addr
, target_addrlen
);
1850 case SOCKOP_getsockname
:
1853 abi_ulong target_addr
, target_addrlen
;
1855 if (get_user_ual(sockfd
, vptr
)
1856 || get_user_ual(target_addr
, vptr
+ n
)
1857 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1858 return -TARGET_EFAULT
;
1860 ret
= do_getsockname(sockfd
, target_addr
, target_addrlen
);
1863 case SOCKOP_getpeername
:
1866 abi_ulong target_addr
, target_addrlen
;
1868 if (get_user_ual(sockfd
, vptr
)
1869 || get_user_ual(target_addr
, vptr
+ n
)
1870 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1871 return -TARGET_EFAULT
;
1873 ret
= do_getpeername(sockfd
, target_addr
, target_addrlen
);
1876 case SOCKOP_socketpair
:
1878 abi_ulong domain
, type
, protocol
;
1881 if (get_user_ual(domain
, vptr
)
1882 || get_user_ual(type
, vptr
+ n
)
1883 || get_user_ual(protocol
, vptr
+ 2 * n
)
1884 || get_user_ual(tab
, vptr
+ 3 * n
))
1885 return -TARGET_EFAULT
;
1887 ret
= do_socketpair(domain
, type
, protocol
, tab
);
1897 if (get_user_ual(sockfd
, vptr
)
1898 || get_user_ual(msg
, vptr
+ n
)
1899 || get_user_ual(len
, vptr
+ 2 * n
)
1900 || get_user_ual(flags
, vptr
+ 3 * n
))
1901 return -TARGET_EFAULT
;
1903 ret
= do_sendto(sockfd
, msg
, len
, flags
, 0, 0);
1913 if (get_user_ual(sockfd
, vptr
)
1914 || get_user_ual(msg
, vptr
+ n
)
1915 || get_user_ual(len
, vptr
+ 2 * n
)
1916 || get_user_ual(flags
, vptr
+ 3 * n
))
1917 return -TARGET_EFAULT
;
1919 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, 0, 0);
1931 if (get_user_ual(sockfd
, vptr
)
1932 || get_user_ual(msg
, vptr
+ n
)
1933 || get_user_ual(len
, vptr
+ 2 * n
)
1934 || get_user_ual(flags
, vptr
+ 3 * n
)
1935 || get_user_ual(addr
, vptr
+ 4 * n
)
1936 || get_user_ual(addrlen
, vptr
+ 5 * n
))
1937 return -TARGET_EFAULT
;
1939 ret
= do_sendto(sockfd
, msg
, len
, flags
, addr
, addrlen
);
1942 case SOCKOP_recvfrom
:
1951 if (get_user_ual(sockfd
, vptr
)
1952 || get_user_ual(msg
, vptr
+ n
)
1953 || get_user_ual(len
, vptr
+ 2 * n
)
1954 || get_user_ual(flags
, vptr
+ 3 * n
)
1955 || get_user_ual(addr
, vptr
+ 4 * n
)
1956 || get_user_ual(addrlen
, vptr
+ 5 * n
))
1957 return -TARGET_EFAULT
;
1959 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, addr
, addrlen
);
1962 case SOCKOP_shutdown
:
1964 abi_ulong sockfd
, how
;
1966 if (get_user_ual(sockfd
, vptr
)
1967 || get_user_ual(how
, vptr
+ n
))
1968 return -TARGET_EFAULT
;
1970 ret
= get_errno(shutdown(sockfd
, how
));
1973 case SOCKOP_sendmsg
:
1974 case SOCKOP_recvmsg
:
1977 abi_ulong target_msg
;
1980 if (get_user_ual(fd
, vptr
)
1981 || get_user_ual(target_msg
, vptr
+ n
)
1982 || get_user_ual(flags
, vptr
+ 2 * n
))
1983 return -TARGET_EFAULT
;
1985 ret
= do_sendrecvmsg(fd
, target_msg
, flags
,
1986 (num
== SOCKOP_sendmsg
));
1989 case SOCKOP_setsockopt
:
1997 if (get_user_ual(sockfd
, vptr
)
1998 || get_user_ual(level
, vptr
+ n
)
1999 || get_user_ual(optname
, vptr
+ 2 * n
)
2000 || get_user_ual(optval
, vptr
+ 3 * n
)
2001 || get_user_ual(optlen
, vptr
+ 4 * n
))
2002 return -TARGET_EFAULT
;
2004 ret
= do_setsockopt(sockfd
, level
, optname
, optval
, optlen
);
2007 case SOCKOP_getsockopt
:
2015 if (get_user_ual(sockfd
, vptr
)
2016 || get_user_ual(level
, vptr
+ n
)
2017 || get_user_ual(optname
, vptr
+ 2 * n
)
2018 || get_user_ual(optval
, vptr
+ 3 * n
)
2019 || get_user_ual(optlen
, vptr
+ 4 * n
))
2020 return -TARGET_EFAULT
;
2022 ret
= do_getsockopt(sockfd
, level
, optname
, optval
, optlen
);
2026 gemu_log("Unsupported socketcall: %d\n", num
);
2027 ret
= -TARGET_ENOSYS
;
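
/*
 * Illustrative note (not part of the original source): socketcall(2)
 * multiplexes all socket operations through one syscall whose second
 * argument points at an array of abi_ulong parameters.  Each SOCKOP_* case
 * in do_socketcall() above therefore pulls its arguments out of guest memory
 * at vptr, vptr + n, vptr + 2 * n, ... (with n == sizeof(abi_ulong) for the
 * target ABI) before delegating to the corresponding do_*() helper.
 */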
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
} shm_regions[N_SHM_REGIONS];

struct target_ipc_perm
{
    abi_long __key;
    abi_ulong uid;
    abi_ulong gid;
    abi_ulong cuid;
    abi_ulong cgid;
    unsigned short int mode;
    unsigned short int __pad1;
    unsigned short int __seq;
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
};

struct target_semid_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
  abi_ulong __unused1;
  abi_ulong sem_ctime;
  abi_ulong __unused2;
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswapl(target_ip->__key);
    host_ip->uid = tswapl(target_ip->uid);
    host_ip->gid = tswapl(target_ip->gid);
    host_ip->cuid = tswapl(target_ip->cuid);
    host_ip->cgid = tswapl(target_ip->cgid);
    host_ip->mode = tswapl(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapl(host_ip->__key);
    target_ip->uid = tswapl(host_ip->uid);
    target_ip->gid = tswapl(host_ip->gid);
    target_ip->cuid = tswapl(host_ip->cuid);
    target_ip->cgid = tswapl(host_ip->cgid);
    target_ip->mode = tswapl(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
    host_sd->sem_otime = tswapl(target_sd->sem_otime);
    host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
    target_sd->sem_otime = tswapl(host_sd->sem_otime);
    target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
{
2151 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2152 struct seminfo
*host_seminfo
)
2154 struct target_seminfo
*target_seminfo
;
2155 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2156 return -TARGET_EFAULT
;
2157 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2158 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2159 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2160 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2161 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2162 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2163 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2164 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2165 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2166 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2167 unlock_user_struct(target_seminfo
, target_addr
, 1);
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        arg.val = tswapl(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswapl(arg.val);
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
{
2308 unsigned short sem_num
;
2313 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2314 abi_ulong target_addr
,
2317 struct target_sembuf
*target_sembuf
;
2320 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2321 nsops
*sizeof(struct target_sembuf
), 1);
2323 return -TARGET_EFAULT
;
2325 for(i
=0; i
<nsops
; i
++) {
2326 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2327 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2328 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2331 unlock_user(target_sembuf
, target_addr
, 0);
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return semop(semid, sops, nsops);
}
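/* Guest-visible layout of msqid_ds and its converters.  The __unusedN
   padding words only exist for 32-bit ABIs, matching the corresponding
   kernel structure layout. */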
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapl(target_md->msg_stime);
    host_md->msg_rtime = tswapl(target_md->msg_rtime);
    host_md->msg_ctime = tswapl(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapl(target_md->msg_qnum);
    host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
    host_md->msg_lspid = tswapl(target_md->msg_lspid);
    host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapl(host_md->msg_stime);
    target_md->msg_rtime = tswapl(host_md->msg_rtime);
    target_md->msg_ctime = tswapl(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapl(host_md->msg_qnum);
    target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
    target_md->msg_lspid = tswapl(host_md->msg_lspid);
    target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
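/* msgsnd: the guest message is copied into a freshly malloc()ed host msgbuf
   so that mtype can be byte-swapped independently of the variable-sized
   mtext payload. */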
struct target_msgbuf {
    abi_ulong mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
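/* msgrcv writes back in two steps: mtext is copied out through a separately
   locked region at msgp + sizeof(abi_ulong), then mtype is byte-swapped into
   the locked target_msgbuf header. */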
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapl(host_mb->mtype);
    free(host_mb);

end:
    unlock_user_struct(target_mb, msgp, 1);
    return ret;
}
struct target_shmid_ds
{
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;
    abi_ulong shm_atime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong shm_dtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong shm_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong shm_cpid;
    abi_ulong shm_lpid;
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;
};
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
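/* shmat: when the guest does not supply an address, a free stretch of guest
   address space is found with mmap_find_vma() and the host attachment is
   forced there with SHM_REMAP, so that the returned address is meaningful
   inside the guest. */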
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_ulong raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    return raddr;
}
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
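/* Architectures with a single sys_ipc multiplexer funnel all SysV IPC calls
   through do_ipc(); each sub-call is forwarded to the do_* helpers above. */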
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
            {
                abi_ulong raddr;
                raddr = do_shmat(first, ptr, second);
                if (is_error(raddr))
                    return get_errno(raddr);
                if (put_user_ual(raddr, third))
                    return -TARGET_EFAULT;
                break;
            }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;

    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry {
    unsigned int target_cmd;
    unsigned int host_cmd;
    const char *name;
    int access;
    const argtype arg_type[5];
} IOCTLEntry;

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096

static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
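/* do_ioctl() looks the target command up in ioctl_entries[] and uses the
   thunk argument descriptions to convert the argument structure in the
   direction(s) given by the IOC_R/IOC_W access flags. */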
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#ifdef DEBUG
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* int argument */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
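/* The termios conversions translate each flag word through the tables above
   and copy the control characters one by one, since the c_cc indices need
   not match between host and target. */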
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { 0, 0, 0, 0 }
};
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0              &&
             read_exec_only == 1        &&
             seg_32bit == 0             &&
             limit_in_pages == 0        &&
             seg_not_present == 1       &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#if defined(TARGET_I386) && defined(TARGET_ABI32)
static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0              &&
             read_exec_only == 1        &&
             seg_32bit == 0             &&
             limit_in_pages == 0        &&
             seg_not_present == 1       &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapl(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#endif /* TARGET_I386 && TARGET_ABI32 */
#ifndef TARGET_ABI32
static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            return -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif

#endif /* defined(TARGET_I386) */
#if defined(CONFIG_USE_NPTL)

#define NEW_STACK_SIZE PTHREAD_STACK_MIN

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUState *env;
    TaskState *ts;

    env = info->env;
    thread_env = env;
    ts = (TaskState *)thread_env->opaque;
    info->tid = gettid();
    env->host_tid = info->tid;
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
#else
/* this stack is the equivalent of the kernel stack associated with a
   thread */
#define NEW_STACK_SIZE 8192

static int clone_func(void *arg)
{
    CPUState *env = arg;
    cpu_loop(env);
    /* never exits */
    return 0;
}
#endif
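/* do_fork(): CLONE_VM requests are run on a fresh CPUState, either as a host
   pthread (CONFIG_USE_NPTL) or via host clone(); plain fork()-style requests
   use a real host fork().  The CLONE_*TID bookkeeping is emulated in both
   paths. */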
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    int ret;
    TaskState *ts;
    uint8_t *new_stack;
    CPUState *new_env;
#if defined(CONFIG_USE_NPTL)
    unsigned int nptl_flags;
    sigset_t sigmask;
#endif

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)env->opaque;
#if defined(CONFIG_USE_NPTL)
        new_thread_info info;
        pthread_attr_t attr;
#endif
        ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
        init_task_state(ts);
        new_stack = ts->stack;
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
        cpu_reset(new_env);
#endif
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_env->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
#if defined(CONFIG_USE_NPTL)
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
#else
        if (flags & CLONE_NPTL_FLAGS2)
            return -EINVAL;
        /* This is probably going to die very quickly, but do it anyway.  */
#ifdef __ia64__
        ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
#else
        ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
#endif
#endif
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            return -EINVAL;
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
#if defined(CONFIG_USE_NPTL)
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)env->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
#endif
        }
    }
    return ret;
}
3662 static int target_to_host_fcntl_cmd(int cmd
)
3665 case TARGET_F_DUPFD
:
3666 case TARGET_F_GETFD
:
3667 case TARGET_F_SETFD
:
3668 case TARGET_F_GETFL
:
3669 case TARGET_F_SETFL
:
3671 case TARGET_F_GETLK
:
3673 case TARGET_F_SETLK
:
3675 case TARGET_F_SETLKW
:
3677 case TARGET_F_GETOWN
:
3679 case TARGET_F_SETOWN
:
3681 case TARGET_F_GETSIG
:
3683 case TARGET_F_SETSIG
:
3685 #if TARGET_ABI_BITS == 32
3686 case TARGET_F_GETLK64
:
3688 case TARGET_F_SETLK64
:
3690 case TARGET_F_SETLKW64
:
3693 case TARGET_F_SETLEASE
:
3695 case TARGET_F_GETLEASE
:
3697 #ifdef F_DUPFD_CLOEXEC
3698 case TARGET_F_DUPFD_CLOEXEC
:
3699 return F_DUPFD_CLOEXEC
;
3701 case TARGET_F_NOTIFY
:
3704 return -TARGET_EINVAL
;
3706 return -TARGET_EINVAL
;
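/* do_fcntl() converts struct flock/flock64 between guest and host layouts
   around the host fcntl() call; F_GETFL/F_SETFL additionally translate the
   open-flags bitmask through fcntl_flags_tbl. */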
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapl(target_fl->l_start);
        fl.l_len = tswapl(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type = tswap16(fl.l_type);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapl(fl.l_start);
            target_fl->l_len = tswapl(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapl(target_fl->l_start);
        fl.l_len = tswapl(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswapl(target_fl64->l_start);
        fl64.l_len = tswapl(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type = tswap16(fl64.l_type) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswapl(fl64.l_start);
            target_fl64->l_len = tswapl(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;

    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswapl(target_fl64->l_start);
        fl64.l_len = tswapl(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

#endif /* USE_UID16 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* Build target_to_host_errno_table[] table from
         * host_to_target_errno_table[]. */
        for (i=0; i < ERRNO_TABLE_SIZE; i++)
            target_to_host_errno_table[host_to_target_errno_table[i]] = i;

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi)
      {
        arg2 = arg3;
        arg3 = arg4;
      }
#endif
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi)
      {
        arg2 = arg3;
        arg3 = arg4;
      }
#endif
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapl(target_ts->tv_sec);
    host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapl(host_ts->tv_sec);
    target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
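/* stat64 conversion: the ARM EABI variant uses its own packed
   target_eabi_stat64 layout; everything else picks target_stat or
   target_stat64 depending on TARGET_LONG_BITS. */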
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if (TARGET_LONG_BITS == 64) && (!defined(TARGET_ALPHA))
        struct target_stat *target_st;
#else
        struct target_stat64 *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, 0));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
int get_osversion(void)
{
    static int osversion;
    struct new_utsname buf;
    const char *s;
    int i, n, tmp;

    if (osversion)
        return osversion;
    if (qemu_uname_release && *qemu_uname_release) {
        s = qemu_uname_release;
    } else {
        if (sys_uname(&buf))
            return 0;
        s = buf.release;
    }
    tmp = 0;
    for (i = 0; i < 3; i++) {
        n = 0;
        while (*s >= '0' && *s <= '9') {
            n *= 10;
            n += *s - '0';
            s++;
        }
        tmp = (tmp << 8) + n;
        if (*s == '.')
            s++;
    }
    osversion = tmp;
    return osversion;
}
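/* The main syscall dispatcher starts here.  Guest pointer arguments are
   validated with the lock_user helpers; a failed lock jumps to the common
   efault exit so that every path returns -TARGET_<errcode>. */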
/* do_syscall() should always have a single exit point at the end so
   that actions, such as logging of syscall results, can be performed.
   All errnos that do_syscall() returns must be -TARGET_<errcode>. */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6)
{
    abi_long ret;
    struct stat st;
    struct statfs stfs;
    void *p;

#ifdef DEBUG
    gemu_log("syscall %d", num);
#endif
    if (do_strace)
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);

    switch(num) {
    case TARGET_NR_exit:
#ifdef CONFIG_USE_NPTL
        /* In old applications this may be used to implement _exit(2).
           However in threaded applictions it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more then one thread.  */
        /* FIXME: This probably breaks if a signal arrives.  We should probably
           be disabling signals.  */
        if (first_cpu->next_cpu) {
            TaskState *ts;
            CPUState **lastp;
            CPUState *p;

            lastp = &first_cpu;
            p = first_cpu;
            while (p && p != (CPUState *)cpu_env) {
                lastp = &p->next_cpu;
                p = p->next_cpu;
            }
            /* If we didn't find the CPU for this thread then something is
               horribly wrong.  */
            if (!p)
                abort();
            /* Remove the CPU from the list.  */
            *lastp = p->next_cpu;
            ts = ((CPUState *)cpu_env)->opaque;
            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
                          NULL, NULL, 0);
            }
            /* TODO: Free CPU state.  */
            pthread_exit(NULL);
        }
#endif
        gdb_exit(cpu_env, arg1);
        _exit(arg1);
        ret = 0; /* avoid warning */
        break;
    case TARGET_NR_read:
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
            goto efault;
        ret = get_errno(read(arg1, p, arg3));
        unlock_user(p, arg2, ret);
        break;
    case TARGET_NR_write:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            goto efault;
        ret = get_errno(write(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        break;
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(open(path(p),
                             target_to_host_bitmask(arg2, fcntl_flags_tbl),
                             arg3));
        unlock_user(p, arg1, 0);
        break;
#if defined(TARGET_NR_openat) && defined(__NR_openat)
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_openat(arg1,
                                   path(p),
                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
                                   arg4));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_close:
        ret = get_errno(close(arg1));
        break;
    case TARGET_NR_fork:
        ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
        break;
4250 case TARGET_NR_waitpid
:
4253 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
4254 if (!is_error(ret
) && arg2
4255 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
4260 #ifdef TARGET_NR_waitid
4261 case TARGET_NR_waitid
:
4265 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
4266 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
4267 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
4269 host_to_target_siginfo(p
, &info
);
4270 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
4275 #ifdef TARGET_NR_creat /* not on alpha */
4276 case TARGET_NR_creat
:
4277 if (!(p
= lock_user_string(arg1
)))
4279 ret
= get_errno(creat(p
, arg2
));
4280 unlock_user(p
, arg1
, 0);
4283 case TARGET_NR_link
:
4286 p
= lock_user_string(arg1
);
4287 p2
= lock_user_string(arg2
);
4289 ret
= -TARGET_EFAULT
;
4291 ret
= get_errno(link(p
, p2
));
4292 unlock_user(p2
, arg2
, 0);
4293 unlock_user(p
, arg1
, 0);
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
    case TARGET_NR_linkat:
        {
            void * p2 = NULL;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
            unlock_user(p, arg2, 0);
            unlock_user(p2, arg4, 0);
        }
        break;
#endif
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
        break;
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_execve:
        {
            char **argp, **envp;
            int argc, envc;
            abi_ulong gp;
            abi_ulong guest_argp;
            abi_ulong guest_envp;
            abi_ulong addr;
            char **q;

            argc = 0;
            guest_argp = arg2;
            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    goto efault;
                if (!addr)
                    break;
                argc++;
            }
            envc = 0;
            guest_envp = arg3;
            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    goto efault;
                if (!addr)
                    break;
                envc++;
            }

            argp = alloca((argc + 1) * sizeof(void *));
            envp = alloca((envc + 1) * sizeof(void *));

            for (gp = guest_argp, q = argp; gp;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
            }
            *q = NULL;

            for (gp = guest_envp, q = envp; gp;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
            }
            *q = NULL;

            if (!(p = lock_user_string(arg1)))
                goto execve_efault;
            ret = get_errno(execve(p, argp, envp));
            unlock_user(p, arg1, 0);

            goto execve_end;

        execve_efault:
            ret = -TARGET_EFAULT;

        execve_end:
            for (gp = guest_argp, q = argp; *q;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }
            for (gp = guest_envp, q = envp; *q;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }
        }
        break;
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
        break;
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        {
            time_t host_time;
            ret = get_errno(time(&host_time));
            if (!is_error(ret)
                && arg1
                && put_user_sal(host_time, arg1))
                goto efault;
        }
        break;
#endif
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
        break;
#ifdef TARGET_NR_break
    case TARGET_NR_break:
        goto unimplemented;
#endif
#ifdef TARGET_NR_oldstat
    case TARGET_NR_oldstat:
        goto unimplemented;
#endif
    case TARGET_NR_lseek:
        ret = get_errno(lseek(arg1, arg2, arg3));
        break;
#ifdef TARGET_NR_getxpid
    case TARGET_NR_getxpid:
#else
    case TARGET_NR_getpid:
#endif
        ret = get_errno(getpid());
        break;
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            p3 = lock_user_string(arg3);
            if (!p || !p2 || !p3)
                ret = -TARGET_EFAULT;
            else {
                /* FIXME - arg5 should be locked, but it isn't clear how to
                 * do that since it's not guaranteed to be a NULL-terminated
                 * string.
                 */
                if (!arg5)
                    ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
                else
                    ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
            }
            unlock_user(p, arg1, 0);
            unlock_user(p2, arg2, 0);
            unlock_user(p3, arg3, 0);
            break;
        }
#ifdef TARGET_NR_umount
    case TARGET_NR_umount:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            time_t host_time;
            if (get_user_sal(host_time, arg1))
                goto efault;
            ret = get_errno(stime(&host_time));
        }
        break;
#endif
    case TARGET_NR_ptrace:
        goto unimplemented;
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        ret = alarm(arg1);
        break;
#endif
#ifdef TARGET_NR_oldfstat
    case TARGET_NR_oldfstat:
        goto unimplemented;
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        ret = get_errno(pause());
        break;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    goto efault;
                tbuf.actime = tswapl(target_tbuf->actime);
                tbuf.modtime = tswapl(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                goto efault;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        break;
#endif
4544 case TARGET_NR_utimes
:
4546 struct timeval
*tvp
, tv
[2];
4548 if (copy_from_user_timeval(&tv
[0], arg2
)
4549 || copy_from_user_timeval(&tv
[1],
4550 arg2
+ sizeof(struct target_timeval
)))
4556 if (!(p
= lock_user_string(arg1
)))
4558 ret
= get_errno(utimes(p
, tvp
));
4559 unlock_user(p
, arg1
, 0);
4562 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4563 case TARGET_NR_futimesat
:
4565 struct timeval
*tvp
, tv
[2];
4567 if (copy_from_user_timeval(&tv
[0], arg3
)
4568 || copy_from_user_timeval(&tv
[1],
4569 arg3
+ sizeof(struct target_timeval
)))
4575 if (!(p
= lock_user_string(arg2
)))
4577 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
4578 unlock_user(p
, arg2
, 0);
4582 #ifdef TARGET_NR_stty
4583 case TARGET_NR_stty
:
4586 #ifdef TARGET_NR_gtty
4587 case TARGET_NR_gtty
:
4590 case TARGET_NR_access
:
4591 if (!(p
= lock_user_string(arg1
)))
4593 ret
= get_errno(access(path(p
), arg2
));
4594 unlock_user(p
, arg1
, 0);
4596 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4597 case TARGET_NR_faccessat
:
4598 if (!(p
= lock_user_string(arg2
)))
4600 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
4601 unlock_user(p
, arg2
, 0);
4604 #ifdef TARGET_NR_nice /* not on alpha */
4605 case TARGET_NR_nice
:
4606 ret
= get_errno(nice(arg1
));
4609 #ifdef TARGET_NR_ftime
4610 case TARGET_NR_ftime
:
4613 case TARGET_NR_sync
:
4617 case TARGET_NR_kill
:
4618 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
4620 case TARGET_NR_rename
:
4623 p
= lock_user_string(arg1
);
4624 p2
= lock_user_string(arg2
);
4626 ret
= -TARGET_EFAULT
;
4628 ret
= get_errno(rename(p
, p2
));
4629 unlock_user(p2
, arg2
, 0);
4630 unlock_user(p
, arg1
, 0);
4633 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4634 case TARGET_NR_renameat
:
4637 p
= lock_user_string(arg2
);
4638 p2
= lock_user_string(arg4
);
4640 ret
= -TARGET_EFAULT
;
4642 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
4643 unlock_user(p2
, arg4
, 0);
4644 unlock_user(p
, arg2
, 0);
4648 case TARGET_NR_mkdir
:
4649 if (!(p
= lock_user_string(arg1
)))
4651 ret
= get_errno(mkdir(p
, arg2
));
4652 unlock_user(p
, arg1
, 0);
4654 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4655 case TARGET_NR_mkdirat
:
4656 if (!(p
= lock_user_string(arg2
)))
4658 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
4659 unlock_user(p
, arg2
, 0);
4662 case TARGET_NR_rmdir
:
4663 if (!(p
= lock_user_string(arg1
)))
4665 ret
= get_errno(rmdir(p
));
4666 unlock_user(p
, arg1
, 0);
4669 ret
= get_errno(dup(arg1
));
4671 case TARGET_NR_pipe
:
4672 ret
= do_pipe(cpu_env
, arg1
, 0);
4674 #ifdef TARGET_NR_pipe2
4675 case TARGET_NR_pipe2
:
4676 ret
= do_pipe(cpu_env
, arg1
, arg2
);
4679 case TARGET_NR_times
:
4681 struct target_tms
*tmsp
;
4683 ret
= get_errno(times(&tms
));
4685 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
4688 tmsp
->tms_utime
= tswapl(host_to_target_clock_t(tms
.tms_utime
));
4689 tmsp
->tms_stime
= tswapl(host_to_target_clock_t(tms
.tms_stime
));
4690 tmsp
->tms_cutime
= tswapl(host_to_target_clock_t(tms
.tms_cutime
));
4691 tmsp
->tms_cstime
= tswapl(host_to_target_clock_t(tms
.tms_cstime
));
4694 ret
= host_to_target_clock_t(ret
);
4697 #ifdef TARGET_NR_prof
4698 case TARGET_NR_prof
:
4701 #ifdef TARGET_NR_signal
4702 case TARGET_NR_signal
:
4705 case TARGET_NR_acct
:
4707 ret
= get_errno(acct(NULL
));
4709 if (!(p
= lock_user_string(arg1
)))
4711 ret
= get_errno(acct(path(p
)));
4712 unlock_user(p
, arg1
, 0);
4715 #ifdef TARGET_NR_umount2 /* not on alpha */
4716 case TARGET_NR_umount2
:
4717 if (!(p
= lock_user_string(arg1
)))
4719 ret
= get_errno(umount2(p
, arg2
));
4720 unlock_user(p
, arg1
, 0);
4723 #ifdef TARGET_NR_lock
4724 case TARGET_NR_lock
:
4727 case TARGET_NR_ioctl
:
4728 ret
= do_ioctl(arg1
, arg2
, arg3
);
4730 case TARGET_NR_fcntl
:
4731 ret
= do_fcntl(arg1
, arg2
, arg3
);
4733 #ifdef TARGET_NR_mpx
4737 case TARGET_NR_setpgid
:
4738 ret
= get_errno(setpgid(arg1
, arg2
));
4740 #ifdef TARGET_NR_ulimit
4741 case TARGET_NR_ulimit
:
4744 #ifdef TARGET_NR_oldolduname
4745 case TARGET_NR_oldolduname
:
4748 case TARGET_NR_umask
:
4749 ret
= get_errno(umask(arg1
));
4751 case TARGET_NR_chroot
:
4752 if (!(p
= lock_user_string(arg1
)))
4754 ret
= get_errno(chroot(p
));
4755 unlock_user(p
, arg1
, 0);
4757 case TARGET_NR_ustat
:
4759 case TARGET_NR_dup2
:
4760 ret
= get_errno(dup2(arg1
, arg2
));
4762 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
4763 case TARGET_NR_dup3
:
4764 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
4767 #ifdef TARGET_NR_getppid /* not on alpha */
4768 case TARGET_NR_getppid
:
4769 ret
= get_errno(getppid());
4772 case TARGET_NR_getpgrp
:
4773 ret
= get_errno(getpgrp());
4775 case TARGET_NR_setsid
:
4776 ret
= get_errno(setsid());
4778 #ifdef TARGET_NR_sigaction
4779 case TARGET_NR_sigaction
:
4781 #if defined(TARGET_ALPHA)
4782 struct target_sigaction act
, oact
, *pact
= 0;
4783 struct target_old_sigaction
*old_act
;
4785 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4787 act
._sa_handler
= old_act
->_sa_handler
;
4788 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
4789 act
.sa_flags
= old_act
->sa_flags
;
4790 act
.sa_restorer
= 0;
4791 unlock_user_struct(old_act
, arg2
, 0);
4794 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4795 if (!is_error(ret
) && arg3
) {
4796 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4798 old_act
->_sa_handler
= oact
._sa_handler
;
4799 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
4800 old_act
->sa_flags
= oact
.sa_flags
;
4801 unlock_user_struct(old_act
, arg3
, 1);
4803 #elif defined(TARGET_MIPS)
4804 struct target_sigaction act
, oact
, *pact
, *old_act
;
4807 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4809 act
._sa_handler
= old_act
->_sa_handler
;
4810 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
4811 act
.sa_flags
= old_act
->sa_flags
;
4812 unlock_user_struct(old_act
, arg2
, 0);
4818 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4820 if (!is_error(ret
) && arg3
) {
4821 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4823 old_act
->_sa_handler
= oact
._sa_handler
;
4824 old_act
->sa_flags
= oact
.sa_flags
;
4825 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
4826 old_act
->sa_mask
.sig
[1] = 0;
4827 old_act
->sa_mask
.sig
[2] = 0;
4828 old_act
->sa_mask
.sig
[3] = 0;
4829 unlock_user_struct(old_act
, arg3
, 1);
4832 struct target_old_sigaction
*old_act
;
4833 struct target_sigaction act
, oact
, *pact
;
4835 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4837 act
._sa_handler
= old_act
->_sa_handler
;
4838 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
4839 act
.sa_flags
= old_act
->sa_flags
;
4840 act
.sa_restorer
= old_act
->sa_restorer
;
4841 unlock_user_struct(old_act
, arg2
, 0);
4846 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4847 if (!is_error(ret
) && arg3
) {
4848 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4850 old_act
->_sa_handler
= oact
._sa_handler
;
4851 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
4852 old_act
->sa_flags
= oact
.sa_flags
;
4853 old_act
->sa_restorer
= oact
.sa_restorer
;
4854 unlock_user_struct(old_act
, arg3
, 1);
4860 case TARGET_NR_rt_sigaction
:
4862 #if defined(TARGET_ALPHA)
4863 struct target_sigaction act
, oact
, *pact
= 0;
4864 struct target_rt_sigaction
*rt_act
;
4865 /* ??? arg4 == sizeof(sigset_t). */
4867 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
4869 act
._sa_handler
= rt_act
->_sa_handler
;
4870 act
.sa_mask
= rt_act
->sa_mask
;
4871 act
.sa_flags
= rt_act
->sa_flags
;
4872 act
.sa_restorer
= arg5
;
4873 unlock_user_struct(rt_act
, arg2
, 0);
4876 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4877 if (!is_error(ret
) && arg3
) {
4878 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
4880 rt_act
->_sa_handler
= oact
._sa_handler
;
4881 rt_act
->sa_mask
= oact
.sa_mask
;
4882 rt_act
->sa_flags
= oact
.sa_flags
;
4883 unlock_user_struct(rt_act
, arg3
, 1);
4886 struct target_sigaction
*act
;
4887 struct target_sigaction
*oact
;
4890 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
4895 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
4896 ret
= -TARGET_EFAULT
;
4897 goto rt_sigaction_fail
;
4901 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
4904 unlock_user_struct(act
, arg2
, 0);
4906 unlock_user_struct(oact
, arg3
, 1);
4910 #ifdef TARGET_NR_sgetmask /* not on alpha */
4911 case TARGET_NR_sgetmask
:
4914 abi_ulong target_set
;
4915 sigprocmask(0, NULL
, &cur_set
);
4916 host_to_target_old_sigset(&target_set
, &cur_set
);
4921 #ifdef TARGET_NR_ssetmask /* not on alpha */
4922 case TARGET_NR_ssetmask
:
4924 sigset_t set
, oset
, cur_set
;
4925 abi_ulong target_set
= arg1
;
4926 sigprocmask(0, NULL
, &cur_set
);
4927 target_to_host_old_sigset(&set
, &target_set
);
4928 sigorset(&set
, &set
, &cur_set
);
4929 sigprocmask(SIG_SETMASK
, &set
, &oset
);
4930 host_to_target_old_sigset(&target_set
, &oset
);
4935 #ifdef TARGET_NR_sigprocmask
4936 case TARGET_NR_sigprocmask
:
4939 sigset_t set
, oldset
, *set_ptr
;
4943 case TARGET_SIG_BLOCK
:
4946 case TARGET_SIG_UNBLOCK
:
4949 case TARGET_SIG_SETMASK
:
4953 ret
= -TARGET_EINVAL
;
4956 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
4958 target_to_host_old_sigset(&set
, p
);
4959 unlock_user(p
, arg2
, 0);
4965 ret
= get_errno(sigprocmask(arg1
, set_ptr
, &oldset
));
4966 if (!is_error(ret
) && arg3
) {
4967 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
4969 host_to_target_old_sigset(p
, &oldset
);
4970 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
4975 case TARGET_NR_rt_sigprocmask
:
4978 sigset_t set
, oldset
, *set_ptr
;
4982 case TARGET_SIG_BLOCK
:
4985 case TARGET_SIG_UNBLOCK
:
4988 case TARGET_SIG_SETMASK
:
4992 ret
= -TARGET_EINVAL
;
4995 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
4997 target_to_host_sigset(&set
, p
);
4998 unlock_user(p
, arg2
, 0);
5004 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5005 if (!is_error(ret
) && arg3
) {
5006 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5008 host_to_target_sigset(p
, &oldset
);
5009 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5013 #ifdef TARGET_NR_sigpending
5014 case TARGET_NR_sigpending
:
5017 ret
= get_errno(sigpending(&set
));
5018 if (!is_error(ret
)) {
5019 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5021 host_to_target_old_sigset(p
, &set
);
5022 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5027 case TARGET_NR_rt_sigpending
:
5030 ret
= get_errno(sigpending(&set
));
5031 if (!is_error(ret
)) {
5032 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5034 host_to_target_sigset(p
, &set
);
5035 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5039 #ifdef TARGET_NR_sigsuspend
5040 case TARGET_NR_sigsuspend
:
5043 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5045 target_to_host_old_sigset(&set
, p
);
5046 unlock_user(p
, arg1
, 0);
5047 ret
= get_errno(sigsuspend(&set
));
5051 case TARGET_NR_rt_sigsuspend
:
5054 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5056 target_to_host_sigset(&set
, p
);
5057 unlock_user(p
, arg1
, 0);
5058 ret
= get_errno(sigsuspend(&set
));
5061 case TARGET_NR_rt_sigtimedwait
:
5064 struct timespec uts
, *puts
;
5067 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5069 target_to_host_sigset(&set
, p
);
5070 unlock_user(p
, arg1
, 0);
5073 target_to_host_timespec(puts
, arg3
);
5077 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
5078 if (!is_error(ret
) && arg2
) {
5079 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
5081 host_to_target_siginfo(p
, &uinfo
);
5082 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
5086 case TARGET_NR_rt_sigqueueinfo
:
5089 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
5091 target_to_host_siginfo(&uinfo
, p
);
5092 unlock_user(p
, arg1
, 0);
5093 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
5096 #ifdef TARGET_NR_sigreturn
5097 case TARGET_NR_sigreturn
:
5098 /* NOTE: ret is eax, so not transcoding must be done */
5099 ret
= do_sigreturn(cpu_env
);
5102 case TARGET_NR_rt_sigreturn
:
5103 /* NOTE: ret is eax, so not transcoding must be done */
5104 ret
= do_rt_sigreturn(cpu_env
);
5106 case TARGET_NR_sethostname
:
5107 if (!(p
= lock_user_string(arg1
)))
5109 ret
= get_errno(sethostname(p
, arg2
));
5110 unlock_user(p
, arg1
, 0);
5112 case TARGET_NR_setrlimit
:
5114 /* XXX: convert resource ? */
5115 int resource
= arg1
;
5116 struct target_rlimit
*target_rlim
;
5118 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
5120 rlim
.rlim_cur
= tswapl(target_rlim
->rlim_cur
);
5121 rlim
.rlim_max
= tswapl(target_rlim
->rlim_max
);
5122 unlock_user_struct(target_rlim
, arg2
, 0);
5123 ret
= get_errno(setrlimit(resource
, &rlim
));
5126 case TARGET_NR_getrlimit
:
5128 /* XXX: convert resource ? */
5129 int resource
= arg1
;
5130 struct target_rlimit
*target_rlim
;
5133 ret
= get_errno(getrlimit(resource
, &rlim
));
5134 if (!is_error(ret
)) {
5135 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5137 target_rlim
->rlim_cur
= tswapl(rlim
.rlim_cur
);
5138 target_rlim
->rlim_max
= tswapl(rlim
.rlim_max
);
5139 unlock_user_struct(target_rlim
, arg2
, 1);
5143 case TARGET_NR_getrusage
:
5145 struct rusage rusage
;
5146 ret
= get_errno(getrusage(arg1
, &rusage
));
5147 if (!is_error(ret
)) {
5148 host_to_target_rusage(arg2
, &rusage
);
5152 case TARGET_NR_gettimeofday
:
5155 ret
= get_errno(gettimeofday(&tv
, NULL
));
5156 if (!is_error(ret
)) {
5157 if (copy_to_user_timeval(arg1
, &tv
))
5162 case TARGET_NR_settimeofday
:
5165 if (copy_from_user_timeval(&tv
, arg1
))
5167 ret
= get_errno(settimeofday(&tv
, NULL
));
5170 #ifdef TARGET_NR_select
5171 case TARGET_NR_select
:
5173 struct target_sel_arg_struct
*sel
;
5174 abi_ulong inp
, outp
, exp
, tvp
;
5177 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
5179 nsel
= tswapl(sel
->n
);
5180 inp
= tswapl(sel
->inp
);
5181 outp
= tswapl(sel
->outp
);
5182 exp
= tswapl(sel
->exp
);
5183 tvp
= tswapl(sel
->tvp
);
5184 unlock_user_struct(sel
, arg1
, 0);
5185 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
5189 case TARGET_NR_symlink
:
5192 p
= lock_user_string(arg1
);
5193 p2
= lock_user_string(arg2
);
5195 ret
= -TARGET_EFAULT
;
5197 ret
= get_errno(symlink(p
, p2
));
5198 unlock_user(p2
, arg2
, 0);
5199 unlock_user(p
, arg1
, 0);
5202 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5203 case TARGET_NR_symlinkat
:
5206 p
= lock_user_string(arg1
);
5207 p2
= lock_user_string(arg3
);
5209 ret
= -TARGET_EFAULT
;
5211 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
5212 unlock_user(p2
, arg3
, 0);
5213 unlock_user(p
, arg1
, 0);
5217 #ifdef TARGET_NR_oldlstat
5218 case TARGET_NR_oldlstat
:
5221 case TARGET_NR_readlink
:
5224 p
= lock_user_string(arg1
);
5225 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
5227 ret
= -TARGET_EFAULT
;
5229 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
5230 char real
[PATH_MAX
];
5231 temp
= realpath(exec_path
,real
);
5232 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
5233 snprintf((char *)p2
, arg3
, "%s", real
);
5236 ret
= get_errno(readlink(path(p
), p2
, arg3
));
5238 unlock_user(p2
, arg2
, ret
);
5239 unlock_user(p
, arg1
, 0);
5242 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5243 case TARGET_NR_readlinkat
:
5246 p
= lock_user_string(arg2
);
5247 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
5249 ret
= -TARGET_EFAULT
;
5251 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
5252 unlock_user(p2
, arg3
, ret
);
5253 unlock_user(p
, arg2
, 0);
5257 #ifdef TARGET_NR_uselib
5258 case TARGET_NR_uselib
:
5261 #ifdef TARGET_NR_swapon
5262 case TARGET_NR_swapon
:
5263 if (!(p
= lock_user_string(arg1
)))
5265 ret
= get_errno(swapon(p
, arg2
));
5266 unlock_user(p
, arg1
, 0);
5269 case TARGET_NR_reboot
:
5271 #ifdef TARGET_NR_readdir
5272 case TARGET_NR_readdir
:
5275 #ifdef TARGET_NR_mmap
5276 case TARGET_NR_mmap
:
5277 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE)
5280 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
5281 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
5289 unlock_user(v
, arg1
, 0);
5290 ret
= get_errno(target_mmap(v1
, v2
, v3
,
5291 target_to_host_bitmask(v4
, mmap_flags_tbl
),
5295 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5296 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5302 #ifdef TARGET_NR_mmap2
5303 case TARGET_NR_mmap2
:
5305 #define MMAP_SHIFT 12
5307 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5308 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5310 arg6
<< MMAP_SHIFT
));
5313 case TARGET_NR_munmap
:
5314 ret
= get_errno(target_munmap(arg1
, arg2
));
5316 case TARGET_NR_mprotect
:
5317 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
5319 #ifdef TARGET_NR_mremap
5320 case TARGET_NR_mremap
:
5321 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
5324 /* ??? msync/mlock/munlock are broken for softmmu. */
5325 #ifdef TARGET_NR_msync
5326 case TARGET_NR_msync
:
5327 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
5330 #ifdef TARGET_NR_mlock
5331 case TARGET_NR_mlock
:
5332 ret
= get_errno(mlock(g2h(arg1
), arg2
));
5335 #ifdef TARGET_NR_munlock
5336 case TARGET_NR_munlock
:
5337 ret
= get_errno(munlock(g2h(arg1
), arg2
));
5340 #ifdef TARGET_NR_mlockall
5341 case TARGET_NR_mlockall
:
5342 ret
= get_errno(mlockall(arg1
));
5345 #ifdef TARGET_NR_munlockall
5346 case TARGET_NR_munlockall
:
5347 ret
= get_errno(munlockall());
5350 case TARGET_NR_truncate
:
5351 if (!(p
= lock_user_string(arg1
)))
5353 ret
= get_errno(truncate(p
, arg2
));
5354 unlock_user(p
, arg1
, 0);
5356 case TARGET_NR_ftruncate
:
5357 ret
= get_errno(ftruncate(arg1
, arg2
));
5359 case TARGET_NR_fchmod
:
5360 ret
= get_errno(fchmod(arg1
, arg2
));
5362 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5363 case TARGET_NR_fchmodat
:
5364 if (!(p
= lock_user_string(arg2
)))
5366 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
5367 unlock_user(p
, arg2
, 0);
5370 case TARGET_NR_getpriority
:
5371 /* libc does special remapping of the return value of
5372 * sys_getpriority() so it's just easiest to call
5373 * sys_getpriority() directly rather than through libc. */
5374 ret
= get_errno(sys_getpriority(arg1
, arg2
));
5376 case TARGET_NR_setpriority
:
5377 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
5379 #ifdef TARGET_NR_profil
5380 case TARGET_NR_profil
:
5383 case TARGET_NR_statfs
:
5384 if (!(p
= lock_user_string(arg1
)))
5386 ret
= get_errno(statfs(path(p
), &stfs
));
5387 unlock_user(p
, arg1
, 0);
5389 if (!is_error(ret
)) {
5390 struct target_statfs
*target_stfs
;
5392 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
5394 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5395 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5396 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5397 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5398 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5399 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5400 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5401 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5402 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5403 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5404 unlock_user_struct(target_stfs
, arg2
, 1);
5407 case TARGET_NR_fstatfs
:
5408 ret
= get_errno(fstatfs(arg1
, &stfs
));
5409 goto convert_statfs
;
5410 #ifdef TARGET_NR_statfs64
5411 case TARGET_NR_statfs64
:
5412 if (!(p
= lock_user_string(arg1
)))
5414 ret
= get_errno(statfs(path(p
), &stfs
));
5415 unlock_user(p
, arg1
, 0);
5417 if (!is_error(ret
)) {
5418 struct target_statfs64
*target_stfs
;
5420 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
5422 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5423 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5424 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5425 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5426 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5427 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5428 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5429 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5430 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5431 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5432 unlock_user_struct(target_stfs
, arg3
, 1);
5435 case TARGET_NR_fstatfs64
:
5436 ret
= get_errno(fstatfs(arg1
, &stfs
));
5437 goto convert_statfs64
;
5439 #ifdef TARGET_NR_ioperm
5440 case TARGET_NR_ioperm
:
5443 #ifdef TARGET_NR_socketcall
5444 case TARGET_NR_socketcall
:
5445 ret
= do_socketcall(arg1
, arg2
);
5448 #ifdef TARGET_NR_accept
5449 case TARGET_NR_accept
:
5450 ret
= do_accept(arg1
, arg2
, arg3
);
5453 #ifdef TARGET_NR_bind
5454 case TARGET_NR_bind
:
5455 ret
= do_bind(arg1
, arg2
, arg3
);
5458 #ifdef TARGET_NR_connect
5459 case TARGET_NR_connect
:
5460 ret
= do_connect(arg1
, arg2
, arg3
);
5463 #ifdef TARGET_NR_getpeername
5464 case TARGET_NR_getpeername
:
5465 ret
= do_getpeername(arg1
, arg2
, arg3
);
5468 #ifdef TARGET_NR_getsockname
5469 case TARGET_NR_getsockname
:
5470 ret
= do_getsockname(arg1
, arg2
, arg3
);
5473 #ifdef TARGET_NR_getsockopt
5474 case TARGET_NR_getsockopt
:
5475 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
5478 #ifdef TARGET_NR_listen
5479 case TARGET_NR_listen
:
5480 ret
= get_errno(listen(arg1
, arg2
));
5483 #ifdef TARGET_NR_recv
5484 case TARGET_NR_recv
:
5485 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
5488 #ifdef TARGET_NR_recvfrom
5489 case TARGET_NR_recvfrom
:
5490 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5493 #ifdef TARGET_NR_recvmsg
5494 case TARGET_NR_recvmsg
:
5495 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
5498 #ifdef TARGET_NR_send
5499 case TARGET_NR_send
:
5500 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
5503 #ifdef TARGET_NR_sendmsg
5504 case TARGET_NR_sendmsg
:
5505 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
5508 #ifdef TARGET_NR_sendto
5509 case TARGET_NR_sendto
:
5510 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5513 #ifdef TARGET_NR_shutdown
5514 case TARGET_NR_shutdown
:
5515 ret
= get_errno(shutdown(arg1
, arg2
));
5518 #ifdef TARGET_NR_socket
5519 case TARGET_NR_socket
:
5520 ret
= do_socket(arg1
, arg2
, arg3
);
5523 #ifdef TARGET_NR_socketpair
5524 case TARGET_NR_socketpair
:
5525 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
5528 #ifdef TARGET_NR_setsockopt
5529 case TARGET_NR_setsockopt
:
5530 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
5534 case TARGET_NR_syslog
:
5535 if (!(p
= lock_user_string(arg2
)))
5537 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
5538 unlock_user(p
, arg2
, 0);
5541 case TARGET_NR_setitimer
:
5543 struct itimerval value
, ovalue
, *pvalue
;
5547 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
5548 || copy_from_user_timeval(&pvalue
->it_value
,
5549 arg2
+ sizeof(struct target_timeval
)))
5554 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
5555 if (!is_error(ret
) && arg3
) {
5556 if (copy_to_user_timeval(arg3
,
5557 &ovalue
.it_interval
)
5558 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
5564 case TARGET_NR_getitimer
:
5566 struct itimerval value
;
5568 ret
= get_errno(getitimer(arg1
, &value
));
5569 if (!is_error(ret
) && arg2
) {
5570 if (copy_to_user_timeval(arg2
,
5572 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
5578 case TARGET_NR_stat
:
5579 if (!(p
= lock_user_string(arg1
)))
5581 ret
= get_errno(stat(path(p
), &st
));
5582 unlock_user(p
, arg1
, 0);
5584 case TARGET_NR_lstat
:
5585 if (!(p
= lock_user_string(arg1
)))
5587 ret
= get_errno(lstat(path(p
), &st
));
5588 unlock_user(p
, arg1
, 0);
5590 case TARGET_NR_fstat
:
5592 ret
= get_errno(fstat(arg1
, &st
));
5594 if (!is_error(ret
)) {
5595 struct target_stat
*target_st
;
5597 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
5599 memset(target_st
, 0, sizeof(*target_st
));
5600 __put_user(st
.st_dev
, &target_st
->st_dev
);
5601 __put_user(st
.st_ino
, &target_st
->st_ino
);
5602 __put_user(st
.st_mode
, &target_st
->st_mode
);
5603 __put_user(st
.st_uid
, &target_st
->st_uid
);
5604 __put_user(st
.st_gid
, &target_st
->st_gid
);
5605 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
5606 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
5607 __put_user(st
.st_size
, &target_st
->st_size
);
5608 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
5609 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
5610 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
5611 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
5612 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
5613 unlock_user_struct(target_st
, arg2
, 1);
5617 #ifdef TARGET_NR_olduname
5618 case TARGET_NR_olduname
:
5621 #ifdef TARGET_NR_iopl
5622 case TARGET_NR_iopl
:
5625 case TARGET_NR_vhangup
:
5626 ret
= get_errno(vhangup());
5628 #ifdef TARGET_NR_idle
5629 case TARGET_NR_idle
:
5632 #ifdef TARGET_NR_syscall
5633 case TARGET_NR_syscall
:
5634 ret
= do_syscall(cpu_env
,arg1
& 0xffff,arg2
,arg3
,arg4
,arg5
,arg6
,0);
5637 case TARGET_NR_wait4
:
5640 abi_long status_ptr
= arg2
;
5641 struct rusage rusage
, *rusage_ptr
;
5642 abi_ulong target_rusage
= arg4
;
5644 rusage_ptr
= &rusage
;
5647 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
5648 if (!is_error(ret
)) {
5650 status
= host_to_target_waitstatus(status
);
5651 if (put_user_s32(status
, status_ptr
))
5655 host_to_target_rusage(target_rusage
, &rusage
);
5659 #ifdef TARGET_NR_swapoff
5660 case TARGET_NR_swapoff
:
5661 if (!(p
= lock_user_string(arg1
)))
5663 ret
= get_errno(swapoff(p
));
5664 unlock_user(p
, arg1
, 0);
5667 case TARGET_NR_sysinfo
:
5669 struct target_sysinfo
*target_value
;
5670 struct sysinfo value
;
5671 ret
= get_errno(sysinfo(&value
));
5672 if (!is_error(ret
) && arg1
)
5674 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
5676 __put_user(value
.uptime
, &target_value
->uptime
);
5677 __put_user(value
.loads
[0], &target_value
->loads
[0]);
5678 __put_user(value
.loads
[1], &target_value
->loads
[1]);
5679 __put_user(value
.loads
[2], &target_value
->loads
[2]);
5680 __put_user(value
.totalram
, &target_value
->totalram
);
5681 __put_user(value
.freeram
, &target_value
->freeram
);
5682 __put_user(value
.sharedram
, &target_value
->sharedram
);
5683 __put_user(value
.bufferram
, &target_value
->bufferram
);
5684 __put_user(value
.totalswap
, &target_value
->totalswap
);
5685 __put_user(value
.freeswap
, &target_value
->freeswap
);
5686 __put_user(value
.procs
, &target_value
->procs
);
5687 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
5688 __put_user(value
.freehigh
, &target_value
->freehigh
);
5689 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
5690 unlock_user_struct(target_value
, arg1
, 1);
5694 #ifdef TARGET_NR_ipc
5696 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5699 #ifdef TARGET_NR_semget
5700 case TARGET_NR_semget
:
5701 ret
= get_errno(semget(arg1
, arg2
, arg3
));
5704 #ifdef TARGET_NR_semop
5705 case TARGET_NR_semop
:
5706 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
5709 #ifdef TARGET_NR_semctl
5710 case TARGET_NR_semctl
:
5711 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
5714 #ifdef TARGET_NR_msgctl
5715 case TARGET_NR_msgctl
:
5716 ret
= do_msgctl(arg1
, arg2
, arg3
);
5719 #ifdef TARGET_NR_msgget
5720 case TARGET_NR_msgget
:
5721 ret
= get_errno(msgget(arg1
, arg2
));
5724 #ifdef TARGET_NR_msgrcv
5725 case TARGET_NR_msgrcv
:
5726 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
5729 #ifdef TARGET_NR_msgsnd
5730 case TARGET_NR_msgsnd
:
5731 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
5734 #ifdef TARGET_NR_shmget
5735 case TARGET_NR_shmget
:
5736 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
5739 #ifdef TARGET_NR_shmctl
5740 case TARGET_NR_shmctl
:
5741 ret
= do_shmctl(arg1
, arg2
, arg3
);
5744 #ifdef TARGET_NR_shmat
5745 case TARGET_NR_shmat
:
5746 ret
= do_shmat(arg1
, arg2
, arg3
);
5749 #ifdef TARGET_NR_shmdt
5750 case TARGET_NR_shmdt
:
5751 ret
= do_shmdt(arg1
);
5754 case TARGET_NR_fsync
:
5755 ret
= get_errno(fsync(arg1
));
5757 case TARGET_NR_clone
:
5758 #if defined(TARGET_SH4)
5759 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
5760 #elif defined(TARGET_CRIS)
5761 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
5763 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
5766 #ifdef __NR_exit_group
5767 /* new thread calls */
5768 case TARGET_NR_exit_group
:
5772 gdb_exit(cpu_env
, arg1
);
5773 ret
= get_errno(exit_group(arg1
));
5776 case TARGET_NR_setdomainname
:
5777 if (!(p
= lock_user_string(arg1
)))
5779 ret
= get_errno(setdomainname(p
, arg2
));
5780 unlock_user(p
, arg1
, 0);
5782 case TARGET_NR_uname
:
5783 /* no need to transcode because we use the linux syscall */
5785 struct new_utsname
* buf
;
5787 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
5789 ret
= get_errno(sys_uname(buf
));
5790 if (!is_error(ret
)) {
5791 /* Overrite the native machine name with whatever is being
5793 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
5794 /* Allow the user to override the reported release. */
5795 if (qemu_uname_release
&& *qemu_uname_release
)
5796 strcpy (buf
->release
, qemu_uname_release
);
5798 unlock_user_struct(buf
, arg1
, 1);
5802 case TARGET_NR_modify_ldt
:
5803 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
5805 #if !defined(TARGET_X86_64)
5806 case TARGET_NR_vm86old
:
5808 case TARGET_NR_vm86
:
5809 ret
= do_vm86(cpu_env
, arg1
, arg2
);
5813 case TARGET_NR_adjtimex
:
5815 #ifdef TARGET_NR_create_module
5816 case TARGET_NR_create_module
:
5818 case TARGET_NR_init_module
:
5819 case TARGET_NR_delete_module
:
5820 #ifdef TARGET_NR_get_kernel_syms
5821 case TARGET_NR_get_kernel_syms
:
5824 case TARGET_NR_quotactl
:
5826 case TARGET_NR_getpgid
:
5827 ret
= get_errno(getpgid(arg1
));
5829 case TARGET_NR_fchdir
:
5830 ret
= get_errno(fchdir(arg1
));
5832 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5833 case TARGET_NR_bdflush
:
5836 #ifdef TARGET_NR_sysfs
5837 case TARGET_NR_sysfs
:
5840 case TARGET_NR_personality
:
5841 ret
= get_errno(personality(arg1
));
5843 #ifdef TARGET_NR_afs_syscall
5844 case TARGET_NR_afs_syscall
:
5847 #ifdef TARGET_NR__llseek /* Not on alpha */
5848 case TARGET_NR__llseek
:
5850 #if defined (__x86_64__)
5851 ret
= get_errno(lseek(arg1
, ((uint64_t )arg2
<< 32) | arg3
, arg5
));
5852 if (put_user_s64(ret
, arg4
))
5856 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
5857 if (put_user_s64(res
, arg4
))
5863 case TARGET_NR_getdents
:
5864 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5866 struct target_dirent
*target_dirp
;
5867 struct linux_dirent
*dirp
;
5868 abi_long count
= arg3
;
5870 dirp
= malloc(count
);
5872 ret
= -TARGET_ENOMEM
;
5876 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
5877 if (!is_error(ret
)) {
5878 struct linux_dirent
*de
;
5879 struct target_dirent
*tde
;
5881 int reclen
, treclen
;
5882 int count1
, tnamelen
;
5886 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5890 reclen
= de
->d_reclen
;
5891 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
5892 tde
->d_reclen
= tswap16(treclen
);
5893 tde
->d_ino
= tswapl(de
->d_ino
);
5894 tde
->d_off
= tswapl(de
->d_off
);
5895 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
5898 /* XXX: may not be correct */
5899 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
5900 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
5902 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
5906 unlock_user(target_dirp
, arg2
, ret
);
5912 struct linux_dirent
*dirp
;
5913 abi_long count
= arg3
;
5915 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5917 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
5918 if (!is_error(ret
)) {
5919 struct linux_dirent
*de
;
5924 reclen
= de
->d_reclen
;
5927 de
->d_reclen
= tswap16(reclen
);
5928 tswapls(&de
->d_ino
);
5929 tswapls(&de
->d_off
);
5930 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
5934 unlock_user(dirp
, arg2
, ret
);
5938 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5939 case TARGET_NR_getdents64
:
5941 struct linux_dirent64
*dirp
;
5942 abi_long count
= arg3
;
5943 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5945 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
5946 if (!is_error(ret
)) {
5947 struct linux_dirent64
*de
;
5952 reclen
= de
->d_reclen
;
5955 de
->d_reclen
= tswap16(reclen
);
5956 tswap64s((uint64_t *)&de
->d_ino
);
5957 tswap64s((uint64_t *)&de
->d_off
);
5958 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
5962 unlock_user(dirp
, arg2
, ret
);
5965 #endif /* TARGET_NR_getdents64 */
5966 #ifdef TARGET_NR__newselect
5967 case TARGET_NR__newselect
:
5968 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
5971 #ifdef TARGET_NR_poll
5972 case TARGET_NR_poll
:
5974 struct target_pollfd
*target_pfd
;
5975 unsigned int nfds
= arg2
;
5980 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
5983 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
5984 for(i
= 0; i
< nfds
; i
++) {
5985 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
5986 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
5988 ret
= get_errno(poll(pfd
, nfds
, timeout
));
5989 if (!is_error(ret
)) {
5990 for(i
= 0; i
< nfds
; i
++) {
5991 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
5993 ret
+= nfds
* (sizeof(struct target_pollfd
)
5994 - sizeof(struct pollfd
));
5996 unlock_user(target_pfd
, arg1
, ret
);
6000 case TARGET_NR_flock
:
6001 /* NOTE: the flock constant seems to be the same for every
6003 ret
= get_errno(flock(arg1
, arg2
));
6005 case TARGET_NR_readv
:
6010 vec
= alloca(count
* sizeof(struct iovec
));
6011 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
6013 ret
= get_errno(readv(arg1
, vec
, count
));
6014 unlock_iovec(vec
, arg2
, count
, 1);
6017 case TARGET_NR_writev
:
6022 vec
= alloca(count
* sizeof(struct iovec
));
6023 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
6025 ret
= get_errno(writev(arg1
, vec
, count
));
6026 unlock_iovec(vec
, arg2
, count
, 0);
6029 case TARGET_NR_getsid
:
6030 ret
= get_errno(getsid(arg1
));
6032 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6033 case TARGET_NR_fdatasync
:
6034 ret
= get_errno(fdatasync(arg1
));
6037 case TARGET_NR__sysctl
:
6038 /* We don't implement this, but ENOTDIR is always a safe
6040 ret
= -TARGET_ENOTDIR
;
6042 case TARGET_NR_sched_setparam
:
6044 struct sched_param
*target_schp
;
6045 struct sched_param schp
;
6047 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
6049 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6050 unlock_user_struct(target_schp
, arg2
, 0);
6051 ret
= get_errno(sched_setparam(arg1
, &schp
));
6054 case TARGET_NR_sched_getparam
:
6056 struct sched_param
*target_schp
;
6057 struct sched_param schp
;
6058 ret
= get_errno(sched_getparam(arg1
, &schp
));
6059 if (!is_error(ret
)) {
6060 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
6062 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
6063 unlock_user_struct(target_schp
, arg2
, 1);
6067 case TARGET_NR_sched_setscheduler
:
6069 struct sched_param
*target_schp
;
6070 struct sched_param schp
;
6071 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
6073 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6074 unlock_user_struct(target_schp
, arg3
, 0);
6075 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
6078 case TARGET_NR_sched_getscheduler
:
6079 ret
= get_errno(sched_getscheduler(arg1
));
6081 case TARGET_NR_sched_yield
:
6082 ret
= get_errno(sched_yield());
6084 case TARGET_NR_sched_get_priority_max
:
6085 ret
= get_errno(sched_get_priority_max(arg1
));
6087 case TARGET_NR_sched_get_priority_min
:
6088 ret
= get_errno(sched_get_priority_min(arg1
));
6090 case TARGET_NR_sched_rr_get_interval
:
6093 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
6094 if (!is_error(ret
)) {
6095 host_to_target_timespec(arg2
, &ts
);
6099 case TARGET_NR_nanosleep
:
6101 struct timespec req
, rem
;
6102 target_to_host_timespec(&req
, arg1
);
6103 ret
= get_errno(nanosleep(&req
, &rem
));
6104 if (is_error(ret
) && arg2
) {
6105 host_to_target_timespec(arg2
, &rem
);
6109 #ifdef TARGET_NR_query_module
6110 case TARGET_NR_query_module
:
6113 #ifdef TARGET_NR_nfsservctl
6114 case TARGET_NR_nfsservctl
:
6117 case TARGET_NR_prctl
:
6120 case PR_GET_PDEATHSIG
:
6123 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
6124 if (!is_error(ret
) && arg2
6125 && put_user_ual(deathsig
, arg2
))
6130 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
6134 #ifdef TARGET_NR_arch_prctl
6135 case TARGET_NR_arch_prctl
:
6136 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6137 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
6143 #ifdef TARGET_NR_pread
6144 case TARGET_NR_pread
:
6146 if (((CPUARMState
*)cpu_env
)->eabi
)
6149 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6151 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
6152 unlock_user(p
, arg2
, ret
);
6154 case TARGET_NR_pwrite
:
6156 if (((CPUARMState
*)cpu_env
)->eabi
)
6159 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6161 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
6162 unlock_user(p
, arg2
, 0);
6165 #ifdef TARGET_NR_pread64
6166 case TARGET_NR_pread64
:
6167 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6169 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6170 unlock_user(p
, arg2
, ret
);
6172 case TARGET_NR_pwrite64
:
6173 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6175 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6176 unlock_user(p
, arg2
, 0);
6179 case TARGET_NR_getcwd
:
6180 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
6182 ret
= get_errno(sys_getcwd1(p
, arg2
));
6183 unlock_user(p
, arg1
, ret
);
6185 case TARGET_NR_capget
:
6187 case TARGET_NR_capset
:
6189 case TARGET_NR_sigaltstack
:
6190 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6191 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6192 defined(TARGET_M68K)
6193 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUState
*)cpu_env
));
6198 case TARGET_NR_sendfile
:
6200 #ifdef TARGET_NR_getpmsg
6201 case TARGET_NR_getpmsg
:
6204 #ifdef TARGET_NR_putpmsg
6205 case TARGET_NR_putpmsg
:
6208 #ifdef TARGET_NR_vfork
6209 case TARGET_NR_vfork
:
6210 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
6214 #ifdef TARGET_NR_ugetrlimit
6215 case TARGET_NR_ugetrlimit
:
6218 ret
= get_errno(getrlimit(arg1
, &rlim
));
6219 if (!is_error(ret
)) {
6220 struct target_rlimit
*target_rlim
;
6221 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6223 target_rlim
->rlim_cur
= tswapl(rlim
.rlim_cur
);
6224 target_rlim
->rlim_max
= tswapl(rlim
.rlim_max
);
6225 unlock_user_struct(target_rlim
, arg2
, 1);
6230 #ifdef TARGET_NR_truncate64
6231 case TARGET_NR_truncate64
:
6232 if (!(p
= lock_user_string(arg1
)))
6234 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
6235 unlock_user(p
, arg1
, 0);
6238 #ifdef TARGET_NR_ftruncate64
6239 case TARGET_NR_ftruncate64
:
6240 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
6243 #ifdef TARGET_NR_stat64
6244 case TARGET_NR_stat64
:
6245 if (!(p
= lock_user_string(arg1
)))
6247 ret
= get_errno(stat(path(p
), &st
));
6248 unlock_user(p
, arg1
, 0);
6250 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6253 #ifdef TARGET_NR_lstat64
6254 case TARGET_NR_lstat64
:
6255 if (!(p
= lock_user_string(arg1
)))
6257 ret
= get_errno(lstat(path(p
), &st
));
6258 unlock_user(p
, arg1
, 0);
6260 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6263 #ifdef TARGET_NR_fstat64
6264 case TARGET_NR_fstat64
:
6265 ret
= get_errno(fstat(arg1
, &st
));
6267 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6270 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6271 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6272 #ifdef TARGET_NR_fstatat64
6273 case TARGET_NR_fstatat64
:
6275 #ifdef TARGET_NR_newfstatat
6276 case TARGET_NR_newfstatat
:
6278 if (!(p
= lock_user_string(arg2
)))
6280 #ifdef __NR_fstatat64
6281 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
6283 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
6286 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
6290 case TARGET_NR_lchown
:
6291 if (!(p
= lock_user_string(arg1
)))
6293 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6294 unlock_user(p
, arg1
, 0);
6296 case TARGET_NR_getuid
:
6297 ret
= get_errno(high2lowuid(getuid()));
6299 case TARGET_NR_getgid
:
6300 ret
= get_errno(high2lowgid(getgid()));
6302 case TARGET_NR_geteuid
:
6303 ret
= get_errno(high2lowuid(geteuid()));
6305 case TARGET_NR_getegid
:
6306 ret
= get_errno(high2lowgid(getegid()));
6308 case TARGET_NR_setreuid
:
6309 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
6311 case TARGET_NR_setregid
:
6312 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
6314 case TARGET_NR_getgroups
:
6316 int gidsetsize
= arg1
;
6317 uint16_t *target_grouplist
;
6321 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6322 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6323 if (gidsetsize
== 0)
6325 if (!is_error(ret
)) {
6326 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
6327 if (!target_grouplist
)
6329 for(i
= 0;i
< ret
; i
++)
6330 target_grouplist
[i
] = tswap16(grouplist
[i
]);
6331 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
6335 case TARGET_NR_setgroups
:
6337 int gidsetsize
= arg1
;
6338 uint16_t *target_grouplist
;
6342 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6343 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
6344 if (!target_grouplist
) {
6345 ret
= -TARGET_EFAULT
;
6348 for(i
= 0;i
< gidsetsize
; i
++)
6349 grouplist
[i
] = tswap16(target_grouplist
[i
]);
6350 unlock_user(target_grouplist
, arg2
, 0);
6351 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6354 case TARGET_NR_fchown
:
6355 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
6357 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6358 case TARGET_NR_fchownat
:
6359 if (!(p
= lock_user_string(arg2
)))
6361 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
6362 unlock_user(p
, arg2
, 0);
6365 #ifdef TARGET_NR_setresuid
6366 case TARGET_NR_setresuid
:
6367 ret
= get_errno(setresuid(low2highuid(arg1
),
6369 low2highuid(arg3
)));
6372 #ifdef TARGET_NR_getresuid
6373 case TARGET_NR_getresuid
:
6375 uid_t ruid
, euid
, suid
;
6376 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6377 if (!is_error(ret
)) {
6378 if (put_user_u16(high2lowuid(ruid
), arg1
)
6379 || put_user_u16(high2lowuid(euid
), arg2
)
6380 || put_user_u16(high2lowuid(suid
), arg3
))
6386 #ifdef TARGET_NR_getresgid
6387 case TARGET_NR_setresgid
:
6388 ret
= get_errno(setresgid(low2highgid(arg1
),
6390 low2highgid(arg3
)));
6393 #ifdef TARGET_NR_getresgid
6394 case TARGET_NR_getresgid
:
6396 gid_t rgid
, egid
, sgid
;
6397 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6398 if (!is_error(ret
)) {
6399 if (put_user_u16(high2lowgid(rgid
), arg1
)
6400 || put_user_u16(high2lowgid(egid
), arg2
)
6401 || put_user_u16(high2lowgid(sgid
), arg3
))
6407 case TARGET_NR_chown
:
6408 if (!(p
= lock_user_string(arg1
)))
6410 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6411 unlock_user(p
, arg1
, 0);
6413 case TARGET_NR_setuid
:
6414 ret
= get_errno(setuid(low2highuid(arg1
)));
6416 case TARGET_NR_setgid
:
6417 ret
= get_errno(setgid(low2highgid(arg1
)));
6419 case TARGET_NR_setfsuid
:
6420 ret
= get_errno(setfsuid(arg1
));
6422 case TARGET_NR_setfsgid
:
6423 ret
= get_errno(setfsgid(arg1
));
6425 #endif /* USE_UID16 */
6427 #ifdef TARGET_NR_lchown32
6428 case TARGET_NR_lchown32
:
6429 if (!(p
= lock_user_string(arg1
)))
6431 ret
= get_errno(lchown(p
, arg2
, arg3
));
6432 unlock_user(p
, arg1
, 0);
6435 #ifdef TARGET_NR_getuid32
6436 case TARGET_NR_getuid32
:
6437 ret
= get_errno(getuid());
6441 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6442 /* Alpha specific */
6443 case TARGET_NR_getxuid
:
6447 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
6449 ret
= get_errno(getuid());
6452 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6453 /* Alpha specific */
6454 case TARGET_NR_getxgid
:
6458 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
6460 ret
= get_errno(getgid());
6463 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
6464 /* Alpha specific */
6465 case TARGET_NR_osf_getsysinfo
:
6466 ret
= -TARGET_EOPNOTSUPP
;
6468 case TARGET_GSI_IEEE_FP_CONTROL
:
6470 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
6472 /* Copied from linux ieee_fpcr_to_swcr. */
6473 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
6474 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
6475 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
6476 | SWCR_TRAP_ENABLE_DZE
6477 | SWCR_TRAP_ENABLE_OVF
);
6478 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
6479 | SWCR_TRAP_ENABLE_INE
);
6480 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
6481 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
6483 if (put_user_u64 (swcr
, arg2
))
6489 /* case GSI_IEEE_STATE_AT_SIGNAL:
6490 -- Not implemented in linux kernel.
6492 -- Retrieves current unaligned access state; not much used.
6494 -- Retrieves implver information; surely not used.
6496 -- Grabs a copy of the HWRPB; surely not used.
6501 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
6502 /* Alpha specific */
6503 case TARGET_NR_osf_setsysinfo
:
6504 ret
= -TARGET_EOPNOTSUPP
;
6506 case TARGET_SSI_IEEE_FP_CONTROL
:
6507 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
6509 uint64_t swcr
, fpcr
, orig_fpcr
;
6511 if (get_user_u64 (swcr
, arg2
))
6513 orig_fpcr
= cpu_alpha_load_fpcr (cpu_env
);
6514 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
6516 /* Copied from linux ieee_swcr_to_fpcr. */
6517 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
6518 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
6519 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
6520 | SWCR_TRAP_ENABLE_DZE
6521 | SWCR_TRAP_ENABLE_OVF
)) << 48;
6522 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
6523 | SWCR_TRAP_ENABLE_INE
)) << 57;
6524 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
6525 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
6527 cpu_alpha_store_fpcr (cpu_env
, fpcr
);
6530 if (arg1
== TARGET_SSI_IEEE_RAISE_EXCEPTION
) {
6531 /* Old exceptions are not signaled. */
6532 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
6534 /* If any exceptions set by this call, and are unmasked,
6541 /* case SSI_NVPAIRS:
6542 -- Used with SSIN_UACPROC to enable unaligned accesses.
6543 case SSI_IEEE_STATE_AT_SIGNAL:
6544 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
6545 -- Not implemented in linux kernel
6550 #ifdef TARGET_NR_osf_sigprocmask
6551 /* Alpha specific. */
6552 case TARGET_NR_osf_sigprocmask
:
6556 sigset_t set
, oldset
;
6559 case TARGET_SIG_BLOCK
:
6562 case TARGET_SIG_UNBLOCK
:
6565 case TARGET_SIG_SETMASK
:
6569 ret
= -TARGET_EINVAL
;
6573 target_to_host_old_sigset(&set
, &mask
);
6574 sigprocmask(arg1
, &set
, &oldset
);
6575 host_to_target_old_sigset(&mask
, &oldset
);
6581 #ifdef TARGET_NR_getgid32
6582 case TARGET_NR_getgid32
:
6583 ret
= get_errno(getgid());
6586 #ifdef TARGET_NR_geteuid32
6587 case TARGET_NR_geteuid32
:
6588 ret
= get_errno(geteuid());
6591 #ifdef TARGET_NR_getegid32
6592 case TARGET_NR_getegid32
:
6593 ret
= get_errno(getegid());
6596 #ifdef TARGET_NR_setreuid32
6597 case TARGET_NR_setreuid32
:
6598 ret
= get_errno(setreuid(arg1
, arg2
));
6601 #ifdef TARGET_NR_setregid32
6602 case TARGET_NR_setregid32
:
6603 ret
= get_errno(setregid(arg1
, arg2
));
6606 #ifdef TARGET_NR_getgroups32
6607 case TARGET_NR_getgroups32
:
6609 int gidsetsize
= arg1
;
6610 uint32_t *target_grouplist
;
6614 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6615 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6616 if (gidsetsize
== 0)
6618 if (!is_error(ret
)) {
6619 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
6620 if (!target_grouplist
) {
6621 ret
= -TARGET_EFAULT
;
6624 for(i
= 0;i
< ret
; i
++)
6625 target_grouplist
[i
] = tswap32(grouplist
[i
]);
6626 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
6631 #ifdef TARGET_NR_setgroups32
6632 case TARGET_NR_setgroups32
:
6634 int gidsetsize
= arg1
;
6635 uint32_t *target_grouplist
;
6639 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6640 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
6641 if (!target_grouplist
) {
6642 ret
= -TARGET_EFAULT
;
6645 for(i
= 0;i
< gidsetsize
; i
++)
6646 grouplist
[i
] = tswap32(target_grouplist
[i
]);
6647 unlock_user(target_grouplist
, arg2
, 0);
6648 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6652 #ifdef TARGET_NR_fchown32
6653 case TARGET_NR_fchown32
:
6654 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
6657 #ifdef TARGET_NR_setresuid32
6658 case TARGET_NR_setresuid32
:
6659 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
6662 #ifdef TARGET_NR_getresuid32
6663 case TARGET_NR_getresuid32
:
6665 uid_t ruid
, euid
, suid
;
6666 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6667 if (!is_error(ret
)) {
6668 if (put_user_u32(ruid
, arg1
)
6669 || put_user_u32(euid
, arg2
)
6670 || put_user_u32(suid
, arg3
))
6676 #ifdef TARGET_NR_setresgid32
6677 case TARGET_NR_setresgid32
:
6678 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
6681 #ifdef TARGET_NR_getresgid32
6682 case TARGET_NR_getresgid32
:
6684 gid_t rgid
, egid
, sgid
;
6685 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6686 if (!is_error(ret
)) {
6687 if (put_user_u32(rgid
, arg1
)
6688 || put_user_u32(egid
, arg2
)
6689 || put_user_u32(sgid
, arg3
))
6695 #ifdef TARGET_NR_chown32
6696 case TARGET_NR_chown32
:
6697 if (!(p
= lock_user_string(arg1
)))
6699 ret
= get_errno(chown(p
, arg2
, arg3
));
6700 unlock_user(p
, arg1
, 0);
6703 #ifdef TARGET_NR_setuid32
6704 case TARGET_NR_setuid32
:
6705 ret
= get_errno(setuid(arg1
));
6708 #ifdef TARGET_NR_setgid32
6709 case TARGET_NR_setgid32
:
6710 ret
= get_errno(setgid(arg1
));
6713 #ifdef TARGET_NR_setfsuid32
6714 case TARGET_NR_setfsuid32
:
6715 ret
= get_errno(setfsuid(arg1
));
6718 #ifdef TARGET_NR_setfsgid32
6719 case TARGET_NR_setfsgid32
:
6720 ret
= get_errno(setfsgid(arg1
));
6724 case TARGET_NR_pivot_root
:
6726 #ifdef TARGET_NR_mincore
6727 case TARGET_NR_mincore
:
6730 ret
= -TARGET_EFAULT
;
6731 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
6733 if (!(p
= lock_user_string(arg3
)))
6735 ret
= get_errno(mincore(a
, arg2
, p
));
6736 unlock_user(p
, arg3
, ret
);
6738 unlock_user(a
, arg1
, 0);
6742 #ifdef TARGET_NR_arm_fadvise64_64
6743 case TARGET_NR_arm_fadvise64_64
:
6746 * arm_fadvise64_64 looks like fadvise64_64 but
6747 * with different argument order
6755 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
6756 #ifdef TARGET_NR_fadvise64_64
6757 case TARGET_NR_fadvise64_64
:
6759 #ifdef TARGET_NR_fadvise64
6760 case TARGET_NR_fadvise64
:
6764 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
6765 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
6766 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
6767 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
6771 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
6774 #ifdef TARGET_NR_madvise
6775 case TARGET_NR_madvise
:
6776 /* A straight passthrough may not be safe because qemu sometimes
6777 turns private flie-backed mappings into anonymous mappings.
6778 This will break MADV_DONTNEED.
6779 This is a hint, so ignoring and returning success is ok. */
6783 #if TARGET_ABI_BITS == 32
6784 case TARGET_NR_fcntl64
:
6788 struct target_flock64
*target_fl
;
6790 struct target_eabi_flock64
*target_efl
;
6793 cmd
= target_to_host_fcntl_cmd(arg2
);
6794 if (cmd
== -TARGET_EINVAL
)
6798 case TARGET_F_GETLK64
:
6800 if (((CPUARMState
*)cpu_env
)->eabi
) {
6801 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
6803 fl
.l_type
= tswap16(target_efl
->l_type
);
6804 fl
.l_whence
= tswap16(target_efl
->l_whence
);
6805 fl
.l_start
= tswap64(target_efl
->l_start
);
6806 fl
.l_len
= tswap64(target_efl
->l_len
);
6807 fl
.l_pid
= tswap32(target_efl
->l_pid
);
6808 unlock_user_struct(target_efl
, arg3
, 0);
6812 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
6814 fl
.l_type
= tswap16(target_fl
->l_type
);
6815 fl
.l_whence
= tswap16(target_fl
->l_whence
);
6816 fl
.l_start
= tswap64(target_fl
->l_start
);
6817 fl
.l_len
= tswap64(target_fl
->l_len
);
6818 fl
.l_pid
= tswap32(target_fl
->l_pid
);
6819 unlock_user_struct(target_fl
, arg3
, 0);
6821 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
6824 if (((CPUARMState
*)cpu_env
)->eabi
) {
6825 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
6827 target_efl
->l_type
= tswap16(fl
.l_type
);
6828 target_efl
->l_whence
= tswap16(fl
.l_whence
);
6829 target_efl
->l_start
= tswap64(fl
.l_start
);
6830 target_efl
->l_len
= tswap64(fl
.l_len
);
6831 target_efl
->l_pid
= tswap32(fl
.l_pid
);
6832 unlock_user_struct(target_efl
, arg3
, 1);
6836 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
6838 target_fl
->l_type
= tswap16(fl
.l_type
);
6839 target_fl
->l_whence
= tswap16(fl
.l_whence
);
6840 target_fl
->l_start
= tswap64(fl
.l_start
);
6841 target_fl
->l_len
= tswap64(fl
.l_len
);
6842 target_fl
->l_pid
= tswap32(fl
.l_pid
);
6843 unlock_user_struct(target_fl
, arg3
, 1);
6848 case TARGET_F_SETLK64
:
6849 case TARGET_F_SETLKW64
:
6851 if (((CPUARMState
*)cpu_env
)->eabi
) {
6852 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
6854 fl
.l_type
= tswap16(target_efl
->l_type
);
6855 fl
.l_whence
= tswap16(target_efl
->l_whence
);
6856 fl
.l_start
= tswap64(target_efl
->l_start
);
6857 fl
.l_len
= tswap64(target_efl
->l_len
);
6858 fl
.l_pid
= tswap32(target_efl
->l_pid
);
6859 unlock_user_struct(target_efl
, arg3
, 0);
6863 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
6865 fl
.l_type
= tswap16(target_fl
->l_type
);
6866 fl
.l_whence
= tswap16(target_fl
->l_whence
);
6867 fl
.l_start
= tswap64(target_fl
->l_start
);
6868 fl
.l_len
= tswap64(target_fl
->l_len
);
6869 fl
.l_pid
= tswap32(target_fl
->l_pid
);
6870 unlock_user_struct(target_fl
, arg3
, 0);
6872 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
6875 ret
= do_fcntl(arg1
, arg2
, arg3
);
6881 #ifdef TARGET_NR_cacheflush
6882 case TARGET_NR_cacheflush
:
6883 /* self-modifying code is handled automatically, so nothing needed */
6887 #ifdef TARGET_NR_security
6888 case TARGET_NR_security
:
6891 #ifdef TARGET_NR_getpagesize
6892 case TARGET_NR_getpagesize
:
6893 ret
= TARGET_PAGE_SIZE
;
6896 case TARGET_NR_gettid
:
6897 ret
= get_errno(gettid());
6899 #ifdef TARGET_NR_readahead
6900 case TARGET_NR_readahead
:
6901 #if TARGET_ABI_BITS == 32
6903 if (((CPUARMState
*)cpu_env
)->eabi
)
6910 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
6912 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
#ifdef TARGET_NR_setxattr
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
    case TARGET_NR_fsetxattr:
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    case TARGET_NR_fgetxattr:
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    case TARGET_NR_flistxattr:
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    case TARGET_NR_fremovexattr:
        ret = -TARGET_EOPNOTSUPP;
        break;
#endif
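    /* TLS setup is inherently per-architecture: MIPS and CRIS store the
       pointer straight into CPU state, 32-bit x86 goes through the GDT
       helper, and all other targets take the silent ENOSYS path. */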
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
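    /* clock_nanosleep reuses one timespec buffer for both the request and the
       remaining time; the remainder is copied back to the guest only when a
       non-NULL output pointer was passed in arg4. */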
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif

#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif
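    /* utimensat takes an array of two timespecs (atime, mtime). A NULL times
       pointer means "set both to the current time" and a NULL pathname makes
       the kernel operate on the descriptor itself, so both NULL cases are
       forwarded to the host syscall unchanged. */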
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3)
                tsp = NULL;
            else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr (&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;
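    /* For mq_timedsend/mq_timedreceive the message buffer stays locked in
       guest memory for the duration of the call; without a timeout the plain
       mq_send/mq_receive variants are used instead. */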
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user (p, arg2, arg3);
        }
        break;
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;
    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        break;
#endif
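    /* splice keeps its offsets in guest memory: each loff_t is fetched with
       get_user_u64 only when the corresponding pointer argument is non-zero,
       otherwise a NULL host pointer is passed through. */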
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(vmsplice(arg1, vec, count, arg4));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
#endif
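    /* Note: eventfd2's flags argument is currently handed to the host
       eventfd() unchanged, i.e. without target-to-host flag translation. */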
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
        break;
#endif
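    /* Anything not matched above is logged once via gemu_log and fails with
       -TARGET_ENOSYS; the unimplemented_nowarn label takes the same path
       without the log message. */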
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;