4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
31 #include <sys/types.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
45 int __clone2(int (*fn
)(void *), void *child_stack_base
,
46 size_t stack_size
, int flags
, void *arg
, ...);
48 #include <sys/socket.h>
52 #include <sys/times.h>
55 #include <sys/statfs.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <qemu-common.h>
67 #include <sys/eventfd.h>
70 #define termios host_termios
71 #define winsize host_winsize
72 #define termio host_termio
73 #define sgttyb host_sgttyb /* same as target */
74 #define tchars host_tchars /* same as target */
75 #define ltchars host_ltchars /* same as target */
77 #include <linux/termios.h>
78 #include <linux/unistd.h>
79 #include <linux/utsname.h>
80 #include <linux/cdrom.h>
81 #include <linux/hdreg.h>
82 #include <linux/soundcard.h>
84 #include <linux/mtio.h>
88 #include "linux_loop.h"
89 #include "cpu-uname.h"
92 #include "qemu-common.h"
94 #if defined(CONFIG_USE_NPTL)
95 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
96 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
98 /* XXX: Hardcode the above values. */
99 #define CLONE_NPTL_FLAGS2 0
104 //#include <linux/msdos_fs.h>
105 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
106 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
117 #define _syscall0(type,name) \
118 static type name (void) \
120 return syscall(__NR_##name); \
123 #define _syscall1(type,name,type1,arg1) \
124 static type name (type1 arg1) \
126 return syscall(__NR_##name, arg1); \
129 #define _syscall2(type,name,type1,arg1,type2,arg2) \
130 static type name (type1 arg1,type2 arg2) \
132 return syscall(__NR_##name, arg1, arg2); \
135 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
136 static type name (type1 arg1,type2 arg2,type3 arg3) \
138 return syscall(__NR_##name, arg1, arg2, arg3); \
141 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
142 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
144 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
147 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
149 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
151 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
155 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
156 type5,arg5,type6,arg6) \
157 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
160 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
164 #define __NR_sys_uname __NR_uname
165 #define __NR_sys_faccessat __NR_faccessat
166 #define __NR_sys_fchmodat __NR_fchmodat
167 #define __NR_sys_fchownat __NR_fchownat
168 #define __NR_sys_fstatat64 __NR_fstatat64
169 #define __NR_sys_futimesat __NR_futimesat
170 #define __NR_sys_getcwd1 __NR_getcwd
171 #define __NR_sys_getdents __NR_getdents
172 #define __NR_sys_getdents64 __NR_getdents64
173 #define __NR_sys_getpriority __NR_getpriority
174 #define __NR_sys_linkat __NR_linkat
175 #define __NR_sys_mkdirat __NR_mkdirat
176 #define __NR_sys_mknodat __NR_mknodat
177 #define __NR_sys_newfstatat __NR_newfstatat
178 #define __NR_sys_openat __NR_openat
179 #define __NR_sys_readlinkat __NR_readlinkat
180 #define __NR_sys_renameat __NR_renameat
181 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
182 #define __NR_sys_symlinkat __NR_symlinkat
183 #define __NR_sys_syslog __NR_syslog
184 #define __NR_sys_tgkill __NR_tgkill
185 #define __NR_sys_tkill __NR_tkill
186 #define __NR_sys_unlinkat __NR_unlinkat
187 #define __NR_sys_utimensat __NR_utimensat
188 #define __NR_sys_futex __NR_futex
189 #define __NR_sys_inotify_init __NR_inotify_init
190 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
191 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
193 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
194 #define __NR__llseek __NR_lseek
198 _syscall0(int, gettid
)
200 /* This is a replacement for the host gettid() and must return a host
202 static int gettid(void) {
206 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
207 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
208 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
210 _syscall2(int, sys_getpriority
, int, which
, int, who
);
211 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
212 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
213 loff_t
*, res
, uint
, wh
);
215 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
216 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
217 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
218 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
220 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
221 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
223 #ifdef __NR_exit_group
224 _syscall1(int,exit_group
,int,error_code
)
226 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
227 _syscall1(int,set_tid_address
,int *,tidptr
)
229 #if defined(CONFIG_USE_NPTL)
230 #if defined(TARGET_NR_futex) && defined(__NR_futex)
231 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
232 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
236 static bitmask_transtbl fcntl_flags_tbl
[] = {
237 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
238 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
239 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
240 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
241 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
242 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
243 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
244 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
245 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
246 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
247 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
248 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
249 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
250 #if defined(O_DIRECT)
251 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
256 #define COPY_UTSNAME_FIELD(dest, src) \
258 /* __NEW_UTS_LEN doesn't include terminating null */ \
259 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
260 (dest)[__NEW_UTS_LEN] = '\0'; \
263 static int sys_uname(struct new_utsname
*buf
)
265 struct utsname uts_buf
;
267 if (uname(&uts_buf
) < 0)
271 * Just in case these have some differences, we
272 * translate utsname to new_utsname (which is the
273 * struct linux kernel uses).
276 bzero(buf
, sizeof (*buf
));
277 COPY_UTSNAME_FIELD(buf
->sysname
, uts_buf
.sysname
);
278 COPY_UTSNAME_FIELD(buf
->nodename
, uts_buf
.nodename
);
279 COPY_UTSNAME_FIELD(buf
->release
, uts_buf
.release
);
280 COPY_UTSNAME_FIELD(buf
->version
, uts_buf
.version
);
281 COPY_UTSNAME_FIELD(buf
->machine
, uts_buf
.machine
);
283 COPY_UTSNAME_FIELD(buf
->domainname
, uts_buf
.domainname
);
287 #undef COPY_UTSNAME_FIELD
290 static int sys_getcwd1(char *buf
, size_t size
)
292 if (getcwd(buf
, size
) == NULL
) {
293 /* getcwd() sets errno */
296 return strlen(buf
)+1;
301 * Host system seems to have atfile syscall stubs available. We
302 * now enable them one by one as specified by target syscall_nr.h.
305 #ifdef TARGET_NR_faccessat
306 static int sys_faccessat(int dirfd
, const char *pathname
, int mode
)
308 return (faccessat(dirfd
, pathname
, mode
, 0));
311 #ifdef TARGET_NR_fchmodat
312 static int sys_fchmodat(int dirfd
, const char *pathname
, mode_t mode
)
314 return (fchmodat(dirfd
, pathname
, mode
, 0));
317 #if defined(TARGET_NR_fchownat) && defined(USE_UID16)
318 static int sys_fchownat(int dirfd
, const char *pathname
, uid_t owner
,
319 gid_t group
, int flags
)
321 return (fchownat(dirfd
, pathname
, owner
, group
, flags
));
324 #ifdef __NR_fstatat64
325 static int sys_fstatat64(int dirfd
, const char *pathname
, struct stat
*buf
,
328 return (fstatat(dirfd
, pathname
, buf
, flags
));
331 #ifdef __NR_newfstatat
332 static int sys_newfstatat(int dirfd
, const char *pathname
, struct stat
*buf
,
335 return (fstatat(dirfd
, pathname
, buf
, flags
));
338 #ifdef TARGET_NR_futimesat
339 static int sys_futimesat(int dirfd
, const char *pathname
,
340 const struct timeval times
[2])
342 return (futimesat(dirfd
, pathname
, times
));
345 #ifdef TARGET_NR_linkat
346 static int sys_linkat(int olddirfd
, const char *oldpath
,
347 int newdirfd
, const char *newpath
, int flags
)
349 return (linkat(olddirfd
, oldpath
, newdirfd
, newpath
, flags
));
352 #ifdef TARGET_NR_mkdirat
353 static int sys_mkdirat(int dirfd
, const char *pathname
, mode_t mode
)
355 return (mkdirat(dirfd
, pathname
, mode
));
358 #ifdef TARGET_NR_mknodat
359 static int sys_mknodat(int dirfd
, const char *pathname
, mode_t mode
,
362 return (mknodat(dirfd
, pathname
, mode
, dev
));
365 #ifdef TARGET_NR_openat
366 static int sys_openat(int dirfd
, const char *pathname
, int flags
, ...)
369 * open(2) has extra parameter 'mode' when called with
372 if ((flags
& O_CREAT
) != 0) {
377 * Get the 'mode' parameter and translate it to
381 mode
= va_arg(ap
, mode_t
);
382 mode
= target_to_host_bitmask(mode
, fcntl_flags_tbl
);
385 return (openat(dirfd
, pathname
, flags
, mode
));
387 return (openat(dirfd
, pathname
, flags
));
390 #ifdef TARGET_NR_readlinkat
391 static int sys_readlinkat(int dirfd
, const char *pathname
, char *buf
, size_t bufsiz
)
393 return (readlinkat(dirfd
, pathname
, buf
, bufsiz
));
396 #ifdef TARGET_NR_renameat
397 static int sys_renameat(int olddirfd
, const char *oldpath
,
398 int newdirfd
, const char *newpath
)
400 return (renameat(olddirfd
, oldpath
, newdirfd
, newpath
));
403 #ifdef TARGET_NR_symlinkat
404 static int sys_symlinkat(const char *oldpath
, int newdirfd
, const char *newpath
)
406 return (symlinkat(oldpath
, newdirfd
, newpath
));
409 #ifdef TARGET_NR_unlinkat
410 static int sys_unlinkat(int dirfd
, const char *pathname
, int flags
)
412 return (unlinkat(dirfd
, pathname
, flags
));
415 #else /* !CONFIG_ATFILE */
418 * Try direct syscalls instead
420 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
421 _syscall3(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
)
423 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
424 _syscall3(int,sys_fchmodat
,int,dirfd
,const char *,pathname
, mode_t
,mode
)
426 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
427 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
428 uid_t
,owner
,gid_t
,group
,int,flags
)
430 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
431 defined(__NR_fstatat64)
432 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
433 struct stat
*,buf
,int,flags
)
435 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
436 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
437 const struct timeval
*,times
)
439 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
440 defined(__NR_newfstatat)
441 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
442 struct stat
*,buf
,int,flags
)
444 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
445 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
446 int,newdirfd
,const char *,newpath
,int,flags
)
448 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
449 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
451 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
452 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
453 mode_t
,mode
,dev_t
,dev
)
455 #if defined(TARGET_NR_openat) && defined(__NR_openat)
456 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
458 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
459 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
460 char *,buf
,size_t,bufsize
)
462 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
463 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
464 int,newdirfd
,const char *,newpath
)
466 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
467 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
468 int,newdirfd
,const char *,newpath
)
470 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
471 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
474 #endif /* CONFIG_ATFILE */
476 #ifdef CONFIG_UTIMENSAT
477 static int sys_utimensat(int dirfd
, const char *pathname
,
478 const struct timespec times
[2], int flags
)
480 if (pathname
== NULL
)
481 return futimens(dirfd
, times
);
483 return utimensat(dirfd
, pathname
, times
, flags
);
486 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
487 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
488 const struct timespec
*,tsp
,int,flags
)
490 #endif /* CONFIG_UTIMENSAT */
492 #ifdef CONFIG_INOTIFY
493 #include <sys/inotify.h>
495 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
496 static int sys_inotify_init(void)
498 return (inotify_init());
501 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
502 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
504 return (inotify_add_watch(fd
, pathname
, mask
));
507 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
508 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
510 return (inotify_rm_watch(fd
, wd
));
513 #ifdef CONFIG_INOTIFY1
514 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
515 static int sys_inotify_init1(int flags
)
517 return (inotify_init1(flags
));
522 /* Userspace can usually survive runtime without inotify */
523 #undef TARGET_NR_inotify_init
524 #undef TARGET_NR_inotify_init1
525 #undef TARGET_NR_inotify_add_watch
526 #undef TARGET_NR_inotify_rm_watch
527 #endif /* CONFIG_INOTIFY */
530 extern int personality(int);
531 extern int flock(int, int);
532 extern int setfsuid(int);
533 extern int setfsgid(int);
534 extern int setgroups(int, gid_t
*);
536 #define ERRNO_TABLE_SIZE 1200
538 /* target_to_host_errno_table[] is initialized from
539 * host_to_target_errno_table[] in syscall_init(). */
540 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
544 * This list is the union of errno values overridden in asm-<arch>/errno.h
545 * minus the errnos that are not actually generic to all archs.
547 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
548 [EIDRM
] = TARGET_EIDRM
,
549 [ECHRNG
] = TARGET_ECHRNG
,
550 [EL2NSYNC
] = TARGET_EL2NSYNC
,
551 [EL3HLT
] = TARGET_EL3HLT
,
552 [EL3RST
] = TARGET_EL3RST
,
553 [ELNRNG
] = TARGET_ELNRNG
,
554 [EUNATCH
] = TARGET_EUNATCH
,
555 [ENOCSI
] = TARGET_ENOCSI
,
556 [EL2HLT
] = TARGET_EL2HLT
,
557 [EDEADLK
] = TARGET_EDEADLK
,
558 [ENOLCK
] = TARGET_ENOLCK
,
559 [EBADE
] = TARGET_EBADE
,
560 [EBADR
] = TARGET_EBADR
,
561 [EXFULL
] = TARGET_EXFULL
,
562 [ENOANO
] = TARGET_ENOANO
,
563 [EBADRQC
] = TARGET_EBADRQC
,
564 [EBADSLT
] = TARGET_EBADSLT
,
565 [EBFONT
] = TARGET_EBFONT
,
566 [ENOSTR
] = TARGET_ENOSTR
,
567 [ENODATA
] = TARGET_ENODATA
,
568 [ETIME
] = TARGET_ETIME
,
569 [ENOSR
] = TARGET_ENOSR
,
570 [ENONET
] = TARGET_ENONET
,
571 [ENOPKG
] = TARGET_ENOPKG
,
572 [EREMOTE
] = TARGET_EREMOTE
,
573 [ENOLINK
] = TARGET_ENOLINK
,
574 [EADV
] = TARGET_EADV
,
575 [ESRMNT
] = TARGET_ESRMNT
,
576 [ECOMM
] = TARGET_ECOMM
,
577 [EPROTO
] = TARGET_EPROTO
,
578 [EDOTDOT
] = TARGET_EDOTDOT
,
579 [EMULTIHOP
] = TARGET_EMULTIHOP
,
580 [EBADMSG
] = TARGET_EBADMSG
,
581 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
582 [EOVERFLOW
] = TARGET_EOVERFLOW
,
583 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
584 [EBADFD
] = TARGET_EBADFD
,
585 [EREMCHG
] = TARGET_EREMCHG
,
586 [ELIBACC
] = TARGET_ELIBACC
,
587 [ELIBBAD
] = TARGET_ELIBBAD
,
588 [ELIBSCN
] = TARGET_ELIBSCN
,
589 [ELIBMAX
] = TARGET_ELIBMAX
,
590 [ELIBEXEC
] = TARGET_ELIBEXEC
,
591 [EILSEQ
] = TARGET_EILSEQ
,
592 [ENOSYS
] = TARGET_ENOSYS
,
593 [ELOOP
] = TARGET_ELOOP
,
594 [ERESTART
] = TARGET_ERESTART
,
595 [ESTRPIPE
] = TARGET_ESTRPIPE
,
596 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
597 [EUSERS
] = TARGET_EUSERS
,
598 [ENOTSOCK
] = TARGET_ENOTSOCK
,
599 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
600 [EMSGSIZE
] = TARGET_EMSGSIZE
,
601 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
602 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
603 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
604 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
605 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
606 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
607 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
608 [EADDRINUSE
] = TARGET_EADDRINUSE
,
609 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
610 [ENETDOWN
] = TARGET_ENETDOWN
,
611 [ENETUNREACH
] = TARGET_ENETUNREACH
,
612 [ENETRESET
] = TARGET_ENETRESET
,
613 [ECONNABORTED
] = TARGET_ECONNABORTED
,
614 [ECONNRESET
] = TARGET_ECONNRESET
,
615 [ENOBUFS
] = TARGET_ENOBUFS
,
616 [EISCONN
] = TARGET_EISCONN
,
617 [ENOTCONN
] = TARGET_ENOTCONN
,
618 [EUCLEAN
] = TARGET_EUCLEAN
,
619 [ENOTNAM
] = TARGET_ENOTNAM
,
620 [ENAVAIL
] = TARGET_ENAVAIL
,
621 [EISNAM
] = TARGET_EISNAM
,
622 [EREMOTEIO
] = TARGET_EREMOTEIO
,
623 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
624 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
625 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
626 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
627 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
628 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
629 [EALREADY
] = TARGET_EALREADY
,
630 [EINPROGRESS
] = TARGET_EINPROGRESS
,
631 [ESTALE
] = TARGET_ESTALE
,
632 [ECANCELED
] = TARGET_ECANCELED
,
633 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
634 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
636 [ENOKEY
] = TARGET_ENOKEY
,
639 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
642 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
645 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
648 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
650 #ifdef ENOTRECOVERABLE
651 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
655 static inline int host_to_target_errno(int err
)
657 if(host_to_target_errno_table
[err
])
658 return host_to_target_errno_table
[err
];
662 static inline int target_to_host_errno(int err
)
664 if (target_to_host_errno_table
[err
])
665 return target_to_host_errno_table
[err
];
669 static inline abi_long
get_errno(abi_long ret
)
672 return -host_to_target_errno(errno
);
677 static inline int is_error(abi_long ret
)
679 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
682 char *target_strerror(int err
)
684 return strerror(target_to_host_errno(err
));
687 static abi_ulong target_brk
;
688 static abi_ulong target_original_brk
;
690 void target_set_brk(abi_ulong new_brk
)
692 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
695 /* do_brk() must return target values and target errnos. */
696 abi_long
do_brk(abi_ulong new_brk
)
699 abi_long mapped_addr
;
704 if (new_brk
< target_original_brk
)
707 brk_page
= HOST_PAGE_ALIGN(target_brk
);
709 /* If the new brk is less than this, set it and we're done... */
710 if (new_brk
< brk_page
) {
711 target_brk
= new_brk
;
715 /* We need to allocate more memory after the brk... */
716 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
+ 1);
717 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
718 PROT_READ
|PROT_WRITE
,
719 MAP_ANON
|MAP_FIXED
|MAP_PRIVATE
, 0, 0));
721 #if defined(TARGET_ALPHA)
722 /* We (partially) emulate OSF/1 on Alpha, which requires we
723 return a proper errno, not an unchanged brk value. */
724 if (is_error(mapped_addr
)) {
725 return -TARGET_ENOMEM
;
729 if (!is_error(mapped_addr
)) {
730 target_brk
= new_brk
;
735 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
736 abi_ulong target_fds_addr
,
740 abi_ulong b
, *target_fds
;
742 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
743 if (!(target_fds
= lock_user(VERIFY_READ
,
745 sizeof(abi_ulong
) * nw
,
747 return -TARGET_EFAULT
;
751 for (i
= 0; i
< nw
; i
++) {
752 /* grab the abi_ulong */
753 __get_user(b
, &target_fds
[i
]);
754 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
755 /* check the bit inside the abi_ulong */
762 unlock_user(target_fds
, target_fds_addr
, 0);
767 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
773 abi_ulong
*target_fds
;
775 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
776 if (!(target_fds
= lock_user(VERIFY_WRITE
,
778 sizeof(abi_ulong
) * nw
,
780 return -TARGET_EFAULT
;
783 for (i
= 0; i
< nw
; i
++) {
785 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
786 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
789 __put_user(v
, &target_fds
[i
]);
792 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
797 #if defined(__alpha__)
803 static inline abi_long
host_to_target_clock_t(long ticks
)
805 #if HOST_HZ == TARGET_HZ
808 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
812 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
813 const struct rusage
*rusage
)
815 struct target_rusage
*target_rusage
;
817 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
818 return -TARGET_EFAULT
;
819 target_rusage
->ru_utime
.tv_sec
= tswapl(rusage
->ru_utime
.tv_sec
);
820 target_rusage
->ru_utime
.tv_usec
= tswapl(rusage
->ru_utime
.tv_usec
);
821 target_rusage
->ru_stime
.tv_sec
= tswapl(rusage
->ru_stime
.tv_sec
);
822 target_rusage
->ru_stime
.tv_usec
= tswapl(rusage
->ru_stime
.tv_usec
);
823 target_rusage
->ru_maxrss
= tswapl(rusage
->ru_maxrss
);
824 target_rusage
->ru_ixrss
= tswapl(rusage
->ru_ixrss
);
825 target_rusage
->ru_idrss
= tswapl(rusage
->ru_idrss
);
826 target_rusage
->ru_isrss
= tswapl(rusage
->ru_isrss
);
827 target_rusage
->ru_minflt
= tswapl(rusage
->ru_minflt
);
828 target_rusage
->ru_majflt
= tswapl(rusage
->ru_majflt
);
829 target_rusage
->ru_nswap
= tswapl(rusage
->ru_nswap
);
830 target_rusage
->ru_inblock
= tswapl(rusage
->ru_inblock
);
831 target_rusage
->ru_oublock
= tswapl(rusage
->ru_oublock
);
832 target_rusage
->ru_msgsnd
= tswapl(rusage
->ru_msgsnd
);
833 target_rusage
->ru_msgrcv
= tswapl(rusage
->ru_msgrcv
);
834 target_rusage
->ru_nsignals
= tswapl(rusage
->ru_nsignals
);
835 target_rusage
->ru_nvcsw
= tswapl(rusage
->ru_nvcsw
);
836 target_rusage
->ru_nivcsw
= tswapl(rusage
->ru_nivcsw
);
837 unlock_user_struct(target_rusage
, target_addr
, 1);
842 static inline rlim_t
target_to_host_rlim(target_ulong target_rlim
)
844 if (target_rlim
== TARGET_RLIM_INFINITY
)
845 return RLIM_INFINITY
;
847 return tswapl(target_rlim
);
850 static inline target_ulong
host_to_target_rlim(rlim_t rlim
)
852 if (rlim
== RLIM_INFINITY
|| rlim
!= (target_long
)rlim
)
853 return TARGET_RLIM_INFINITY
;
858 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
859 abi_ulong target_tv_addr
)
861 struct target_timeval
*target_tv
;
863 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
864 return -TARGET_EFAULT
;
866 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
867 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
869 unlock_user_struct(target_tv
, target_tv_addr
, 0);
874 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
875 const struct timeval
*tv
)
877 struct target_timeval
*target_tv
;
879 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
880 return -TARGET_EFAULT
;
882 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
883 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
885 unlock_user_struct(target_tv
, target_tv_addr
, 1);
890 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
893 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
894 abi_ulong target_mq_attr_addr
)
896 struct target_mq_attr
*target_mq_attr
;
898 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
899 target_mq_attr_addr
, 1))
900 return -TARGET_EFAULT
;
902 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
903 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
904 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
905 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
907 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
912 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
913 const struct mq_attr
*attr
)
915 struct target_mq_attr
*target_mq_attr
;
917 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
918 target_mq_attr_addr
, 0))
919 return -TARGET_EFAULT
;
921 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
922 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
923 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
924 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
926 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
932 /* do_select() must return target values and target errnos. */
933 static abi_long
do_select(int n
,
934 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
935 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
937 fd_set rfds
, wfds
, efds
;
938 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
939 struct timeval tv
, *tv_ptr
;
943 if (copy_from_user_fdset(&rfds
, rfd_addr
, n
))
944 return -TARGET_EFAULT
;
950 if (copy_from_user_fdset(&wfds
, wfd_addr
, n
))
951 return -TARGET_EFAULT
;
957 if (copy_from_user_fdset(&efds
, efd_addr
, n
))
958 return -TARGET_EFAULT
;
964 if (target_tv_addr
) {
965 if (copy_from_user_timeval(&tv
, target_tv_addr
))
966 return -TARGET_EFAULT
;
972 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
974 if (!is_error(ret
)) {
975 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
976 return -TARGET_EFAULT
;
977 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
978 return -TARGET_EFAULT
;
979 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
980 return -TARGET_EFAULT
;
982 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
983 return -TARGET_EFAULT
;
989 static abi_long
do_pipe2(int host_pipe
[], int flags
)
992 return pipe2(host_pipe
, flags
);
998 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
999 int flags
, int is_pipe2
)
1003 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1006 return get_errno(ret
);
1008 /* Several targets have special calling conventions for the original
1009 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1011 #if defined(TARGET_ALPHA)
1012 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1013 return host_pipe
[0];
1014 #elif defined(TARGET_MIPS)
1015 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1016 return host_pipe
[0];
1017 #elif defined(TARGET_SH4)
1018 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1019 return host_pipe
[0];
1023 if (put_user_s32(host_pipe
[0], pipedes
)
1024 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1025 return -TARGET_EFAULT
;
1026 return get_errno(ret
);
1029 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1030 abi_ulong target_addr
,
1033 struct target_ip_mreqn
*target_smreqn
;
1035 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1037 return -TARGET_EFAULT
;
1038 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1039 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1040 if (len
== sizeof(struct target_ip_mreqn
))
1041 mreqn
->imr_ifindex
= tswapl(target_smreqn
->imr_ifindex
);
1042 unlock_user(target_smreqn
, target_addr
, 0);
1047 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1048 abi_ulong target_addr
,
1051 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1052 sa_family_t sa_family
;
1053 struct target_sockaddr
*target_saddr
;
1055 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1057 return -TARGET_EFAULT
;
1059 sa_family
= tswap16(target_saddr
->sa_family
);
1061 /* Oops. The caller might send a incomplete sun_path; sun_path
1062 * must be terminated by \0 (see the manual page), but
1063 * unfortunately it is quite common to specify sockaddr_un
1064 * length as "strlen(x->sun_path)" while it should be
1065 * "strlen(...) + 1". We'll fix that here if needed.
1066 * Linux kernel has a similar feature.
1069 if (sa_family
== AF_UNIX
) {
1070 if (len
< unix_maxlen
&& len
> 0) {
1071 char *cp
= (char*)target_saddr
;
1073 if ( cp
[len
-1] && !cp
[len
] )
1076 if (len
> unix_maxlen
)
1080 memcpy(addr
, target_saddr
, len
);
1081 addr
->sa_family
= sa_family
;
1082 unlock_user(target_saddr
, target_addr
, 0);
1087 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1088 struct sockaddr
*addr
,
1091 struct target_sockaddr
*target_saddr
;
1093 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1095 return -TARGET_EFAULT
;
1096 memcpy(target_saddr
, addr
, len
);
1097 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1098 unlock_user(target_saddr
, target_addr
, len
);
1103 /* ??? Should this also swap msgh->name? */
1104 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1105 struct target_msghdr
*target_msgh
)
1107 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1108 abi_long msg_controllen
;
1109 abi_ulong target_cmsg_addr
;
1110 struct target_cmsghdr
*target_cmsg
;
1111 socklen_t space
= 0;
1113 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1114 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1116 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1117 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1119 return -TARGET_EFAULT
;
1121 while (cmsg
&& target_cmsg
) {
1122 void *data
= CMSG_DATA(cmsg
);
1123 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1125 int len
= tswapl(target_cmsg
->cmsg_len
)
1126 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1128 space
+= CMSG_SPACE(len
);
1129 if (space
> msgh
->msg_controllen
) {
1130 space
-= CMSG_SPACE(len
);
1131 gemu_log("Host cmsg overflow\n");
1135 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1136 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1137 cmsg
->cmsg_len
= CMSG_LEN(len
);
1139 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1140 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1141 memcpy(data
, target_data
, len
);
1143 int *fd
= (int *)data
;
1144 int *target_fd
= (int *)target_data
;
1145 int i
, numfds
= len
/ sizeof(int);
1147 for (i
= 0; i
< numfds
; i
++)
1148 fd
[i
] = tswap32(target_fd
[i
]);
1151 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1152 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1154 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1156 msgh
->msg_controllen
= space
;
1160 /* ??? Should this also swap msgh->name? */
1161 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1162 struct msghdr
*msgh
)
1164 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1165 abi_long msg_controllen
;
1166 abi_ulong target_cmsg_addr
;
1167 struct target_cmsghdr
*target_cmsg
;
1168 socklen_t space
= 0;
1170 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1171 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1173 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1174 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1176 return -TARGET_EFAULT
;
1178 while (cmsg
&& target_cmsg
) {
1179 void *data
= CMSG_DATA(cmsg
);
1180 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1182 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1184 space
+= TARGET_CMSG_SPACE(len
);
1185 if (space
> msg_controllen
) {
1186 space
-= TARGET_CMSG_SPACE(len
);
1187 gemu_log("Target cmsg overflow\n");
1191 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1192 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1193 target_cmsg
->cmsg_len
= tswapl(TARGET_CMSG_LEN(len
));
1195 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1196 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1197 memcpy(target_data
, data
, len
);
1199 int *fd
= (int *)data
;
1200 int *target_fd
= (int *)target_data
;
1201 int i
, numfds
= len
/ sizeof(int);
1203 for (i
= 0; i
< numfds
; i
++)
1204 target_fd
[i
] = tswap32(fd
[i
]);
1207 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1208 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1210 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1212 target_msgh
->msg_controllen
= tswapl(space
);
1216 /* do_setsockopt() Must return target values and target errnos. */
1217 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1218 abi_ulong optval_addr
, socklen_t optlen
)
1222 struct ip_mreqn
*ip_mreq
;
1223 struct ip_mreq_source
*ip_mreq_source
;
1227 /* TCP options all take an 'int' value. */
1228 if (optlen
< sizeof(uint32_t))
1229 return -TARGET_EINVAL
;
1231 if (get_user_u32(val
, optval_addr
))
1232 return -TARGET_EFAULT
;
1233 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1240 case IP_ROUTER_ALERT
:
1244 case IP_MTU_DISCOVER
:
1250 case IP_MULTICAST_TTL
:
1251 case IP_MULTICAST_LOOP
:
1253 if (optlen
>= sizeof(uint32_t)) {
1254 if (get_user_u32(val
, optval_addr
))
1255 return -TARGET_EFAULT
;
1256 } else if (optlen
>= 1) {
1257 if (get_user_u8(val
, optval_addr
))
1258 return -TARGET_EFAULT
;
1260 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1262 case IP_ADD_MEMBERSHIP
:
1263 case IP_DROP_MEMBERSHIP
:
1264 if (optlen
< sizeof (struct target_ip_mreq
) ||
1265 optlen
> sizeof (struct target_ip_mreqn
))
1266 return -TARGET_EINVAL
;
1268 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1269 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1270 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1273 case IP_BLOCK_SOURCE
:
1274 case IP_UNBLOCK_SOURCE
:
1275 case IP_ADD_SOURCE_MEMBERSHIP
:
1276 case IP_DROP_SOURCE_MEMBERSHIP
:
1277 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1278 return -TARGET_EINVAL
;
1280 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1281 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1282 unlock_user (ip_mreq_source
, optval_addr
, 0);
1289 case TARGET_SOL_SOCKET
:
1291 /* Options with 'int' argument. */
1292 case TARGET_SO_DEBUG
:
1295 case TARGET_SO_REUSEADDR
:
1296 optname
= SO_REUSEADDR
;
1298 case TARGET_SO_TYPE
:
1301 case TARGET_SO_ERROR
:
1304 case TARGET_SO_DONTROUTE
:
1305 optname
= SO_DONTROUTE
;
1307 case TARGET_SO_BROADCAST
:
1308 optname
= SO_BROADCAST
;
1310 case TARGET_SO_SNDBUF
:
1311 optname
= SO_SNDBUF
;
1313 case TARGET_SO_RCVBUF
:
1314 optname
= SO_RCVBUF
;
1316 case TARGET_SO_KEEPALIVE
:
1317 optname
= SO_KEEPALIVE
;
1319 case TARGET_SO_OOBINLINE
:
1320 optname
= SO_OOBINLINE
;
1322 case TARGET_SO_NO_CHECK
:
1323 optname
= SO_NO_CHECK
;
1325 case TARGET_SO_PRIORITY
:
1326 optname
= SO_PRIORITY
;
1329 case TARGET_SO_BSDCOMPAT
:
1330 optname
= SO_BSDCOMPAT
;
1333 case TARGET_SO_PASSCRED
:
1334 optname
= SO_PASSCRED
;
1336 case TARGET_SO_TIMESTAMP
:
1337 optname
= SO_TIMESTAMP
;
1339 case TARGET_SO_RCVLOWAT
:
1340 optname
= SO_RCVLOWAT
;
1342 case TARGET_SO_RCVTIMEO
:
1343 optname
= SO_RCVTIMEO
;
1345 case TARGET_SO_SNDTIMEO
:
1346 optname
= SO_SNDTIMEO
;
1352 if (optlen
< sizeof(uint32_t))
1353 return -TARGET_EINVAL
;
1355 if (get_user_u32(val
, optval_addr
))
1356 return -TARGET_EFAULT
;
1357 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1361 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level
, optname
);
1362 ret
= -TARGET_ENOPROTOOPT
;
1367 /* do_getsockopt() Must return target values and target errnos. */
1368 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1369 abi_ulong optval_addr
, abi_ulong optlen
)
1376 case TARGET_SOL_SOCKET
:
1379 /* These don't just return a single integer */
1380 case TARGET_SO_LINGER
:
1381 case TARGET_SO_RCVTIMEO
:
1382 case TARGET_SO_SNDTIMEO
:
1383 case TARGET_SO_PEERCRED
:
1384 case TARGET_SO_PEERNAME
:
1386 /* Options with 'int' argument. */
1387 case TARGET_SO_DEBUG
:
1390 case TARGET_SO_REUSEADDR
:
1391 optname
= SO_REUSEADDR
;
1393 case TARGET_SO_TYPE
:
1396 case TARGET_SO_ERROR
:
1399 case TARGET_SO_DONTROUTE
:
1400 optname
= SO_DONTROUTE
;
1402 case TARGET_SO_BROADCAST
:
1403 optname
= SO_BROADCAST
;
1405 case TARGET_SO_SNDBUF
:
1406 optname
= SO_SNDBUF
;
1408 case TARGET_SO_RCVBUF
:
1409 optname
= SO_RCVBUF
;
1411 case TARGET_SO_KEEPALIVE
:
1412 optname
= SO_KEEPALIVE
;
1414 case TARGET_SO_OOBINLINE
:
1415 optname
= SO_OOBINLINE
;
1417 case TARGET_SO_NO_CHECK
:
1418 optname
= SO_NO_CHECK
;
1420 case TARGET_SO_PRIORITY
:
1421 optname
= SO_PRIORITY
;
1424 case TARGET_SO_BSDCOMPAT
:
1425 optname
= SO_BSDCOMPAT
;
1428 case TARGET_SO_PASSCRED
:
1429 optname
= SO_PASSCRED
;
1431 case TARGET_SO_TIMESTAMP
:
1432 optname
= SO_TIMESTAMP
;
1434 case TARGET_SO_RCVLOWAT
:
1435 optname
= SO_RCVLOWAT
;
1442 /* TCP options all take an 'int' value. */
1444 if (get_user_u32(len
, optlen
))
1445 return -TARGET_EFAULT
;
1447 return -TARGET_EINVAL
;
1449 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1455 if (put_user_u32(val
, optval_addr
))
1456 return -TARGET_EFAULT
;
1458 if (put_user_u8(val
, optval_addr
))
1459 return -TARGET_EFAULT
;
1461 if (put_user_u32(len
, optlen
))
1462 return -TARGET_EFAULT
;
1469 case IP_ROUTER_ALERT
:
1473 case IP_MTU_DISCOVER
:
1479 case IP_MULTICAST_TTL
:
1480 case IP_MULTICAST_LOOP
:
1481 if (get_user_u32(len
, optlen
))
1482 return -TARGET_EFAULT
;
1484 return -TARGET_EINVAL
;
1486 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1489 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1491 if (put_user_u32(len
, optlen
)
1492 || put_user_u8(val
, optval_addr
))
1493 return -TARGET_EFAULT
;
1495 if (len
> sizeof(int))
1497 if (put_user_u32(len
, optlen
)
1498 || put_user_u32(val
, optval_addr
))
1499 return -TARGET_EFAULT
;
1503 ret
= -TARGET_ENOPROTOOPT
;
1509 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1511 ret
= -TARGET_EOPNOTSUPP
;
1518 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1519 * other lock functions have a return code of 0 for failure.
1521 static abi_long
lock_iovec(int type
, struct iovec
*vec
, abi_ulong target_addr
,
1522 int count
, int copy
)
1524 struct target_iovec
*target_vec
;
1528 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1530 return -TARGET_EFAULT
;
1531 for(i
= 0;i
< count
; i
++) {
1532 base
= tswapl(target_vec
[i
].iov_base
);
1533 vec
[i
].iov_len
= tswapl(target_vec
[i
].iov_len
);
1534 if (vec
[i
].iov_len
!= 0) {
1535 vec
[i
].iov_base
= lock_user(type
, base
, vec
[i
].iov_len
, copy
);
1536 /* Don't check lock_user return value. We must call writev even
1537 if a element has invalid base address. */
1539 /* zero length pointer is ignored */
1540 vec
[i
].iov_base
= NULL
;
1543 unlock_user (target_vec
, target_addr
, 0);
1547 static abi_long
unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1548 int count
, int copy
)
1550 struct target_iovec
*target_vec
;
1554 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1556 return -TARGET_EFAULT
;
1557 for(i
= 0;i
< count
; i
++) {
1558 if (target_vec
[i
].iov_base
) {
1559 base
= tswapl(target_vec
[i
].iov_base
);
1560 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1563 unlock_user (target_vec
, target_addr
, 0);
1568 /* do_socket() Must return target values and target errnos. */
1569 static abi_long
do_socket(int domain
, int type
, int protocol
)
1571 #if defined(TARGET_MIPS)
1573 case TARGET_SOCK_DGRAM
:
1576 case TARGET_SOCK_STREAM
:
1579 case TARGET_SOCK_RAW
:
1582 case TARGET_SOCK_RDM
:
1585 case TARGET_SOCK_SEQPACKET
:
1586 type
= SOCK_SEQPACKET
;
1588 case TARGET_SOCK_PACKET
:
1593 if (domain
== PF_NETLINK
)
1594 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1595 return get_errno(socket(domain
, type
, protocol
));
1598 /* do_bind() Must return target values and target errnos. */
1599 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1605 if ((int)addrlen
< 0) {
1606 return -TARGET_EINVAL
;
1609 addr
= alloca(addrlen
+1);
1611 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1615 return get_errno(bind(sockfd
, addr
, addrlen
));
1618 /* do_connect() Must return target values and target errnos. */
1619 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1625 if ((int)addrlen
< 0) {
1626 return -TARGET_EINVAL
;
1629 addr
= alloca(addrlen
);
1631 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1635 return get_errno(connect(sockfd
, addr
, addrlen
));
1638 /* do_sendrecvmsg() Must return target values and target errnos. */
1639 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1640 int flags
, int send
)
1643 struct target_msghdr
*msgp
;
1647 abi_ulong target_vec
;
1650 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1654 return -TARGET_EFAULT
;
1655 if (msgp
->msg_name
) {
1656 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1657 msg
.msg_name
= alloca(msg
.msg_namelen
);
1658 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapl(msgp
->msg_name
),
1661 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1665 msg
.msg_name
= NULL
;
1666 msg
.msg_namelen
= 0;
1668 msg
.msg_controllen
= 2 * tswapl(msgp
->msg_controllen
);
1669 msg
.msg_control
= alloca(msg
.msg_controllen
);
1670 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1672 count
= tswapl(msgp
->msg_iovlen
);
1673 vec
= alloca(count
* sizeof(struct iovec
));
1674 target_vec
= tswapl(msgp
->msg_iov
);
1675 lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
, vec
, target_vec
, count
, send
);
1676 msg
.msg_iovlen
= count
;
1680 ret
= target_to_host_cmsg(&msg
, msgp
);
1682 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1684 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1685 if (!is_error(ret
)) {
1687 ret
= host_to_target_cmsg(msgp
, &msg
);
1692 unlock_iovec(vec
, target_vec
, count
, !send
);
1693 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1697 /* do_accept() Must return target values and target errnos. */
1698 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
1699 abi_ulong target_addrlen_addr
)
1705 if (target_addr
== 0)
1706 return get_errno(accept(fd
, NULL
, NULL
));
1708 /* linux returns EINVAL if addrlen pointer is invalid */
1709 if (get_user_u32(addrlen
, target_addrlen_addr
))
1710 return -TARGET_EINVAL
;
1712 if ((int)addrlen
< 0) {
1713 return -TARGET_EINVAL
;
1716 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1717 return -TARGET_EINVAL
;
1719 addr
= alloca(addrlen
);
1721 ret
= get_errno(accept(fd
, addr
, &addrlen
));
1722 if (!is_error(ret
)) {
1723 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1724 if (put_user_u32(addrlen
, target_addrlen_addr
))
1725 ret
= -TARGET_EFAULT
;
1730 /* do_getpeername() Must return target values and target errnos. */
1731 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
1732 abi_ulong target_addrlen_addr
)
1738 if (get_user_u32(addrlen
, target_addrlen_addr
))
1739 return -TARGET_EFAULT
;
1741 if ((int)addrlen
< 0) {
1742 return -TARGET_EINVAL
;
1745 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1746 return -TARGET_EFAULT
;
1748 addr
= alloca(addrlen
);
1750 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
1751 if (!is_error(ret
)) {
1752 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1753 if (put_user_u32(addrlen
, target_addrlen_addr
))
1754 ret
= -TARGET_EFAULT
;
1759 /* do_getsockname() Must return target values and target errnos. */
1760 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
1761 abi_ulong target_addrlen_addr
)
1767 if (get_user_u32(addrlen
, target_addrlen_addr
))
1768 return -TARGET_EFAULT
;
1770 if ((int)addrlen
< 0) {
1771 return -TARGET_EINVAL
;
1774 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1775 return -TARGET_EFAULT
;
1777 addr
= alloca(addrlen
);
1779 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
1780 if (!is_error(ret
)) {
1781 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1782 if (put_user_u32(addrlen
, target_addrlen_addr
))
1783 ret
= -TARGET_EFAULT
;
1788 /* do_socketpair() Must return target values and target errnos. */
1789 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
1790 abi_ulong target_tab_addr
)
1795 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
1796 if (!is_error(ret
)) {
1797 if (put_user_s32(tab
[0], target_tab_addr
)
1798 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
1799 ret
= -TARGET_EFAULT
;
1804 /* do_sendto() Must return target values and target errnos. */
1805 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
1806 abi_ulong target_addr
, socklen_t addrlen
)
1812 if ((int)addrlen
< 0) {
1813 return -TARGET_EINVAL
;
1816 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
1818 return -TARGET_EFAULT
;
1820 addr
= alloca(addrlen
);
1821 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1823 unlock_user(host_msg
, msg
, 0);
1826 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
1828 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
1830 unlock_user(host_msg
, msg
, 0);
1834 /* do_recvfrom() Must return target values and target errnos. */
1835 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
1836 abi_ulong target_addr
,
1837 abi_ulong target_addrlen
)
1844 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
1846 return -TARGET_EFAULT
;
1848 if (get_user_u32(addrlen
, target_addrlen
)) {
1849 ret
= -TARGET_EFAULT
;
1852 if ((int)addrlen
< 0) {
1853 ret
= -TARGET_EINVAL
;
1856 addr
= alloca(addrlen
);
1857 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
1859 addr
= NULL
; /* To keep compiler quiet. */
1860 ret
= get_errno(recv(fd
, host_msg
, len
, flags
));
1862 if (!is_error(ret
)) {
1864 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1865 if (put_user_u32(addrlen
, target_addrlen
)) {
1866 ret
= -TARGET_EFAULT
;
1870 unlock_user(host_msg
, msg
, len
);
1873 unlock_user(host_msg
, msg
, 0);
1878 #ifdef TARGET_NR_socketcall
1879 /* do_socketcall() Must return target values and target errnos. */
1880 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
1883 const int n
= sizeof(abi_ulong
);
1888 abi_ulong domain
, type
, protocol
;
1890 if (get_user_ual(domain
, vptr
)
1891 || get_user_ual(type
, vptr
+ n
)
1892 || get_user_ual(protocol
, vptr
+ 2 * n
))
1893 return -TARGET_EFAULT
;
1895 ret
= do_socket(domain
, type
, protocol
);
1901 abi_ulong target_addr
;
1904 if (get_user_ual(sockfd
, vptr
)
1905 || get_user_ual(target_addr
, vptr
+ n
)
1906 || get_user_ual(addrlen
, vptr
+ 2 * n
))
1907 return -TARGET_EFAULT
;
1909 ret
= do_bind(sockfd
, target_addr
, addrlen
);
1912 case SOCKOP_connect
:
1915 abi_ulong target_addr
;
1918 if (get_user_ual(sockfd
, vptr
)
1919 || get_user_ual(target_addr
, vptr
+ n
)
1920 || get_user_ual(addrlen
, vptr
+ 2 * n
))
1921 return -TARGET_EFAULT
;
1923 ret
= do_connect(sockfd
, target_addr
, addrlen
);
1928 abi_ulong sockfd
, backlog
;
1930 if (get_user_ual(sockfd
, vptr
)
1931 || get_user_ual(backlog
, vptr
+ n
))
1932 return -TARGET_EFAULT
;
1934 ret
= get_errno(listen(sockfd
, backlog
));
1940 abi_ulong target_addr
, target_addrlen
;
1942 if (get_user_ual(sockfd
, vptr
)
1943 || get_user_ual(target_addr
, vptr
+ n
)
1944 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1945 return -TARGET_EFAULT
;
1947 ret
= do_accept(sockfd
, target_addr
, target_addrlen
);
1950 case SOCKOP_getsockname
:
1953 abi_ulong target_addr
, target_addrlen
;
1955 if (get_user_ual(sockfd
, vptr
)
1956 || get_user_ual(target_addr
, vptr
+ n
)
1957 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1958 return -TARGET_EFAULT
;
1960 ret
= do_getsockname(sockfd
, target_addr
, target_addrlen
);
1963 case SOCKOP_getpeername
:
1966 abi_ulong target_addr
, target_addrlen
;
1968 if (get_user_ual(sockfd
, vptr
)
1969 || get_user_ual(target_addr
, vptr
+ n
)
1970 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1971 return -TARGET_EFAULT
;
1973 ret
= do_getpeername(sockfd
, target_addr
, target_addrlen
);
1976 case SOCKOP_socketpair
:
1978 abi_ulong domain
, type
, protocol
;
1981 if (get_user_ual(domain
, vptr
)
1982 || get_user_ual(type
, vptr
+ n
)
1983 || get_user_ual(protocol
, vptr
+ 2 * n
)
1984 || get_user_ual(tab
, vptr
+ 3 * n
))
1985 return -TARGET_EFAULT
;
1987 ret
= do_socketpair(domain
, type
, protocol
, tab
);
1997 if (get_user_ual(sockfd
, vptr
)
1998 || get_user_ual(msg
, vptr
+ n
)
1999 || get_user_ual(len
, vptr
+ 2 * n
)
2000 || get_user_ual(flags
, vptr
+ 3 * n
))
2001 return -TARGET_EFAULT
;
2003 ret
= do_sendto(sockfd
, msg
, len
, flags
, 0, 0);
2013 if (get_user_ual(sockfd
, vptr
)
2014 || get_user_ual(msg
, vptr
+ n
)
2015 || get_user_ual(len
, vptr
+ 2 * n
)
2016 || get_user_ual(flags
, vptr
+ 3 * n
))
2017 return -TARGET_EFAULT
;
2019 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, 0, 0);
2031 if (get_user_ual(sockfd
, vptr
)
2032 || get_user_ual(msg
, vptr
+ n
)
2033 || get_user_ual(len
, vptr
+ 2 * n
)
2034 || get_user_ual(flags
, vptr
+ 3 * n
)
2035 || get_user_ual(addr
, vptr
+ 4 * n
)
2036 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2037 return -TARGET_EFAULT
;
2039 ret
= do_sendto(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2042 case SOCKOP_recvfrom
:
2051 if (get_user_ual(sockfd
, vptr
)
2052 || get_user_ual(msg
, vptr
+ n
)
2053 || get_user_ual(len
, vptr
+ 2 * n
)
2054 || get_user_ual(flags
, vptr
+ 3 * n
)
2055 || get_user_ual(addr
, vptr
+ 4 * n
)
2056 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2057 return -TARGET_EFAULT
;
2059 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2062 case SOCKOP_shutdown
:
2064 abi_ulong sockfd
, how
;
2066 if (get_user_ual(sockfd
, vptr
)
2067 || get_user_ual(how
, vptr
+ n
))
2068 return -TARGET_EFAULT
;
2070 ret
= get_errno(shutdown(sockfd
, how
));
2073 case SOCKOP_sendmsg
:
2074 case SOCKOP_recvmsg
:
2077 abi_ulong target_msg
;
2080 if (get_user_ual(fd
, vptr
)
2081 || get_user_ual(target_msg
, vptr
+ n
)
2082 || get_user_ual(flags
, vptr
+ 2 * n
))
2083 return -TARGET_EFAULT
;
2085 ret
= do_sendrecvmsg(fd
, target_msg
, flags
,
2086 (num
== SOCKOP_sendmsg
));
2089 case SOCKOP_setsockopt
:
2097 if (get_user_ual(sockfd
, vptr
)
2098 || get_user_ual(level
, vptr
+ n
)
2099 || get_user_ual(optname
, vptr
+ 2 * n
)
2100 || get_user_ual(optval
, vptr
+ 3 * n
)
2101 || get_user_ual(optlen
, vptr
+ 4 * n
))
2102 return -TARGET_EFAULT
;
2104 ret
= do_setsockopt(sockfd
, level
, optname
, optval
, optlen
);
2107 case SOCKOP_getsockopt
:
2115 if (get_user_ual(sockfd
, vptr
)
2116 || get_user_ual(level
, vptr
+ n
)
2117 || get_user_ual(optname
, vptr
+ 2 * n
)
2118 || get_user_ual(optval
, vptr
+ 3 * n
)
2119 || get_user_ual(optlen
, vptr
+ 4 * n
))
2120 return -TARGET_EFAULT
;
2122 ret
= do_getsockopt(sockfd
, level
, optname
, optval
, optlen
);
2126 gemu_log("Unsupported socketcall: %d\n", num
);
2127 ret
= -TARGET_ENOSYS
;
2134 #define N_SHM_REGIONS 32
2136 static struct shm_region
{
2139 } shm_regions
[N_SHM_REGIONS
];
2141 struct target_ipc_perm
2148 unsigned short int mode
;
2149 unsigned short int __pad1
;
2150 unsigned short int __seq
;
2151 unsigned short int __pad2
;
2152 abi_ulong __unused1
;
2153 abi_ulong __unused2
;
2156 struct target_semid_ds
2158 struct target_ipc_perm sem_perm
;
2159 abi_ulong sem_otime
;
2160 abi_ulong __unused1
;
2161 abi_ulong sem_ctime
;
2162 abi_ulong __unused2
;
2163 abi_ulong sem_nsems
;
2164 abi_ulong __unused3
;
2165 abi_ulong __unused4
;
2168 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2169 abi_ulong target_addr
)
2171 struct target_ipc_perm
*target_ip
;
2172 struct target_semid_ds
*target_sd
;
2174 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2175 return -TARGET_EFAULT
;
2176 target_ip
= &(target_sd
->sem_perm
);
2177 host_ip
->__key
= tswapl(target_ip
->__key
);
2178 host_ip
->uid
= tswapl(target_ip
->uid
);
2179 host_ip
->gid
= tswapl(target_ip
->gid
);
2180 host_ip
->cuid
= tswapl(target_ip
->cuid
);
2181 host_ip
->cgid
= tswapl(target_ip
->cgid
);
2182 host_ip
->mode
= tswapl(target_ip
->mode
);
2183 unlock_user_struct(target_sd
, target_addr
, 0);
2187 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2188 struct ipc_perm
*host_ip
)
2190 struct target_ipc_perm
*target_ip
;
2191 struct target_semid_ds
*target_sd
;
2193 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2194 return -TARGET_EFAULT
;
2195 target_ip
= &(target_sd
->sem_perm
);
2196 target_ip
->__key
= tswapl(host_ip
->__key
);
2197 target_ip
->uid
= tswapl(host_ip
->uid
);
2198 target_ip
->gid
= tswapl(host_ip
->gid
);
2199 target_ip
->cuid
= tswapl(host_ip
->cuid
);
2200 target_ip
->cgid
= tswapl(host_ip
->cgid
);
2201 target_ip
->mode
= tswapl(host_ip
->mode
);
2202 unlock_user_struct(target_sd
, target_addr
, 1);
2206 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2207 abi_ulong target_addr
)
2209 struct target_semid_ds
*target_sd
;
2211 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2212 return -TARGET_EFAULT
;
2213 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2214 return -TARGET_EFAULT
;
2215 host_sd
->sem_nsems
= tswapl(target_sd
->sem_nsems
);
2216 host_sd
->sem_otime
= tswapl(target_sd
->sem_otime
);
2217 host_sd
->sem_ctime
= tswapl(target_sd
->sem_ctime
);
2218 unlock_user_struct(target_sd
, target_addr
, 0);
2222 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2223 struct semid_ds
*host_sd
)
2225 struct target_semid_ds
*target_sd
;
2227 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2228 return -TARGET_EFAULT
;
2229 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2230 return -TARGET_EFAULT
;;
2231 target_sd
->sem_nsems
= tswapl(host_sd
->sem_nsems
);
2232 target_sd
->sem_otime
= tswapl(host_sd
->sem_otime
);
2233 target_sd
->sem_ctime
= tswapl(host_sd
->sem_ctime
);
2234 unlock_user_struct(target_sd
, target_addr
, 1);
2238 struct target_seminfo
{
2251 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2252 struct seminfo
*host_seminfo
)
2254 struct target_seminfo
*target_seminfo
;
2255 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2256 return -TARGET_EFAULT
;
2257 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2258 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2259 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2260 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2261 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2262 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2263 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2264 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2265 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2266 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2267 unlock_user_struct(target_seminfo
, target_addr
, 1);
2273 struct semid_ds
*buf
;
2274 unsigned short *array
;
2275 struct seminfo
*__buf
;
2278 union target_semun
{
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
    unsigned short *array;
    struct semid_ds semid_ds;

    semun.buf = &semid_ds;
    ret = semctl(semid, 0, IPC_STAT, semun);
        return get_errno(ret);
    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
        return -TARGET_EFAULT;
    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    unlock_user(array, target_addr, 0);

static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
    unsigned short *array;
    struct semid_ds semid_ds;

    semun.buf = &semid_ds;
    ret = semctl(semid, 0, IPC_STAT, semun);
        return get_errno(ret);
    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
        return -TARGET_EFAULT;
    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    unlock_user(array, target_addr, 1);
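
/* do_semctl(): convert the target semun argument (val, array, buf or
   __buf, depending on cmd) to its host form, call the host semctl(),
   and convert any result buffers back to the target layout. */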
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;

        arg.val = tswapl(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswapl(arg.val);
        err = target_to_host_semarray(semid, &array, target_su.array);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
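
/* semop(2) support: the guest sembuf array is converted element by
   element so that sem_num, sem_op and sem_flg are each byte-swapped
   individually via __get_user(). */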
struct target_sembuf {
    unsigned short sem_num;

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
    struct target_sembuf *target_sembuf;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
        return -TARGET_EFAULT;
    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    unlock_user(target_sembuf, target_addr, 0);

static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;
    return semop(semid, sops, nsops);
struct target_msqid_ds
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
    abi_ulong __msg_cbytes;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;

static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapl(target_md->msg_stime);
    host_md->msg_rtime = tswapl(target_md->msg_rtime);
    host_md->msg_ctime = tswapl(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapl(target_md->msg_qnum);
    host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
    host_md->msg_lspid = tswapl(target_md->msg_lspid);
    host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);

static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapl(host_md->msg_stime);
    target_md->msg_rtime = tswapl(host_md->msg_rtime);
    target_md->msg_ctime = tswapl(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapl(host_md->msg_qnum);
    target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
    target_md->msg_lspid = tswapl(host_md->msg_lspid);
    target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
struct target_msginfo {
    unsigned short int msgseg;

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);

static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, NULL));
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
struct target_msgbuf {

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    unlock_user_struct(target_mb, msgp, 0);

static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    target_mb->mtype = tswapl(host_mb->mtype);
    unlock_user_struct(target_mb, msgp, 1);
struct target_shmid_ds
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;
    abi_ulong shm_atime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
    abi_ulong shm_dtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
    abi_ulong shm_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;

static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);

static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
struct target_shminfo {

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);

struct target_shm_info {
    abi_ulong swap_attempts;
    abi_ulong swap_successes;

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, NULL));
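
/* do_shmat(): the segment size is obtained with IPC_STAT up front so
   the guest page protection can be set for the whole mapping after the
   attach; if the guest did not supply an address, mmap_find_vma()
   chooses one and SHM_REMAP is used. */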
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
    struct shmid_ds shm_info;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            host_raddr = (void *)-1;
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);

    if (host_raddr == (void *)-1) {
        return get_errno((long)host_raddr);
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;

static inline abi_long do_shmdt(abi_ulong shmaddr)
    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
    return get_errno(shmdt(g2h(shmaddr)));
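
/* do_ipc(): on targets that funnel the SysV IPC calls through a single
   ipc syscall, demultiplex the call number (version in the upper 16
   bits) to the individual semaphore, message and shared-memory helpers
   above. */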
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
    version = call >> 16;

        ret = do_semop(first, ptr, second);
        ret = get_errno(semget(first, second, third));
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        ret = get_errno(msgget(first, second));
        ret = do_msgsnd(first, ptr, second, third);
        ret = do_msgctl(first, second, ptr);
            struct target_ipc_kludge {
            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;
            ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
            unlock_user_struct(tmp, ptr, 0);
            ret = do_msgrcv(first, ptr, second, fifth, third);
        raddr = do_shmat(first, ptr, second);
        if (is_error(raddr))
            return get_errno(raddr);
        if (put_user_ual(raddr, third))
            return -TARGET_EFAULT;
        ret = -TARGET_EINVAL;
        ret = do_shmdt(ptr);
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        /* IPC_* and SHM_* command values are the same on all linux platforms */
        ret = do_shmctl(first, second, third);
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
/* kernel structure types definitions */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
#include "syscall_types.h"
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry {
    unsigned int target_cmd;
    unsigned int host_cmd;
    const argtype arg_type[5];

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096

static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, { __VA_ARGS__ } },
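
/* do_ioctl(): look the command up in ioctl_entries[]; for pointer
   arguments the thunk layer (thunk_convert) translates the structure
   between target and host layouts in buf_temp before and after the
   host ioctl() call, according to the IOC_R and IOC_W access flags. */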
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
    const IOCTLEntry *ie;
    const argtype *arg_type;
    uint8_t buf_temp[MAX_STRUCT_SIZE];

        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        if (ie->target_cmd == cmd)
    arg_type = ie->arg_type;
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
    switch(arg_type[0]) {
        ret = get_errno(ioctl(fd, ie->host_cmd));
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
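
/* Terminal flag translation tables: each bitmask_transtbl entry maps a
   (target mask, target bits) pair to the corresponding (host mask,
   host bits) pair for target_to_host_bitmask() and
   host_to_target_bitmask(). */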
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },

static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },

static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },

static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
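
/* termios conversion: the iflag, oflag, cflag and lflag words go
   through the bitmask tables above, c_line is copied as-is, and the
   c_cc control characters are remapped index by index between target
   and host numbering. */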
static void target_to_host_termios (void *dst, const void *src)
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];

static void host_to_target_termios (void *dst, const void *src)
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },

static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3261 #if defined(TARGET_I386)
3263 /* NOTE: there is really one LDT for all the threads */
3264 static uint8_t *ldt_table
;
3266 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3273 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3274 if (size
> bytecount
)
3276 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3278 return -TARGET_EFAULT
;
3279 /* ??? Should this by byteswapped? */
3280 memcpy(p
, ldt_table
, size
);
3281 unlock_user(p
, ptr
, size
);
3285 /* XXX: add locking support */
3286 static abi_long
write_ldt(CPUX86State
*env
,
3287 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3289 struct target_modify_ldt_ldt_s ldt_info
;
3290 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3291 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3292 int seg_not_present
, useable
, lm
;
3293 uint32_t *lp
, entry_1
, entry_2
;
3295 if (bytecount
!= sizeof(ldt_info
))
3296 return -TARGET_EINVAL
;
3297 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3298 return -TARGET_EFAULT
;
3299 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3300 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3301 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3302 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3303 unlock_user_struct(target_ldt_info
, ptr
, 0);
3305 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3306 return -TARGET_EINVAL
;
3307 seg_32bit
= ldt_info
.flags
& 1;
3308 contents
= (ldt_info
.flags
>> 1) & 3;
3309 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3310 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3311 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3312 useable
= (ldt_info
.flags
>> 6) & 1;
3316 lm
= (ldt_info
.flags
>> 7) & 1;
3318 if (contents
== 3) {
3320 return -TARGET_EINVAL
;
3321 if (seg_not_present
== 0)
3322 return -TARGET_EINVAL
;
3324 /* allocate the LDT */
3326 env
->ldt
.base
= target_mmap(0,
3327 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3328 PROT_READ
|PROT_WRITE
,
3329 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3330 if (env
->ldt
.base
== -1)
3331 return -TARGET_ENOMEM
;
3332 memset(g2h(env
->ldt
.base
), 0,
3333 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3334 env
->ldt
.limit
= 0xffff;
3335 ldt_table
= g2h(env
->ldt
.base
);
3338 /* NOTE: same code as Linux kernel */
3339 /* Allow LDTs to be cleared by the user. */
3340 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3343 read_exec_only
== 1 &&
3345 limit_in_pages
== 0 &&
3346 seg_not_present
== 1 &&
3354 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3355 (ldt_info
.limit
& 0x0ffff);
3356 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3357 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3358 (ldt_info
.limit
& 0xf0000) |
3359 ((read_exec_only
^ 1) << 9) |
3361 ((seg_not_present
^ 1) << 15) |
3363 (limit_in_pages
<< 23) |
3367 entry_2
|= (useable
<< 20);
3369 /* Install the new entry ... */
3371 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3372 lp
[0] = tswap32(entry_1
);
3373 lp
[1] = tswap32(entry_2
);
3377 /* specific and weird i386 syscalls */
3378 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3379 unsigned long bytecount
)
3385 ret
= read_ldt(ptr
, bytecount
);
3388 ret
= write_ldt(env
, ptr
, bytecount
, 1);
3391 ret
= write_ldt(env
, ptr
, bytecount
, 0);
3394 ret
= -TARGET_ENOSYS
;
3400 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3401 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3403 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3404 struct target_modify_ldt_ldt_s ldt_info
;
3405 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3406 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3407 int seg_not_present
, useable
, lm
;
3408 uint32_t *lp
, entry_1
, entry_2
;
3411 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3412 if (!target_ldt_info
)
3413 return -TARGET_EFAULT
;
3414 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3415 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3416 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3417 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3418 if (ldt_info
.entry_number
== -1) {
3419 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
3420 if (gdt_table
[i
] == 0) {
3421 ldt_info
.entry_number
= i
;
3422 target_ldt_info
->entry_number
= tswap32(i
);
3427 unlock_user_struct(target_ldt_info
, ptr
, 1);
3429 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
3430 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
3431 return -TARGET_EINVAL
;
3432 seg_32bit
= ldt_info
.flags
& 1;
3433 contents
= (ldt_info
.flags
>> 1) & 3;
3434 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3435 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3436 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3437 useable
= (ldt_info
.flags
>> 6) & 1;
3441 lm
= (ldt_info
.flags
>> 7) & 1;
3444 if (contents
== 3) {
3445 if (seg_not_present
== 0)
3446 return -TARGET_EINVAL
;
3449 /* NOTE: same code as Linux kernel */
3450 /* Allow LDTs to be cleared by the user. */
3451 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3452 if ((contents
== 0 &&
3453 read_exec_only
== 1 &&
3455 limit_in_pages
== 0 &&
3456 seg_not_present
== 1 &&
3464 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3465 (ldt_info
.limit
& 0x0ffff);
3466 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3467 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3468 (ldt_info
.limit
& 0xf0000) |
3469 ((read_exec_only
^ 1) << 9) |
3471 ((seg_not_present
^ 1) << 15) |
3473 (limit_in_pages
<< 23) |
3478 /* Install the new entry ... */
3480 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
3481 lp
[0] = tswap32(entry_1
);
3482 lp
[1] = tswap32(entry_2
);
3486 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3488 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3489 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3490 uint32_t base_addr
, limit
, flags
;
3491 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
3492 int seg_not_present
, useable
, lm
;
3493 uint32_t *lp
, entry_1
, entry_2
;
3495 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3496 if (!target_ldt_info
)
3497 return -TARGET_EFAULT
;
3498 idx
= tswap32(target_ldt_info
->entry_number
);
3499 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
3500 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
3501 unlock_user_struct(target_ldt_info
, ptr
, 1);
3502 return -TARGET_EINVAL
;
3504 lp
= (uint32_t *)(gdt_table
+ idx
);
3505 entry_1
= tswap32(lp
[0]);
3506 entry_2
= tswap32(lp
[1]);
3508 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
3509 contents
= (entry_2
>> 10) & 3;
3510 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
3511 seg_32bit
= (entry_2
>> 22) & 1;
3512 limit_in_pages
= (entry_2
>> 23) & 1;
3513 useable
= (entry_2
>> 20) & 1;
3517 lm
= (entry_2
>> 21) & 1;
3519 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
3520 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
3521 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
3522 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
3523 base_addr
= (entry_1
>> 16) |
3524 (entry_2
& 0xff000000) |
3525 ((entry_2
& 0xff) << 16);
3526 target_ldt_info
->base_addr
= tswapl(base_addr
);
3527 target_ldt_info
->limit
= tswap32(limit
);
3528 target_ldt_info
->flags
= tswap32(flags
);
3529 unlock_user_struct(target_ldt_info
, ptr
, 1);
3532 #endif /* TARGET_I386 && TARGET_ABI32 */
3534 #ifndef TARGET_ABI32
3535 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
3542 case TARGET_ARCH_SET_GS
:
3543 case TARGET_ARCH_SET_FS
:
3544 if (code
== TARGET_ARCH_SET_GS
)
3548 cpu_x86_load_seg(env
, idx
, 0);
3549 env
->segs
[idx
].base
= addr
;
3551 case TARGET_ARCH_GET_GS
:
3552 case TARGET_ARCH_GET_FS
:
3553 if (code
== TARGET_ARCH_GET_GS
)
3557 val
= env
->segs
[idx
].base
;
3558 if (put_user(val
, addr
, abi_ulong
))
3559 return -TARGET_EFAULT
;
3562 ret
= -TARGET_EINVAL
;
3569 #endif /* defined(TARGET_I386) */
3571 #if defined(CONFIG_USE_NPTL)
3573 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
3575 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
3578 pthread_mutex_t mutex
;
3579 pthread_cond_t cond
;
3582 abi_ulong child_tidptr
;
3583 abi_ulong parent_tidptr
;
3587 static void *clone_func(void *arg
)
3589 new_thread_info
*info
= arg
;
3595 ts
= (TaskState
*)thread_env
->opaque
;
3596 info
->tid
= gettid();
3597 env
->host_tid
= info
->tid
;
3599 if (info
->child_tidptr
)
3600 put_user_u32(info
->tid
, info
->child_tidptr
);
3601 if (info
->parent_tidptr
)
3602 put_user_u32(info
->tid
, info
->parent_tidptr
);
3603 /* Enable signals. */
3604 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
3605 /* Signal to the parent that we're ready. */
3606 pthread_mutex_lock(&info
->mutex
);
3607 pthread_cond_broadcast(&info
->cond
);
3608 pthread_mutex_unlock(&info
->mutex
);
3609 /* Wait until the parent has finshed initializing the tls state. */
3610 pthread_mutex_lock(&clone_lock
);
3611 pthread_mutex_unlock(&clone_lock
);
3617 /* this stack is the equivalent of the kernel stack associated with a
3619 #define NEW_STACK_SIZE 8192
3621 static int clone_func(void *arg
)
3623 CPUState
*env
= arg
;
3630 /* do_fork() Must return host values and target errnos (unlike most
3631 do_*() functions). */
3632 static int do_fork(CPUState
*env
, unsigned int flags
, abi_ulong newsp
,
3633 abi_ulong parent_tidptr
, target_ulong newtls
,
3634 abi_ulong child_tidptr
)
3639 #if defined(CONFIG_USE_NPTL)
3640 unsigned int nptl_flags
;
3646 /* Emulate vfork() with fork() */
3647 if (flags
& CLONE_VFORK
)
3648 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
3650 if (flags
& CLONE_VM
) {
3651 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
3652 #if defined(CONFIG_USE_NPTL)
3653 new_thread_info info
;
3654 pthread_attr_t attr
;
3656 ts
= qemu_mallocz(sizeof(TaskState
));
3657 init_task_state(ts
);
3658 /* we create a new CPU instance. */
3659 new_env
= cpu_copy(env
);
3660 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3663 /* Init regs that differ from the parent. */
3664 cpu_clone_regs(new_env
, newsp
);
3665 new_env
->opaque
= ts
;
3666 ts
->bprm
= parent_ts
->bprm
;
3667 ts
->info
= parent_ts
->info
;
3668 #if defined(CONFIG_USE_NPTL)
3670 flags
&= ~CLONE_NPTL_FLAGS2
;
3672 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
3673 ts
->child_tidptr
= child_tidptr
;
3676 if (nptl_flags
& CLONE_SETTLS
)
3677 cpu_set_tls (new_env
, newtls
);
3679 /* Grab a mutex so that thread setup appears atomic. */
3680 pthread_mutex_lock(&clone_lock
);
3682 memset(&info
, 0, sizeof(info
));
3683 pthread_mutex_init(&info
.mutex
, NULL
);
3684 pthread_mutex_lock(&info
.mutex
);
3685 pthread_cond_init(&info
.cond
, NULL
);
3687 if (nptl_flags
& CLONE_CHILD_SETTID
)
3688 info
.child_tidptr
= child_tidptr
;
3689 if (nptl_flags
& CLONE_PARENT_SETTID
)
3690 info
.parent_tidptr
= parent_tidptr
;
3692 ret
= pthread_attr_init(&attr
);
3693 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
3694 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
3695 /* It is not safe to deliver signals until the child has finished
3696 initializing, so temporarily block all signals. */
3697 sigfillset(&sigmask
);
3698 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
3700 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
3701 /* TODO: Free new CPU state if thread creation failed. */
3703 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
3704 pthread_attr_destroy(&attr
);
3706 /* Wait for the child to initialize. */
3707 pthread_cond_wait(&info
.cond
, &info
.mutex
);
3709 if (flags
& CLONE_PARENT_SETTID
)
3710 put_user_u32(ret
, parent_tidptr
);
3714 pthread_mutex_unlock(&info
.mutex
);
3715 pthread_cond_destroy(&info
.cond
);
3716 pthread_mutex_destroy(&info
.mutex
);
3717 pthread_mutex_unlock(&clone_lock
);
3719 if (flags
& CLONE_NPTL_FLAGS2
)
3721 /* This is probably going to die very quickly, but do it anyway. */
3722 new_stack
= qemu_mallocz (NEW_STACK_SIZE
);
3724 ret
= __clone2(clone_func
, new_stack
, NEW_STACK_SIZE
, flags
, new_env
);
3726 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
3730 /* if no CLONE_VM, we consider it is a fork */
3731 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
3736 /* Child Process. */
3737 cpu_clone_regs(env
, newsp
);
3739 #if defined(CONFIG_USE_NPTL)
3740 /* There is a race condition here. The parent process could
3741 theoretically read the TID in the child process before the child
3742 tid is set. This would require using either ptrace
3743 (not implemented) or having *_tidptr to point at a shared memory
3744 mapping. We can't repeat the spinlock hack used above because
3745 the child process gets its own copy of the lock. */
3746 if (flags
& CLONE_CHILD_SETTID
)
3747 put_user_u32(gettid(), child_tidptr
);
3748 if (flags
& CLONE_PARENT_SETTID
)
3749 put_user_u32(gettid(), parent_tidptr
);
3750 ts
= (TaskState
*)env
->opaque
;
3751 if (flags
& CLONE_SETTLS
)
3752 cpu_set_tls (env
, newtls
);
3753 if (flags
& CLONE_CHILD_CLEARTID
)
3754 ts
->child_tidptr
= child_tidptr
;
3763 /* warning : doesn't handle linux specific flags... */
3764 static int target_to_host_fcntl_cmd(int cmd
)
3767 case TARGET_F_DUPFD
:
3768 case TARGET_F_GETFD
:
3769 case TARGET_F_SETFD
:
3770 case TARGET_F_GETFL
:
3771 case TARGET_F_SETFL
:
3773 case TARGET_F_GETLK
:
3775 case TARGET_F_SETLK
:
3777 case TARGET_F_SETLKW
:
3779 case TARGET_F_GETOWN
:
3781 case TARGET_F_SETOWN
:
3783 case TARGET_F_GETSIG
:
3785 case TARGET_F_SETSIG
:
3787 #if TARGET_ABI_BITS == 32
3788 case TARGET_F_GETLK64
:
3790 case TARGET_F_SETLK64
:
3792 case TARGET_F_SETLKW64
:
3795 case TARGET_F_SETLEASE
:
3797 case TARGET_F_GETLEASE
:
3799 #ifdef F_DUPFD_CLOEXEC
3800 case TARGET_F_DUPFD_CLOEXEC
:
3801 return F_DUPFD_CLOEXEC
;
3803 case TARGET_F_NOTIFY
:
3806 return -TARGET_EINVAL
;
3808 return -TARGET_EINVAL
;
3811 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
3814 struct target_flock
*target_fl
;
3815 struct flock64 fl64
;
3816 struct target_flock64
*target_fl64
;
3818 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
3820 if (host_cmd
== -TARGET_EINVAL
)
3824 case TARGET_F_GETLK
:
3825 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3826 return -TARGET_EFAULT
;
3827 fl
.l_type
= tswap16(target_fl
->l_type
);
3828 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3829 fl
.l_start
= tswapl(target_fl
->l_start
);
3830 fl
.l_len
= tswapl(target_fl
->l_len
);
3831 fl
.l_pid
= tswap32(target_fl
->l_pid
);
3832 unlock_user_struct(target_fl
, arg
, 0);
3833 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
3835 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
3836 return -TARGET_EFAULT
;
3837 target_fl
->l_type
= tswap16(fl
.l_type
);
3838 target_fl
->l_whence
= tswap16(fl
.l_whence
);
3839 target_fl
->l_start
= tswapl(fl
.l_start
);
3840 target_fl
->l_len
= tswapl(fl
.l_len
);
3841 target_fl
->l_pid
= tswap32(fl
.l_pid
);
3842 unlock_user_struct(target_fl
, arg
, 1);
3846 case TARGET_F_SETLK
:
3847 case TARGET_F_SETLKW
:
3848 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3849 return -TARGET_EFAULT
;
3850 fl
.l_type
= tswap16(target_fl
->l_type
);
3851 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3852 fl
.l_start
= tswapl(target_fl
->l_start
);
3853 fl
.l_len
= tswapl(target_fl
->l_len
);
3854 fl
.l_pid
= tswap32(target_fl
->l_pid
);
3855 unlock_user_struct(target_fl
, arg
, 0);
3856 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
3859 case TARGET_F_GETLK64
:
3860 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
3861 return -TARGET_EFAULT
;
3862 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
3863 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
3864 fl64
.l_start
= tswapl(target_fl64
->l_start
);
3865 fl64
.l_len
= tswapl(target_fl64
->l_len
);
3866 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
3867 unlock_user_struct(target_fl64
, arg
, 0);
3868 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
3870 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
3871 return -TARGET_EFAULT
;
3872 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
3873 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
3874 target_fl64
->l_start
= tswapl(fl64
.l_start
);
3875 target_fl64
->l_len
= tswapl(fl64
.l_len
);
3876 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
3877 unlock_user_struct(target_fl64
, arg
, 1);
3880 case TARGET_F_SETLK64
:
3881 case TARGET_F_SETLKW64
:
3882 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
3883 return -TARGET_EFAULT
;
3884 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
3885 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
3886 fl64
.l_start
= tswapl(target_fl64
->l_start
);
3887 fl64
.l_len
= tswapl(target_fl64
->l_len
);
3888 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
3889 unlock_user_struct(target_fl64
, arg
, 0);
3890 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
3893 case TARGET_F_GETFL
:
3894 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
3896 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
3900 case TARGET_F_SETFL
:
3901 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
3904 case TARGET_F_SETOWN
:
3905 case TARGET_F_GETOWN
:
3906 case TARGET_F_SETSIG
:
3907 case TARGET_F_GETSIG
:
3908 case TARGET_F_SETLEASE
:
3909 case TARGET_F_GETLEASE
:
3910 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
3914 ret
= get_errno(fcntl(fd
, cmd
, arg
));
static inline int high2lowuid(int uid)
static inline int high2lowgid(int gid)
static inline int low2highuid(int uid)
    if ((int16_t)uid == -1)
static inline int low2highgid(int gid)
    if ((int16_t)gid == -1)
#endif /* USE_UID16 */
3956 void syscall_init(void)
3959 const argtype
*arg_type
;
3963 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3964 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3965 #include "syscall_types.h"
3967 #undef STRUCT_SPECIAL
3969 /* we patch the ioctl size if necessary. We rely on the fact that
3970 no ioctl has all the bits at '1' in the size field */
3972 while (ie
->target_cmd
!= 0) {
3973 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
3974 TARGET_IOC_SIZEMASK
) {
3975 arg_type
= ie
->arg_type
;
3976 if (arg_type
[0] != TYPE_PTR
) {
3977 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
3982 size
= thunk_type_size(arg_type
, 0);
3983 ie
->target_cmd
= (ie
->target_cmd
&
3984 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
3985 (size
<< TARGET_IOC_SIZESHIFT
);
3988 /* Build target_to_host_errno_table[] table from
3989 * host_to_target_errno_table[]. */
3990 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
3991 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
3993 /* automatic consistency check if same arch */
3994 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3995 (defined(__x86_64__) && defined(TARGET_X86_64))
3996 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
3997 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3998 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
    return ((uint64_t)word1 << 32) | word0;
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
    if (((CPUARMState *)cpu_env)->eabi)
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
    if (((CPUARMState *)cpu_env)->eabi)
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapl(target_ts->tv_sec);
    host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapl(host_ts->tv_sec);
    target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
4081 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4082 static inline abi_long
host_to_target_stat64(void *cpu_env
,
4083 abi_ulong target_addr
,
4084 struct stat
*host_st
)
4087 if (((CPUARMState
*)cpu_env
)->eabi
) {
4088 struct target_eabi_stat64
*target_st
;
4090 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4091 return -TARGET_EFAULT
;
4092 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
4093 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4094 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4095 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4096 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4098 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4099 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4100 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4101 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4102 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4103 __put_user(host_st
->st_size
, &target_st
->st_size
);
4104 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4105 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4106 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4107 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4108 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4109 unlock_user_struct(target_st
, target_addr
, 1);
4113 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4114 struct target_stat
*target_st
;
4116 struct target_stat64
*target_st
;
4119 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4120 return -TARGET_EFAULT
;
4121 memset(target_st
, 0, sizeof(*target_st
));
4122 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4123 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4124 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4125 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4127 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4128 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4129 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4130 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4131 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4132 /* XXX: better use of kernel struct */
4133 __put_user(host_st
->st_size
, &target_st
->st_size
);
4134 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4135 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4136 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4137 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4138 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4139 unlock_user_struct(target_st
, target_addr
, 1);
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things. However implementing
   futexes locally would make futexes shared between multiple processes
   tricky. However they're probably useless because guest atomic
   operations won't work either. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
    struct timespec ts, *pts;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
            target_to_host_timespec(pts, timeout);
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_CMP_REQUEUE:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler. We do not need to tswap TIMEOUT
           since it's not compared to guest memory. */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                         (base_op == FUTEX_CMP_REQUEUE
    return -TARGET_ENOSYS;
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
static int host_to_target_waitstatus(int status)
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
4213 int get_osversion(void)
4215 static int osversion
;
4216 struct new_utsname buf
;
4221 if (qemu_uname_release
&& *qemu_uname_release
) {
4222 s
= qemu_uname_release
;
4224 if (sys_uname(&buf
))
4229 for (i
= 0; i
< 3; i
++) {
4231 while (*s
>= '0' && *s
<= '9') {
4236 tmp
= (tmp
<< 8) + n
;
4244 /* do_syscall() should always have a single exit point at the end so
4245 that actions, such as logging of syscall results, can be performed.
4246 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4247 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
4248 abi_long arg2
, abi_long arg3
, abi_long arg4
,
4249 abi_long arg5
, abi_long arg6
)
4257 gemu_log("syscall %d", num
);
4260 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
4263 case TARGET_NR_exit
:
4264 #ifdef CONFIG_USE_NPTL
4265 /* In old applications this may be used to implement _exit(2).
4266 However in threaded applictions it is used for thread termination,
4267 and _exit_group is used for application termination.
4268 Do thread termination if we have more then one thread. */
4269 /* FIXME: This probably breaks if a signal arrives. We should probably
4270 be disabling signals. */
4271 if (first_cpu
->next_cpu
) {
4279 while (p
&& p
!= (CPUState
*)cpu_env
) {
4280 lastp
= &p
->next_cpu
;
4283 /* If we didn't find the CPU for this thread then something is
4287 /* Remove the CPU from the list. */
4288 *lastp
= p
        ->next_cpu;
        ts = ((CPUState *)cpu_env)->opaque;
        if (ts->child_tidptr) {
            put_user_u32(0, ts->child_tidptr);
            sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
        gdb_exit(cpu_env, arg1);
        ret = 0; /* avoid warning */
    case TARGET_NR_read:
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(read(arg1, p, arg3));
        unlock_user(p, arg2, ret);
    case TARGET_NR_write:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(write(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(open(path(p),
                             target_to_host_bitmask(arg2, fcntl_flags_tbl),
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_openat) && defined(__NR_openat)
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_openat(arg1,
                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
        unlock_user(p, arg2, 0);
    case TARGET_NR_close:
        ret = get_errno(close(arg1));
    case TARGET_NR_fork:
        ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        ret = get_errno(waitpid(arg1, &status, arg3));
        if (!is_error(ret) && arg2
            && put_user_s32(host_to_target_waitstatus(status), arg2))
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        ret = get_errno(waitid(arg1, arg2, &info, arg4));
        if (!is_error(ret) && arg3 && info.si_pid != 0) {
            if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
            host_to_target_siginfo(p, &info);
            unlock_user(p, arg3, sizeof(target_siginfo_t));
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(creat(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_link:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg2);
        ret = -TARGET_EFAULT;
        ret = get_errno(link(p, p2));
        unlock_user(p2, arg2, 0);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
    case TARGET_NR_linkat:
        p = lock_user_string(arg2);
        p2 = lock_user_string(arg4);
        ret = -TARGET_EFAULT;
        ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
        unlock_user(p, arg2, 0);
        unlock_user(p2, arg4, 0);
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_execve:
        char **argp, **envp;
        abi_ulong guest_argp;
        abi_ulong guest_envp;
        for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
            if (get_user_ual(addr, gp))
        for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
            if (get_user_ual(addr, gp))
        argp = alloca((argc + 1) * sizeof(void *));
        envp = alloca((envc + 1) * sizeof(void *));
        for (gp = guest_argp, q = argp; gp;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp))
            if (!(*q = lock_user_string(addr)))
        for (gp = guest_envp, q = envp; gp;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp))
            if (!(*q = lock_user_string(addr)))
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(execve(p, argp, envp));
        unlock_user(p, arg1, 0);
        ret = -TARGET_EFAULT;
        for (gp = guest_argp, q = argp; *q;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp)
            unlock_user(*q, addr, 0);
        for (gp = guest_envp, q = envp; *q;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp)
            unlock_user(*q, addr, 0);
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        ret = get_errno(time(&host_time));
            && put_user_sal(host_time, arg1))
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
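    /* The path-based cases above and below all follow the same pattern:
       lock_user_string() maps the guest path into host memory, the host
       syscall runs on that copy, and unlock_user() releases it again. */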
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_break
    case TARGET_NR_break:
#ifdef TARGET_NR_oldstat
    case TARGET_NR_oldstat:
    case TARGET_NR_lseek:
        ret = get_errno(lseek(arg1, arg2, arg3));
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
        ret = get_errno(getpid());
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        ret = get_errno(getpid());
    case TARGET_NR_mount:
        /* need to look at the data field */
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg2);
        p3 = lock_user_string(arg3);
        if (!p || !p2 || !p3)
            ret = -TARGET_EFAULT;
        /* FIXME - arg5 should be locked, but it isn't clear how to
         * do that since it's not guaranteed to be a NULL-terminated
         * string.
         */
        ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
        ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
        unlock_user(p, arg1, 0);
        unlock_user(p2, arg2, 0);
        unlock_user(p3, arg3, 0);
#ifdef TARGET_NR_umount
    case TARGET_NR_umount:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        if (get_user_sal(host_time, arg1))
        ret = get_errno(stime(&host_time));
    case TARGET_NR_ptrace:
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
#ifdef TARGET_NR_oldfstat
    case TARGET_NR_oldfstat:
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        ret = get_errno(pause());
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        struct utimbuf tbuf, *host_tbuf;
        struct target_utimbuf *target_tbuf;
        if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
        tbuf.actime = tswapl(target_tbuf->actime);
        tbuf.modtime = tswapl(target_tbuf->modtime);
        unlock_user_struct(target_tbuf, arg2, 0);
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(utime(p, host_tbuf));
        unlock_user(p, arg1, 0);
    case TARGET_NR_utimes:
        struct timeval *tvp, tv[2];
        if (copy_from_user_timeval(&tv[0], arg2)
            || copy_from_user_timeval(&tv[1],
                                      arg2 + sizeof(struct target_timeval)))
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(utimes(p, tvp));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
    case TARGET_NR_futimesat:
        struct timeval *tvp, tv[2];
        if (copy_from_user_timeval(&tv[0], arg3)
            || copy_from_user_timeval(&tv[1],
                                      arg3 + sizeof(struct target_timeval)))
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_futimesat(arg1, path(p), tvp));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_stty
    case TARGET_NR_stty:
#ifdef TARGET_NR_gtty
    case TARGET_NR_gtty:
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_faccessat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        ret = get_errno(nice(arg1));
#ifdef TARGET_NR_ftime
    case TARGET_NR_ftime:
    case TARGET_NR_sync:
    case TARGET_NR_kill:
        ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
    case TARGET_NR_rename:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg2);
        ret = -TARGET_EFAULT;
        ret = get_errno(rename(p, p2));
        unlock_user(p2, arg2, 0);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
    case TARGET_NR_renameat:
        p = lock_user_string(arg2);
        p2 = lock_user_string(arg4);
        ret = -TARGET_EFAULT;
        ret = get_errno(sys_renameat(arg1, p, arg3, p2));
        unlock_user(p2, arg4, 0);
        unlock_user(p, arg2, 0);
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        ret = get_errno(dup(arg1));
    case TARGET_NR_pipe:
        ret = do_pipe(cpu_env, arg1, 0, 0);
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        ret = do_pipe(cpu_env, arg1, arg2, 1);
    case TARGET_NR_times:
        struct target_tms *tmsp;
        ret = get_errno(times(&tms));
        tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
        tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
        tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
        tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
        tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
        ret = host_to_target_clock_t(ret);
#ifdef TARGET_NR_prof
    case TARGET_NR_prof:
#ifdef TARGET_NR_signal
    case TARGET_NR_signal:
    case TARGET_NR_acct:
        ret = get_errno(acct(NULL));
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(acct(path(p)));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_umount2 /* not on alpha */
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_lock
    case TARGET_NR_lock:
    case TARGET_NR_ioctl:
        ret = do_ioctl(arg1, arg2, arg3);
    case TARGET_NR_fcntl:
        ret = do_fcntl(arg1, arg2, arg3);
#ifdef TARGET_NR_mpx
    case TARGET_NR_setpgid:
        ret = get_errno(setpgid(arg1, arg2));
#ifdef TARGET_NR_ulimit
    case TARGET_NR_ulimit:
#ifdef TARGET_NR_oldolduname
    case TARGET_NR_oldolduname:
    case TARGET_NR_umask:
        ret = get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
    case TARGET_NR_ustat:
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
        ret = get_errno(dup3(arg1, arg2, arg3));
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        ret = get_errno(getppid());
    case TARGET_NR_getpgrp:
        ret = get_errno(getpgrp());
    case TARGET_NR_setsid:
        ret = get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
#if defined(TARGET_ALPHA)
        struct target_sigaction act, oact, *pact = 0;
        struct target_old_sigaction *old_act;
        if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
        act._sa_handler = old_act->_sa_handler;
        target_siginitset(&act.sa_mask, old_act->sa_mask);
        act.sa_flags = old_act->sa_flags;
        act.sa_restorer = 0;
        unlock_user_struct(old_act, arg2, 0);
        ret = get_errno(do_sigaction(arg1, pact, &oact));
        if (!is_error(ret) && arg3) {
            if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
            old_act->_sa_handler = oact._sa_handler;
            old_act->sa_mask = oact.sa_mask.sig[0];
            old_act->sa_flags = oact.sa_flags;
            unlock_user_struct(old_act, arg3, 1);
#elif defined(TARGET_MIPS)
        struct target_sigaction act, oact, *pact, *old_act;
        if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
        act._sa_handler = old_act->_sa_handler;
        target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
        act.sa_flags = old_act->sa_flags;
        unlock_user_struct(old_act, arg2, 0);
        ret = get_errno(do_sigaction(arg1, pact, &oact));
        if (!is_error(ret) && arg3) {
            if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
            old_act->_sa_handler = oact._sa_handler;
            old_act->sa_flags = oact.sa_flags;
            old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
            old_act->sa_mask.sig[1] = 0;
            old_act->sa_mask.sig[2] = 0;
            old_act->sa_mask.sig[3] = 0;
            unlock_user_struct(old_act, arg3, 1);
        struct target_old_sigaction *old_act;
        struct target_sigaction act, oact, *pact;
        if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
        act._sa_handler = old_act->_sa_handler;
        target_siginitset(&act.sa_mask, old_act->sa_mask);
        act.sa_flags = old_act->sa_flags;
        act.sa_restorer = old_act->sa_restorer;
        unlock_user_struct(old_act, arg2, 0);
        ret = get_errno(do_sigaction(arg1, pact, &oact));
        if (!is_error(ret) && arg3) {
            if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
            old_act->_sa_handler = oact._sa_handler;
            old_act->sa_mask = oact.sa_mask.sig[0];
            old_act->sa_flags = oact.sa_flags;
            old_act->sa_restorer = oact.sa_restorer;
            unlock_user_struct(old_act, arg3, 1);
    case TARGET_NR_rt_sigaction:
#if defined(TARGET_ALPHA)
        struct target_sigaction act, oact, *pact = 0;
        struct target_rt_sigaction *rt_act;
        /* ??? arg4 == sizeof(sigset_t). */
        if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
        act._sa_handler = rt_act->_sa_handler;
        act.sa_mask = rt_act->sa_mask;
        act.sa_flags = rt_act->sa_flags;
        act.sa_restorer = arg5;
        unlock_user_struct(rt_act, arg2, 0);
        ret = get_errno(do_sigaction(arg1, pact, &oact));
        if (!is_error(ret) && arg3) {
            if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
            rt_act->_sa_handler = oact._sa_handler;
            rt_act->sa_mask = oact.sa_mask;
            rt_act->sa_flags = oact.sa_flags;
            unlock_user_struct(rt_act, arg3, 1);
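        /* Generic rt_sigaction path: the guest's struct target_sigaction is
           locked in place and handed to do_sigaction(), which installs the
           corresponding host handler. */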
        struct target_sigaction *act;
        struct target_sigaction *oact;
        if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
        if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
            ret = -TARGET_EFAULT;
            goto rt_sigaction_fail;
        ret = get_errno(do_sigaction(arg1, act, oact));
        unlock_user_struct(act, arg2, 0);
        unlock_user_struct(oact, arg3, 1);
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        abi_ulong target_set;
        sigprocmask(0, NULL, &cur_set);
        host_to_target_old_sigset(&target_set, &cur_set);
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        sigset_t set, oset, cur_set;
        abi_ulong target_set = arg1;
        sigprocmask(0, NULL, &cur_set);
        target_to_host_old_sigset(&set, &target_set);
        sigorset(&set, &set, &cur_set);
        sigprocmask(SIG_SETMASK, &set, &oset);
        host_to_target_old_sigset(&target_set, &oset);
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
#if defined(TARGET_ALPHA)
        sigset_t set, oldset;
        case TARGET_SIG_BLOCK:
        case TARGET_SIG_UNBLOCK:
        case TARGET_SIG_SETMASK:
            ret = -TARGET_EINVAL;
        target_to_host_old_sigset(&set, &mask);
        ret = get_errno(sigprocmask(how, &set, &oldset));
        if (!is_error(ret)) {
            host_to_target_old_sigset(&mask, &oldset);
            ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
        sigset_t set, oldset, *set_ptr;
        case TARGET_SIG_BLOCK:
        case TARGET_SIG_UNBLOCK:
        case TARGET_SIG_SETMASK:
            ret = -TARGET_EINVAL;
        if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
        target_to_host_old_sigset(&set, p);
        unlock_user(p, arg2, 0);
        ret = get_errno(sigprocmask(how, set_ptr, &oldset));
        if (!is_error(ret) && arg3) {
            if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
            host_to_target_old_sigset(p, &oldset);
            unlock_user(p, arg3, sizeof(target_sigset_t));
    case TARGET_NR_rt_sigprocmask:
        sigset_t set, oldset, *set_ptr;
        case TARGET_SIG_BLOCK:
        case TARGET_SIG_UNBLOCK:
        case TARGET_SIG_SETMASK:
            ret = -TARGET_EINVAL;
        if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
        target_to_host_sigset(&set, p);
        unlock_user(p, arg2, 0);
        ret = get_errno(sigprocmask(how, set_ptr, &oldset));
        if (!is_error(ret) && arg3) {
            if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
            host_to_target_sigset(p, &oldset);
            unlock_user(p, arg3, sizeof(target_sigset_t));
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        ret = get_errno(sigpending(&set));
        if (!is_error(ret)) {
            if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
            host_to_target_old_sigset(p, &set);
            unlock_user(p, arg1, sizeof(target_sigset_t));
    case TARGET_NR_rt_sigpending:
        ret = get_errno(sigpending(&set));
        if (!is_error(ret)) {
            if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
            host_to_target_sigset(p, &set);
            unlock_user(p, arg1, sizeof(target_sigset_t));
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
#if defined(TARGET_ALPHA)
        abi_ulong mask = arg1;
        target_to_host_old_sigset(&set, &mask);
        if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
        target_to_host_old_sigset(&set, p);
        unlock_user(p, arg1, 0);
        ret = get_errno(sigsuspend(&set));
    case TARGET_NR_rt_sigsuspend:
        if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
        target_to_host_sigset(&set, p);
        unlock_user(p, arg1, 0);
        ret = get_errno(sigsuspend(&set));
    case TARGET_NR_rt_sigtimedwait:
        struct timespec uts, *puts;
        if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
        target_to_host_sigset(&set, p);
        unlock_user(p, arg1, 0);
        target_to_host_timespec(puts, arg3);
        ret = get_errno(sigtimedwait(&set, &uinfo, puts));
        if (!is_error(ret) && arg2) {
            if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
            host_to_target_siginfo(p, &uinfo);
            unlock_user(p, arg2, sizeof(target_siginfo_t));
    case TARGET_NR_rt_sigqueueinfo:
        if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
        target_to_host_siginfo(&uinfo, p);
        unlock_user(p, arg1, 0);
        ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_sigreturn(cpu_env);
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
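    /* Resource limits: rlim_cur and rlim_max are converted with
       target_to_host_rlim()/host_to_target_rlim() so the values keep the
       guest's representation of the limits. */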
    case TARGET_NR_setrlimit:
        int resource = arg1;
        struct target_rlimit *target_rlim;
        if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
        rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
        rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
        unlock_user_struct(target_rlim, arg2, 0);
        ret = get_errno(setrlimit(resource, &rlim));
    case TARGET_NR_getrlimit:
        int resource = arg1;
        struct target_rlimit *target_rlim;
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
    case TARGET_NR_getrusage:
        struct rusage rusage;
        ret = get_errno(getrusage(arg1, &rusage));
        if (!is_error(ret)) {
            host_to_target_rusage(arg2, &rusage);
    case TARGET_NR_gettimeofday:
        ret = get_errno(gettimeofday(&tv, NULL));
        if (!is_error(ret)) {
            if (copy_to_user_timeval(arg1, &tv))
    case TARGET_NR_settimeofday:
        if (copy_from_user_timeval(&tv, arg1))
        ret = get_errno(settimeofday(&tv, NULL));
#ifdef TARGET_NR_select
    case TARGET_NR_select:
        struct target_sel_arg_struct *sel;
        abi_ulong inp, outp, exp, tvp;
        if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
        nsel = tswapl(sel->n);
        inp = tswapl(sel->inp);
        outp = tswapl(sel->outp);
        exp = tswapl(sel->exp);
        tvp = tswapl(sel->tvp);
        unlock_user_struct(sel, arg1, 0);
        ret = do_select(nsel, inp, outp, exp, tvp);
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        goto unimplemented_nowarn;
    case TARGET_NR_symlink:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg2);
        ret = -TARGET_EFAULT;
        ret = get_errno(symlink(p, p2));
        unlock_user(p2, arg2, 0);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
    case TARGET_NR_symlinkat:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg3);
        ret = -TARGET_EFAULT;
        ret = get_errno(sys_symlinkat(p, arg2, p2));
        unlock_user(p2, arg3, 0);
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_oldlstat
    case TARGET_NR_oldlstat:
    case TARGET_NR_readlink:
        p = lock_user_string(arg1);
        p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
        ret = -TARGET_EFAULT;
        if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
            char real[PATH_MAX];
            temp = realpath(exec_path, real);
            ret = (temp == NULL) ? get_errno(-1) : strlen(real);
            snprintf((char *)p2, arg3, "%s", real);
        ret = get_errno(readlink(path(p), p2, arg3));
        unlock_user(p2, arg2, ret);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
    case TARGET_NR_readlinkat:
        p = lock_user_string(arg2);
        p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
        ret = -TARGET_EFAULT;
        ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
        unlock_user(p2, arg3, ret);
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_uselib
    case TARGET_NR_uselib:
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_reboot:
#ifdef TARGET_NR_readdir
    case TARGET_NR_readdir:
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE)
        abi_ulong v1, v2, v3, v4, v5, v6;
        if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
        unlock_user(v, arg1, 0);
        ret = get_errno(target_mmap(v1, v2, v3,
                                    target_to_host_bitmask(v4, mmap_flags_tbl),
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#define MMAP_SHIFT 12
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg6 << MMAP_SHIFT));
    case TARGET_NR_munmap:
        ret = get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        TaskState *ts = ((CPUState *)cpu_env)->opaque;
        /* Special hack to detect libc making the stack executable. */
        if ((arg3 & PROT_GROWSDOWN)
            && arg1 >= ts->info->stack_limit
            && arg1 <= ts->info->start_stack) {
            arg3 &= ~PROT_GROWSDOWN;
            arg2 = arg2 + arg1 - ts->info->stack_limit;
            arg1 = ts->info->stack_limit;
        ret = get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
    /* ??? msync/mlock/munlock are broken for softmmu. */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        ret = get_errno(msync(g2h(arg1), arg2, arg3));
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        ret = get_errno(mlock(g2h(arg1), arg2));
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        ret = get_errno(munlock(g2h(arg1), arg2));
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        ret = get_errno(mlockall(arg1));
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        ret = get_errno(munlockall());
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_ftruncate:
        ret = get_errno(ftruncate(arg1, arg2));
    case TARGET_NR_fchmod:
        ret = get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_fchmodat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_getpriority:
        /* libc does special remapping of the return value of
         * sys_getpriority() so it's just easiest to call
         * sys_getpriority() directly rather than through libc. */
        ret = get_errno(sys_getpriority(arg1, arg2));
    case TARGET_NR_setpriority:
        ret = get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_profil
    case TARGET_NR_profil:
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;
            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            unlock_user_struct(target_stfs, arg2, 1);
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;
            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            unlock_user_struct(target_stfs, arg3, 1);
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#ifdef TARGET_NR_ioperm
    case TARGET_NR_ioperm:
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        ret = do_socketcall(arg1, arg2);
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        ret = do_accept(arg1, arg2, arg3);
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        ret = do_bind(arg1, arg2, arg3);
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        ret = do_connect(arg1, arg2, arg3);
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        ret = do_getpeername(arg1, arg2, arg3);
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        ret = do_getsockname(arg1, arg2, arg3);
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        ret = get_errno(listen(arg1, arg2));
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        ret = get_errno(shutdown(arg1, arg2));
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        ret = do_socket(arg1, arg2, arg3);
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        ret = do_socketpair(arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
    case TARGET_NR_syslog:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_setitimer:
        struct itimerval value, ovalue, *pvalue;
        if (copy_from_user_timeval(&pvalue->it_interval, arg2)
            || copy_from_user_timeval(&pvalue->it_value,
                                      arg2 + sizeof(struct target_timeval)))
        ret = get_errno(setitimer(arg1, pvalue, &ovalue));
        if (!is_error(ret) && arg3) {
            if (copy_to_user_timeval(arg3,
                                     &ovalue.it_interval)
                || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
    case TARGET_NR_getitimer:
        struct itimerval value;
        ret = get_errno(getitimer(arg1, &value));
        if (!is_error(ret) && arg2) {
            if (copy_to_user_timeval(arg2,
                || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
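    /* stat/lstat/fstat: the host struct stat is copied field by field into
       the guest's struct target_stat; __put_user() also byte-swaps each
       field to the target's endianness. */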
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
    case TARGET_NR_fstat:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret)) {
            struct target_stat *target_st;
            if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
            memset(target_st, 0, sizeof(*target_st));
            __put_user(st.st_dev, &target_st->st_dev);
            __put_user(st.st_ino, &target_st->st_ino);
            __put_user(st.st_mode, &target_st->st_mode);
            __put_user(st.st_uid, &target_st->st_uid);
            __put_user(st.st_gid, &target_st->st_gid);
            __put_user(st.st_nlink, &target_st->st_nlink);
            __put_user(st.st_rdev, &target_st->st_rdev);
            __put_user(st.st_size, &target_st->st_size);
            __put_user(st.st_blksize, &target_st->st_blksize);
            __put_user(st.st_blocks, &target_st->st_blocks);
            __put_user(st.st_atime, &target_st->target_st_atime);
            __put_user(st.st_mtime, &target_st->target_st_mtime);
            __put_user(st.st_ctime, &target_st->target_st_ctime);
            unlock_user_struct(target_st, arg2, 1);
#ifdef TARGET_NR_olduname
    case TARGET_NR_olduname:
#ifdef TARGET_NR_iopl
    case TARGET_NR_iopl:
    case TARGET_NR_vhangup:
        ret = get_errno(vhangup());
#ifdef TARGET_NR_idle
    case TARGET_NR_idle:
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
    case TARGET_NR_wait4:
        abi_long status_ptr = arg2;
        struct rusage rusage, *rusage_ptr;
        abi_ulong target_rusage = arg4;
        rusage_ptr = &rusage;
        ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
        if (!is_error(ret)) {
            status = host_to_target_waitstatus(status);
            if (put_user_s32(status, status_ptr))
            host_to_target_rusage(target_rusage, &rusage);
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
    case TARGET_NR_sysinfo:
        struct target_sysinfo *target_value;
        struct sysinfo value;
        ret = get_errno(sysinfo(&value));
        if (!is_error(ret) && arg1)
            if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
            __put_user(value.uptime, &target_value->uptime);
            __put_user(value.loads[0], &target_value->loads[0]);
            __put_user(value.loads[1], &target_value->loads[1]);
            __put_user(value.loads[2], &target_value->loads[2]);
            __put_user(value.totalram, &target_value->totalram);
            __put_user(value.freeram, &target_value->freeram);
            __put_user(value.sharedram, &target_value->sharedram);
            __put_user(value.bufferram, &target_value->bufferram);
            __put_user(value.totalswap, &target_value->totalswap);
            __put_user(value.freeswap, &target_value->freeswap);
            __put_user(value.procs, &target_value->procs);
            __put_user(value.totalhigh, &target_value->totalhigh);
            __put_user(value.freehigh, &target_value->freehigh);
            __put_user(value.mem_unit, &target_value->mem_unit);
            unlock_user_struct(target_value, arg1, 1);
#ifdef TARGET_NR_ipc
        ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        ret = get_errno(semget(arg1, arg2, arg3));
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        ret = get_errno(do_semop(arg1, arg2, arg3));
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        ret = do_msgctl(arg1, arg2, arg3);
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        ret = get_errno(msgget(arg1, arg2));
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        ret = do_msgsnd(arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        ret = get_errno(shmget(arg1, arg2, arg3));
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        ret = do_shmctl(arg1, arg2, arg3);
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        ret = do_shmat(arg1, arg2, arg3);
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        ret = do_shmdt(arg1);
    case TARGET_NR_fsync:
        ret = get_errno(fsync(arg1));
    case TARGET_NR_clone:
#if defined(TARGET_SH4) || defined(TARGET_ALPHA)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#elif defined(TARGET_CRIS)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        gdb_exit(cpu_env, arg1);
        ret = get_errno(exit_group(arg1));
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        struct new_utsname * buf;
        if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
        ret = get_errno(sys_uname(buf));
        if (!is_error(ret)) {
            /* Overwrite the native machine name with whatever is being
               emulated. */
            strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
            /* Allow the user to override the reported release. */
            if (qemu_uname_release && *qemu_uname_release)
                strcpy (buf->release, qemu_uname_release);
        unlock_user_struct(buf, arg1, 1);
    case TARGET_NR_modify_ldt:
        ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86old:
    case TARGET_NR_vm86:
        ret = do_vm86(cpu_env, arg1, arg2);
    case TARGET_NR_adjtimex:
#ifdef TARGET_NR_create_module
    case TARGET_NR_create_module:
    case TARGET_NR_init_module:
    case TARGET_NR_delete_module:
#ifdef TARGET_NR_get_kernel_syms
    case TARGET_NR_get_kernel_syms:
    case TARGET_NR_quotactl:
    case TARGET_NR_getpgid:
        ret = get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        ret = get_errno(fchdir(arg1));
#ifdef TARGET_NR_bdflush /* not on x86_64 */
    case TARGET_NR_bdflush:
#ifdef TARGET_NR_sysfs
    case TARGET_NR_sysfs:
    case TARGET_NR_personality:
        ret = get_errno(personality(arg1));
#ifdef TARGET_NR_afs_syscall
    case TARGET_NR_afs_syscall:
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
#if !defined(__NR_llseek)
        ret = get_errno(lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5));
        if (put_user_s64(ret, arg4))
        ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
        if (put_user_s64(res, arg4))
    case TARGET_NR_getdents:
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        struct target_dirent *target_dirp;
        struct linux_dirent *dirp;
        abi_long count = arg3;
        dirp = malloc(count);
        ret = -TARGET_ENOMEM;
        ret = get_errno(sys_getdents(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent *de;
            struct target_dirent *tde;
            int reclen, treclen;
            int count1, tnamelen;
            if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
            reclen = de->d_reclen;
            treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
            tde->d_reclen = tswap16(treclen);
            tde->d_ino = tswapl(de->d_ino);
            tde->d_off = tswapl(de->d_off);
            tnamelen = treclen - (2 * sizeof(abi_long) + 2);
            /* XXX: may not be correct */
            pstrcpy(tde->d_name, tnamelen, de->d_name);
            de = (struct linux_dirent *)((char *)de + reclen);
            tde = (struct target_dirent *)((char *)tde + treclen);
            unlock_user(target_dirp, arg2, ret);
        struct linux_dirent *dirp;
        abi_long count = arg3;
        if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
        ret = get_errno(sys_getdents(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent *de;
            reclen = de->d_reclen;
            de->d_reclen = tswap16(reclen);
            tswapls(&de->d_ino);
            tswapls(&de->d_off);
            de = (struct linux_dirent *)((char *)de + reclen);
            unlock_user(dirp, arg2, ret);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        struct linux_dirent64 *dirp;
        abi_long count = arg3;
        if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
        ret = get_errno(sys_getdents64(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent64 *de;
            reclen = de->d_reclen;
            de->d_reclen = tswap16(reclen);
            tswap64s((uint64_t *)&de->d_ino);
            tswap64s((uint64_t *)&de->d_off);
            de = (struct linux_dirent64 *)((char *)de + reclen);
            unlock_user(dirp, arg2, ret);
#endif /* TARGET_NR_getdents64 */
#ifdef TARGET_NR__newselect
    case TARGET_NR__newselect:
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        struct target_pollfd *target_pfd;
        unsigned int nfds = arg2;
        target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
        pfd = alloca(sizeof(struct pollfd) * nfds);
        for(i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        ret = get_errno(poll(pfd, nfds, timeout));
        if (!is_error(ret)) {
            for(i = 0; i < nfds; i++) {
                target_pfd[i].revents = tswap16(pfd[i].revents);
            ret += nfds * (sizeof(struct target_pollfd)
                           - sizeof(struct pollfd));
        unlock_user(target_pfd, arg1, ret);
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        ret = get_errno(flock(arg1, arg2));
    case TARGET_NR_readv:
        vec = alloca(count * sizeof(struct iovec));
        if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
        ret = get_errno(readv(arg1, vec, count));
        unlock_iovec(vec, arg2, count, 1);
    case TARGET_NR_writev:
        vec = alloca(count * sizeof(struct iovec));
        if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
        ret = get_errno(writev(arg1, vec, count));
        unlock_iovec(vec, arg2, count, 0);
    case TARGET_NR_getsid:
        ret = get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        ret = get_errno(fdatasync(arg1));
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        ret = -TARGET_ENOTDIR;
    case TARGET_NR_sched_setparam:
        struct sched_param *target_schp;
        struct sched_param schp;
        if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
        schp.sched_priority = tswap32(target_schp->sched_priority);
        unlock_user_struct(target_schp, arg2, 0);
        ret = get_errno(sched_setparam(arg1, &schp));
    case TARGET_NR_sched_getparam:
        struct sched_param *target_schp;
        struct sched_param schp;
        ret = get_errno(sched_getparam(arg1, &schp));
        if (!is_error(ret)) {
            if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
            target_schp->sched_priority = tswap32(schp.sched_priority);
            unlock_user_struct(target_schp, arg2, 1);
    case TARGET_NR_sched_setscheduler:
        struct sched_param *target_schp;
        struct sched_param schp;
        if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
        schp.sched_priority = tswap32(target_schp->sched_priority);
        unlock_user_struct(target_schp, arg3, 0);
        ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
    case TARGET_NR_sched_getscheduler:
        ret = get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        ret = get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        ret = get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        ret = get_errno(sched_get_priority_min(arg1));
    case TARGET_NR_sched_rr_get_interval:
        ret = get_errno(sched_rr_get_interval(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
    case TARGET_NR_nanosleep:
        struct timespec req, rem;
        target_to_host_timespec(&req, arg1);
        ret = get_errno(nanosleep(&req, &rem));
        if (is_error(ret) && arg2) {
            host_to_target_timespec(arg2, &rem);
#ifdef TARGET_NR_query_module
    case TARGET_NR_query_module:
#ifdef TARGET_NR_nfsservctl
    case TARGET_NR_nfsservctl:
    case TARGET_NR_prctl:
        case PR_GET_PDEATHSIG:
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2))
            ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
#if defined(TARGET_I386) && !defined(TARGET_ABI32)
        ret = do_arch_prctl(cpu_env, arg1, arg2);
#ifdef TARGET_NR_pread
    case TARGET_NR_pread:
        if (((CPUARMState *)cpu_env)->eabi)
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(pread(arg1, p, arg3, arg4));
        unlock_user(p, arg2, ret);
    case TARGET_NR_pwrite:
        if (((CPUARMState *)cpu_env)->eabi)
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(pwrite(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
    case TARGET_NR_pwrite64:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    case TARGET_NR_sigaltstack:
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
    defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
    defined(TARGET_M68K)
        ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
    case TARGET_NR_sendfile:
#ifdef TARGET_NR_getpmsg
    case TARGET_NR_getpmsg:
#ifdef TARGET_NR_putpmsg
    case TARGET_NR_putpmsg:
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
        ret = get_errno(getrlimit(arg1, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
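    /* truncate64/ftruncate64: on 32-bit ABIs the 64-bit length may arrive
       split across argument registers, so the raw args are handed to
       target_truncate64()/target_ftruncate64() to be reassembled. */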
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        ret = host_to_target_stat64(cpu_env, arg2, &st);
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        ret = host_to_target_stat64(cpu_env, arg2, &st);
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        ret = host_to_target_stat64(cpu_env, arg2, &st);
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
    (defined(__NR_fstatat64) || defined(__NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
        if (!(p = lock_user_string(arg2)))
#ifdef __NR_fstatat64
        ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
        ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
        ret = host_to_target_stat64(cpu_env, arg3, &st);
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
    case TARGET_NR_getuid:
        ret = get_errno(high2lowuid(getuid()));
    case TARGET_NR_getgid:
        ret = get_errno(high2lowgid(getgid()));
    case TARGET_NR_geteuid:
        ret = get_errno(high2lowuid(geteuid()));
    case TARGET_NR_getegid:
        ret = get_errno(high2lowgid(getegid()));
    case TARGET_NR_setreuid:
        ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        int gidsetsize = arg1;
        uint16_t *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        ret = get_errno(getgroups(gidsetsize, grouplist));
        if (gidsetsize == 0)
        if (!is_error(ret)) {
            target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
            if (!target_grouplist)
            for(i = 0;i < ret; i++)
                target_grouplist[i] = tswap16(grouplist[i]);
            unlock_user(target_grouplist, arg2, gidsetsize * 2);
    case TARGET_NR_setgroups:
        int gidsetsize = arg1;
        uint16_t *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
        if (!target_grouplist) {
            ret = -TARGET_EFAULT;
        for(i = 0;i < gidsetsize; i++)
            grouplist[i] = tswap16(target_grouplist[i]);
        unlock_user(target_grouplist, arg2, 0);
        ret = get_errno(setgroups(gidsetsize, grouplist));
    case TARGET_NR_fchown:
        ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        ret = get_errno(setresuid(low2highuid(arg1),
                                  low2highuid(arg3)));
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        uid_t ruid, euid, suid;
        ret = get_errno(getresuid(&ruid, &euid, &suid));
        if (!is_error(ret)) {
            if (put_user_u16(high2lowuid(ruid), arg1)
                || put_user_u16(high2lowuid(euid), arg2)
                || put_user_u16(high2lowuid(suid), arg3))
#ifdef TARGET_NR_getresgid
    case TARGET_NR_setresgid:
        ret = get_errno(setresgid(low2highgid(arg1),
                                  low2highgid(arg3)));
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        gid_t rgid, egid, sgid;
        ret = get_errno(getresgid(&rgid, &egid, &sgid));
        if (!is_error(ret)) {
            if (put_user_u16(high2lowgid(rgid), arg1)
                || put_user_u16(high2lowgid(egid), arg2)
                || put_user_u16(high2lowgid(sgid), arg3))
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
    case TARGET_NR_setuid:
        ret = get_errno(setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        ret = get_errno(setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        ret = get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
#endif /* USE_UID16 */
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        ret = get_errno(getuid());
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
        ret = get_errno(getuid());
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
        ret = get_errno(getgid());
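    /* Alpha OSF sysinfo calls: the cases below translate between the
       hardware FPCR and the software completion (SWCR) bit layout expected
       by the guest, mirroring the kernel's ieee_fpcr_to_swcr and
       ieee_swcr_to_fpcr helpers. */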
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        case TARGET_GSI_IEEE_FP_CONTROL:
            uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
            /* Copied from linux ieee_fpcr_to_swcr. */
            swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
            swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
            swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
                                     | SWCR_TRAP_ENABLE_DZE
                                     | SWCR_TRAP_ENABLE_OVF);
            swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
                                     | SWCR_TRAP_ENABLE_INE);
            swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
            swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
            if (put_user_u64 (swcr, arg2))
        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             -- Retrieves current unaligned access state; not much used.
             -- Retrieves implver information; surely not used.
             -- Grabs a copy of the HWRPB; surely not used.
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        case TARGET_SSI_IEEE_FP_CONTROL:
        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            uint64_t swcr, fpcr, orig_fpcr;
            if (get_user_u64 (swcr, arg2))
            orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
            fpcr = orig_fpcr & FPCR_DYN_MASK;
            /* Copied from linux ieee_swcr_to_fpcr. */
            fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
            fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
            fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                              | SWCR_TRAP_ENABLE_DZE
                              | SWCR_TRAP_ENABLE_OVF)) << 48;
            fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                              | SWCR_TRAP_ENABLE_INE)) << 57;
            fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
            fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
            cpu_alpha_store_fpcr (cpu_env, fpcr);
            if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
                /* Old exceptions are not signaled. */
                fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
                /* If any exceptions set by this call, and are unmasked,
        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific. */
    case TARGET_NR_osf_sigprocmask:
        sigset_t set, oldset;
        case TARGET_SIG_BLOCK:
        case TARGET_SIG_UNBLOCK:
        case TARGET_SIG_SETMASK:
            ret = -TARGET_EINVAL;
        target_to_host_old_sigset(&set, &mask);
        sigprocmask(arg1, &set, &oldset);
        host_to_target_old_sigset(&mask, &oldset);
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        ret = get_errno(getgid());
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        int gidsetsize = arg1;
        uint32_t *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        ret = get_errno(getgroups(gidsetsize, grouplist));
        if (gidsetsize == 0)
        if (!is_error(ret)) {
            target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
            for(i = 0;i < ret; i++)
                target_grouplist[i] = tswap32(grouplist[i]);
            unlock_user(target_grouplist, arg2, gidsetsize * 4);
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        int gidsetsize = arg1;
        uint32_t *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
        if (!target_grouplist) {
            ret = -TARGET_EFAULT;
        for(i = 0;i < gidsetsize; i++)
            grouplist[i] = tswap32(target_grouplist[i]);
        unlock_user(target_grouplist, arg2, 0);
        ret = get_errno(setgroups(gidsetsize, grouplist));
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        ret = get_errno(fchown(arg1, arg2, arg3));
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        ret = get_errno(setresuid(arg1, arg2, arg3));
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        uid_t ruid, euid, suid;
        ret = get_errno(getresuid(&ruid, &euid, &suid));
        if (!is_error(ret)) {
            if (put_user_u32(ruid, arg1)
                || put_user_u32(euid, arg2)
                || put_user_u32(suid, arg3))
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        ret = get_errno(setresgid(arg1, arg2, arg3));
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        gid_t rgid, egid, sgid;
        ret = get_errno(getresgid(&rgid, &egid, &sgid));
        if (!is_error(ret)) {
            if (put_user_u32(rgid, arg1)
                || put_user_u32(egid, arg2)
                || put_user_u32(sgid, arg3))
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
6882 case TARGET_NR_pivot_root
:
6884 #ifdef TARGET_NR_mincore
6885 case TARGET_NR_mincore
:
6888 ret
= -TARGET_EFAULT
;
6889 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
6891 if (!(p
= lock_user_string(arg3
)))
6893 ret
= get_errno(mincore(a
, arg2
, p
));
6894 unlock_user(p
, arg3
, ret
);
6896 unlock_user(a
, arg1
, 0);
6900 #ifdef TARGET_NR_arm_fadvise64_64
6901 case TARGET_NR_arm_fadvise64_64
:
6904 * arm_fadvise64_64 looks like fadvise64_64 but
6905 * with different argument order
6913 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
6914 #ifdef TARGET_NR_fadvise64_64
6915 case TARGET_NR_fadvise64_64
:
6917 #ifdef TARGET_NR_fadvise64
6918 case TARGET_NR_fadvise64
:
6922 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
6923 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
6924 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
6925 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
6929 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
6932 #ifdef TARGET_NR_madvise
6933 case TARGET_NR_madvise
:
6934 /* A straight passthrough may not be safe because qemu sometimes
6935 turns private flie-backed mappings into anonymous mappings.
6936 This will break MADV_DONTNEED.
6937 This is a hint, so ignoring and returning success is ok. */
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        struct target_flock64 *target_fl;
#ifdef TARGET_ARM
        struct target_eabi_flock64 *target_efl;
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL)
            return cmd;

        switch(arg2) {
        case TARGET_F_GETLK64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (ret == 0) {
#ifdef TARGET_ARM
                if (((CPUARMState *)cpu_env)->eabi) {
                    if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                        goto efault;
                    target_efl->l_type = tswap16(fl.l_type);
                    target_efl->l_whence = tswap16(fl.l_whence);
                    target_efl->l_start = tswap64(fl.l_start);
                    target_efl->l_len = tswap64(fl.l_len);
                    target_efl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_efl, arg3, 1);
                } else
#endif
                {
                    if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                        goto efault;
                    target_fl->l_type = tswap16(fl.l_type);
                    target_fl->l_whence = tswap16(fl.l_whence);
                    target_fl->l_start = tswap64(fl.l_start);
                    target_fl->l_len = tswap64(fl.l_len);
                    target_fl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_fl, arg3, 1);
                }
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        break;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
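        /*
         * Minimal sketch (assumption, consistent with the comment above):
         * nothing to flush here, so just report success.
         */
        ret = 0;
        break;
#endif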
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
#ifdef TARGET_ARM
        if (((CPUARMState *)cpu_env)->eabi)
        {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
#endif
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
#ifdef TARGET_NR_setxattr
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
    case TARGET_NR_fsetxattr:
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    case TARGET_NR_fgetxattr:
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    case TARGET_NR_flistxattr:
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    case TARGET_NR_fremovexattr:
        ret = -TARGET_EOPNOTSUPP;
        break;
#endif
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif

#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif

#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif

#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif

#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr (&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user (p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(vmsplice(arg1, vec, count, arg4));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
        break;
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if(do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;